&&",
+ "分类url": "http://www.guaziyingyuan.com/Show/{cateId}-{area}--{class}-----{catePg}---{year}/",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/js/s/电影港.json b/js/s/电影港.json
new file mode 100644
index 0000000..85fda74
--- /dev/null
+++ b/js/s/电影港.json
@@ -0,0 +1,131 @@
+//写法思路来海阔视界,xpath筛选。本人是海阔用户,所以搬了海阔的jsoup写法过来。2022年9月17日
+//jsoup规则写法请查阅海阔视界或者海阔影视相关教程。不支持js写法
+//本文档为完整模板,请不要去无中生有添加多余的键值参数。
+{
+ //规则名
+ "title": "电影港",
+ //作者
+ "author": "香雅情",
+ //请求头UA,键名$键值,每一组用#分开,不填则默认okhttp/3.12.11,可填MOBILE_UA或PC_UA使用内置的手机版或电脑版UA
+ //多个请求头参数写法示例,"User-Agent$PC_UA#Referer$http://ww.baidu.com#Cookie$ser=ok",每一组用#分开。
+ //习惯查看手机源码写建议用手机版UA,习惯查看PC版源码写建议用电脑版UA
+ "Headers":"PC_UA",
+ //网页编码格式默认UTF-8编码,UTF-8,GBK,GB2312
+ "Coding_format":"gb2312",
+ //图片是否需要代理
+ "PicNeedProxy":"0",
+ //是否开启获取首页数据,0关闭,1开启
+ "homeContent":"0",
+ //分类链接起始页码,禁止负数和含小数点。
+ "firstpage": "1",
+ //分类链接,{cateId}是分类,{catePg}是页码,第一页没有页码的可以这样写 第二页链接[firstPage=第一页的链接]
+ "class_url": "https://www.dygang.tv/{cateId}/index_{catePg}.htm[firstPage=https://www.dygang.cc/{cateId}/index.htm]",
+ //分类名,分类1&分类2&分类3
+ "class_name": "最新电影&经典高清&国配电影&经典港片&国剧&日韩剧&美剧&综艺&动漫&纪录片&高清原盘&4K高清区&3D电影&电影专题",
+ //分类名替换词,替换词1&替换词2&替换词3,替换词包含英文&的用两个中文&&代替,示例:&&id=0&&&id=1
+ "class_value": "ys&bd&gy&gp&dsj&dsj1&yx&zy&dmq&jilupian&1080p&4K&3d&dyzt",
+ //筛选数据,json格式,参考xpath的筛选写法
+ "filterdata":{},
+
+ //分类页面截取数据模式,0为json,其它数字为普通网页。
+ "cat_mode": "1",
+ //分类列表数组定位,最多支持3层,能力有限,不是所有页面都能支持
+ "cat_arr_rule": "body&&table[width=388]",
+ //分类片单信息jsoup与xb截取写法切换,只作用于html网页,1为jsoup写法(默认),0为xb写法
+ "cat_is_jsoup":"1",
+ //分类片单标题
+ "cat_title": "img&&alt",
+ //分类片单链接
+ "cat_url": "a&&href",
+ //分类片单图片,支持自定义图片链接
+ "cat_pic": "img&&src",
+ //分类片单副标题
+ "cat_subtitle":"[align=center]&&Text",
+ //分类片单链接补前缀
+ "cat_prefix": "https://www.dygang.tv",
+ //分类片单链接补后缀
+ "cat_suffix": "",
+
+ //搜索请求头参数,不填则默认okhttp/3.12.11,可填MOBILE_UA或PC_UA使用内置的手机版或电脑版UA
+ //多个请求头参数写法示例,键名$键值,每一组用#分开。"User-Agent$PC_UA#Referer$http://ww.baidu.com#Cookie$ser=ok"。
+ "SHeaders":"User-Agent$PC_UA#Content-Type$charset=gb2312",
+ //搜索链接,搜索关键字用{wd}表示,post请求的最后面加;post
+ //POST链接示例 http://www.lezhutv.com/index.php?m=vod-search;post
+ "search_url": "https://www.dygang.tv/e/search/index123.php;post",
+ //POST搜索body,填写搜索关键字的键值,一般常见的是searchword和wd,不是POST搜索的可留空或删除。
+ "sea_PtBody":"keyboard={wd}&submit=搜+索&show=title,smalltext&tempid=1&tbname=article",
+
+ //搜索截取模式,0为json搜索,只支持列表在list数组里的,其它数字为网页截取。
+ "search_mode": "1",
+ //搜索列表数组定位,不填默认内置list,最多支持3层,能力有限,不是所有页面都能支持。
+ "sea_arr_rule": "body&&table[width=388]",
+ //搜索片单信息jsoup与xb截取写法切换,只作用于html网页,1为jsoup写法(默认),0为xb写法
+ "sea_is_jsoup":"1",
+ //搜索片单图片,支持自定义图片链接
+ "sea_pic": "img&&src",
+ //搜索片单标题
+ "sea_title": "img&&alt",
+ //搜索片单链接
+ "sea_url": "a&&href",
+ //搜索片单副标题
+ "sea_subtitle":"",
+ //搜索片单链接补前缀
+ "search_prefix": "https://www.dygang.tv",
+ //搜索片单链接补后缀,这个一般json搜索的需要
+ "search_suffix": "",
+
+ //片单链接是否直接播放,0否,1分类片单链接直接播放,2详情选集链接直接播放。
+ //设置成直接播放后,后面3个参数请注意该留空的请务必留空。
+ "force_play": "0",
+ //直接播放链接补前缀
+ "play_prefix": "",
+ //直接播放链接补后缀,设置为#isVideo=true#可强制识别为视频链接
+ "play_suffix": "",
+ //直接播放链接设置请求头,只对直链视频有效,每一组用#分开
+ "play_header": "",
+
+ //项目信息jsoup与xb截取写法切换,1为jsoup写法(默认),0为xb写法
+ "proj_is_jsoup":"0",
+ //类型数据,截取前缀&&截取后缀
+ "proj_cate": "",
+ //年代数据,截取前缀&&截取后缀
+ "proj_year": "",
+ //地区数据,截取前缀&&截取后缀
+ "proj_area": "",
+ //演员数据,截取前缀&&截取后缀
+ "proj_actor": "演 员&&",
+ //简介内容,截取前缀&&截取后缀
+ "proj_plot": "简 介&&",
+
+ //线路截取区域,如果不需要请把tab_title或tab_arr_rule置空或者全部不要填。
+ //线路截取数组
+ "tab_arr_rule": "#dede_content",
+ //线路标题,截取前缀&&截取后缀
+ "tab_title": "strong&&Text",
+
+ //列表数组截取,必须
+ "list_arr_rule": "#dede_content",
+ //集数数组截取,必须
+ "epi_arr_rule": "table&&[href*=magnet]",
+ //集数标题,截取前缀&&截取后缀
+ "epi_title": "a&&Text",
+ //集数链接,截取前缀&&截取后缀
+ "epi_url": "a&&href",
+ //选集是否反转显示
+ "epi_reverse": "0",
+ //集数链接补前缀
+ "epiurl_prefix": "",
+ //集数链接补后缀
+ "epiurl_suffix": "",
+
+ //下面几个参数请勿乱用。否则可能会有副作用。
+ //分析网页源码中有'
+ next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
+ if next_data_match:
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+ # 获取轮播图数据 - 这些通常是推荐内容
+ if "bannerList" in page_props and isinstance(page_props["bannerList"], list):
+ banner_list = page_props["bannerList"]
+ for banner in banner_list:
+ book_id = banner.get("bookId", "")
+ book_name = banner.get("bookName", "")
+ cover_url = banner.get("coverWap", banner.get("wapUrl", ""))
+ # 获取状态和章节数
+ status = banner.get("statusDesc", "")
+ total_chapters = banner.get("totalChapterNum", "")
+ if book_id and book_name:
+ videos.append({
+ "vod_id": f"/drama/{book_id}",
+ "vod_name": book_name,
+ "vod_pic": cover_url,
+ "vod_remarks": f"{status} {total_chapters}集" if total_chapters else status
+ })
+
+ # SEO分类下的推荐
+ if "seoColumnVos" in page_props and isinstance(page_props["seoColumnVos"], list):
+ for column in page_props["seoColumnVos"]:
+ book_infos = column.get("bookInfos", [])
+ for book in book_infos:
+ book_id = book.get("bookId", "")
+ book_name = book.get("bookName", "")
+ cover_url = book.get("coverWap", "")
+ status = book.get("statusDesc", "")
+ total_chapters = book.get("totalChapterNum", "")
+
+ if book_id and book_name:
+ videos.append({
+ "vod_id": f"/drama/{book_id}",
+ "vod_name": book_name,
+ "vod_pic": cover_url,
+ "vod_remarks": f"{status} {total_chapters}集" if total_chapters else status
+ })
+
+ # # 去重
+ # seen = set()
+ # unique_videos = []
+ # for video in videos:
+ # if video["vod_id"] not in seen:
+ # seen.add(video["vod_id"])
+ # unique_videos.append(video)
+ # videos = unique_videos
+
+ except Exception as e:
+ print(f"获取首页推荐内容出错: {e}")
+
+ result = {
+ "list": videos
+ }
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ """获取分类内容"""
+ result = {}
+ videos = []
+ url = f"{self.siteUrl}/browse/{tid}/{pg}"
+ response = self.fetch(url)
+ html_content = response.text
+ # 提取NEXT_DATA JSON数据
+ next_data_pattern = r''
+ next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
+ if next_data_match:
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+ # 获取总页数和当前页
+ current_page = page_props.get("page", 1)
+ total_pages = page_props.get("pages", 1)
+ # 获取书籍列表
+ book_list = page_props.get("bookList", [])
+ # 转换为通用格式
+ for book in book_list:
+ book_id = book.get("bookId", "")
+ book_name = book.get("bookName", "")
+ cover_url = book.get("coverWap", "")
+ status_desc = book.get("statusDesc", "")
+ total_chapters = book.get("totalChapterNum", "")
+ if book_id and book_name:
+ videos.append({
+ "vod_id": f"/drama/{book_id}",
+ "vod_name": book_name,
+ "vod_pic": cover_url,
+ "vod_remarks": f"{status_desc} {total_chapters}集" if total_chapters else status_desc
+ })
+ # 构建返回结果
+ result = {
+ "list": videos,
+ "page": int(current_page),
+ "pagecount": total_pages,
+ "limit": len(videos),
+ "total": total_pages * len(videos) if videos else 0
+ }
+ return result
+
+ def switch(self, key, pg):
+ # 搜索功能
+ search_results = []
+ # 获取第一页结果,并检查总页数
+ url = f"{self.siteUrl}/search?searchValue={key}&page={pg}"
+ response = self.fetch(url)
+ html_content = response.text
+ # 提取NEXT_DATA JSON数据
+ next_data_pattern = r''
+ next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
+ if next_data_match:
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+ # 获取总页数
+ total_pages = page_props.get("pages", 1)
+ # 处理所有页的数据
+ all_book_list = []
+ # 添加第一页的书籍列表
+ book_list = page_props.get("bookList", [])
+ all_book_list.extend(book_list)
+ # 如果有多页,获取其他页的数据
+ if total_pages > 1 : # quick模式只获取第一页
+ for page in range(2, total_pages + 1):
+ next_page_url = f"{self.siteUrl}/search?searchValue={key}&page={page}"
+ next_page_response = self.fetch(next_page_url)
+ next_page_html = next_page_response.text
+ next_page_match = re.search(next_data_pattern, next_page_html, re.DOTALL)
+ if next_page_match:
+ next_page_json = json.loads(next_page_match.group(1))
+ next_page_props = next_page_json.get("props", {}).get("pageProps", {})
+ next_page_books = next_page_props.get("bookList", [])
+ all_book_list.extend(next_page_books)
+ # 转换为统一的搜索结果格式
+ for book in all_book_list:
+ book_id = book.get("bookId", "")
+ book_name = book.get("bookName", "")
+ cover_url = book.get("coverWap", "")
+ total_chapters = book.get("totalChapterNum", "0")
+ status_desc = book.get("statusDesc", "")
+ # 构建视频项
+ vod = {
+ "vod_id": f"/drama/{book_id}",
+ "vod_name": book_name,
+ "vod_pic": cover_url,
+ "vod_remarks": f"{status_desc} {total_chapters}集"
+ }
+ search_results.append(vod)
+ result = {
+ "list": search_results,
+ "page": pg
+ }
+ return result
+
+ def searchContent(self, key, quick, pg=1):
+ result = self.switch(key, pg=pg)
+ result['page'] = pg
+ return result
+
+ def searchContentPage(self, key, quick, pg=1):
+ return self.searchContent(key, quick, pg)
+
+ def detailContent(self, ids):
+ # 获取剧集信息
+ vod_id = ids[0]
+ episode_id = None
+ chapter_id = None
+
+ if not vod_id.startswith('/drama/'):
+ if vod_id.startswith('/episode/'):
+ episode_info = vod_id.replace('/episode/', '').split('/')
+ if len(episode_info) >= 2:
+ episode_id = episode_info[0]
+ chapter_id = episode_info[1]
+ vod_id = f'/drama/{episode_id}'
+ else:
+ vod_id = '/drama/' + vod_id
+
+ drama_url = self.siteUrl + vod_id
+ print(f"请求URL: {drama_url}")
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
+ "Referer": self.siteUrl,
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
+ "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
+ }
+
+ rsp = self.fetch(drama_url, headers=headers)
+ if not rsp or rsp.status_code != 200:
+ print(f"请求失败,状态码: {getattr(rsp, 'status_code', 'N/A')}")
+ return {}
+
+ html = rsp.text
+ next_data_match = re.search(r'', html, re.DOTALL)
+
+ if not next_data_match:
+ print("未找到NEXT_DATA内容")
+ return {}
+
+ try:
+ next_data = json.loads(next_data_match.group(1))
+ page_props = next_data.get("props", {}).get("pageProps", {})
+ print(f"找到页面属性,包含 {len(page_props.keys())} 个键")
+
+ book_info = page_props.get("bookInfoVo", {})
+ chapter_list = page_props.get("chapterList", [])
+
+ title = book_info.get("title", "")
+ sub_title = f"{book_info.get('totalChapterNum', '')}集"
+
+ categories = []
+ for category in book_info.get("categoryList", []):
+ categories.append(category.get("name", ""))
+
+ vod_content = book_info.get("introduction", "")
+
+ vod = {
+ "vod_id": vod_id,
+ "vod_name": title,
+ "vod_pic": book_info.get("coverWap", ""),
+ "type_name": ",".join(categories),
+ "vod_year": "",
+ "vod_area": book_info.get("countryName", ""),
+ "vod_remarks": sub_title,
+ "vod_actor": ", ".join([p.get("name", "") for p in book_info.get("performerList", [])]),
+ "vod_director": "",
+ "vod_content": vod_content
+ }
+
+ # 处理播放列表
+ play_url_list = []
+ episodes = []
+
+ if chapter_list:
+ print(f"找到 {len(chapter_list)} 个章节")
+
+ # 先检查是否有可以直接使用的MP4链接作为模板
+ mp4_template = None
+ first_mp4_chapter_id = None
+
+ # 先搜索第一个章节的MP4链接
+ # 为提高成功率,尝试直接请求第一个章节的播放页
+ if chapter_list and len(chapter_list) > 0:
+ first_chapter = chapter_list[0]
+ first_chapter_id = first_chapter.get("chapterId", "")
+ drama_id_clean = vod_id.replace('/drama/', '')
+
+ if first_chapter_id and drama_id_clean:
+ first_episode_url = f"{self.siteUrl}/episode/{drama_id_clean}/{first_chapter_id}"
+ print(f"请求第一集播放页: {first_episode_url}")
+
+ first_rsp = self.fetch(first_episode_url, headers=headers)
+ if first_rsp and first_rsp.status_code == 200:
+ first_html = first_rsp.text
+ # 直接从HTML提取MP4链接
+ mp4_pattern = r'(https?://[^"\']+\.mp4)'
+ mp4_matches = re.findall(mp4_pattern, first_html)
+ if mp4_matches:
+ mp4_template = mp4_matches[0]
+ first_mp4_chapter_id = first_chapter_id
+ print(f"找到MP4链接模板: {mp4_template}")
+ print(f"模板对应的章节ID: {first_mp4_chapter_id}")
+
+ # 如果未找到模板,再检查章节对象中是否有MP4链接
+ if not mp4_template:
+ for chapter in chapter_list[:5]: # 只检查前5个章节以提高效率
+ if "chapterVideoVo" in chapter and chapter["chapterVideoVo"]:
+ chapter_video = chapter["chapterVideoVo"]
+ mp4_url = chapter_video.get("mp4", "") or chapter_video.get("mp4720p", "") or chapter_video.get("vodMp4Url", "")
+ if mp4_url and ".mp4" in mp4_url:
+ mp4_template = mp4_url
+ first_mp4_chapter_id = chapter.get("chapterId", "")
+ print(f"从chapterVideoVo找到MP4链接模板: {mp4_template}")
+ print(f"模板对应的章节ID: {first_mp4_chapter_id}")
+ break
+
+ # 遍历所有章节处理播放信息
+ for chapter in chapter_list:
+ chapter_id = chapter.get("chapterId", "")
+ chapter_name = chapter.get("chapterName", "")
+
+ # 1. 如果章节自身有MP4链接,直接使用
+ if "chapterVideoVo" in chapter and chapter["chapterVideoVo"]:
+ chapter_video = chapter["chapterVideoVo"]
+ mp4_url = chapter_video.get("mp4", "") or chapter_video.get("mp4720p", "") or chapter_video.get("vodMp4Url", "")
+ if mp4_url and ".mp4" in mp4_url:
+ episodes.append(f"{chapter_name}${mp4_url}")
+ continue
+
+ # 2. 如果有MP4模板,尝试替换章节ID构建MP4链接
+ if mp4_template and first_mp4_chapter_id and chapter_id:
+ # 替换模板中的章节ID部分
+ if first_mp4_chapter_id in mp4_template:
+ new_mp4_url = mp4_template.replace(first_mp4_chapter_id, chapter_id)
+ episodes.append(f"{chapter_name}${new_mp4_url}")
+ continue
+
+ # 3. 如果上述方法都不可行,回退到使用chapter_id构建中间URL
+ if chapter_id and chapter_name:
+ url = f"{vod_id}${chapter_id}${chapter_name}"
+ episodes.append(f"{chapter_name}${url}")
+
+ if not episodes and vod_id:
+ # 尝试构造默认的集数
+ total_chapters = int(book_info.get("totalChapterNum", "0"))
+ if total_chapters > 0:
+ print(f"尝试构造 {total_chapters} 个默认集数")
+
+ # 如果知道章节ID的模式,可以构造
+ if chapter_id and episode_id:
+ for i in range(1, total_chapters + 1):
+ chapter_name = f"第{i}集"
+ url = f"{vod_id}${chapter_id}${chapter_name}"
+ episodes.append(f"{chapter_name}${url}")
+ else:
+ # 使用普通的构造方式
+ for i in range(1, total_chapters + 1):
+ chapter_name = f"第{i}集"
+ url = f"{vod_id}${chapter_name}"
+ episodes.append(f"{chapter_name}${url}")
+
+ if episodes:
+ play_url_list.append("#".join(episodes))
+ vod['vod_play_from'] = '河马剧场'
+ vod['vod_play_url'] = '$$$'.join(play_url_list)
+
+ result = {
+ 'list': [vod]
+ }
+ return result
+ except Exception as e:
+ print(f"解析详情页失败: {str(e)}")
+ print(traceback.format_exc())
+ return {}
+
+ def playerContent(self, flag, id, vipFlags):
+ result = {}
+ print(f"调用playerContent: flag={flag}, id={id}")
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
+ "Referer": self.siteUrl,
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
+ "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
+ }
+
+ # 解析id参数
+ parts = id.split('$')
+ drama_id = None
+ chapter_id = None
+
+ if len(parts) >= 2:
+ drama_id = parts[0]
+ chapter_id = parts[1]
+ chapter_name = parts[2] if len(parts) > 2 else "第一集"
+ print(f"解析参数: drama_id={drama_id}, chapter_id={chapter_id}")
+ else:
+ # 处理旧数据格式
+ print(f"使用原始URL格式: {id}")
+ result["parse"] = 0
+ result["url"] = id
+ result["header"] = json.dumps(headers)
+ return result
+
+ # 直接检查chapter_id是否包含http(可能已经是视频链接)
+ if 'http' in chapter_id and '.mp4' in chapter_id:
+ print(f"已经是MP4链接: {chapter_id}")
+ result["parse"] = 0
+ result["url"] = chapter_id
+ result["header"] = json.dumps(headers)
+ return result
+
+ # 构建episode页面URL
+ drama_id_clean = drama_id.replace('/drama/', '')
+ episode_url = f"{self.siteUrl}/episode/{drama_id_clean}/{chapter_id}"
+ print(f"请求episode页面: {episode_url}")
+
+ try:
+ rsp = self.fetch(episode_url, headers=headers)
+ if not rsp or rsp.status_code != 200:
+ print(f"请求失败,状态码: {getattr(rsp, 'status_code', 'N/A')}")
+ result["parse"] = 0
+ result["url"] = id
+ result["header"] = json.dumps(headers)
+ return result
+
+ html = rsp.text
+ print(f"获取页面大小: {len(html)} 字节")
+
+ # 尝试从NEXT_DATA提取视频链接
+ mp4_url = None
+
+ # 方法1: 从NEXT_DATA提取
+ next_data_match = re.search(r'', html, re.DOTALL)
+ if next_data_match:
+ try:
+ print("找到NEXT_DATA")
+ next_data = json.loads(next_data_match.group(1))
+ page_props = next_data.get("props", {}).get("pageProps", {})
+
+ # 从chapterList中查找当前章节
+ chapter_list = page_props.get("chapterList", [])
+ print(f"找到章节列表,长度: {len(chapter_list)}")
+
+ for chapter in chapter_list:
+ if chapter.get("chapterId") == chapter_id:
+ print(f"找到匹配的章节: {chapter.get('chapterName')}")
+ chapter_video = chapter.get("chapterVideoVo", {})
+ mp4_url = chapter_video.get("mp4", "") or chapter_video.get("mp4720p", "") or chapter_video.get("vodMp4Url", "")
+ if mp4_url:
+ print(f"从chapterList找到MP4链接: {mp4_url}")
+ break
+
+ # 如果未找到,尝试从当前章节获取
+ if not mp4_url:
+ current_chapter = page_props.get("chapterInfo", {})
+ if current_chapter:
+ print("找到当前章节信息")
+ chapter_video = current_chapter.get("chapterVideoVo", {})
+ mp4_url = chapter_video.get("mp4", "") or chapter_video.get("mp4720p", "") or chapter_video.get("vodMp4Url", "")
+ if mp4_url:
+ print(f"从chapterInfo找到MP4链接: {mp4_url}")
+ except Exception as e:
+ print(f"解析NEXT_DATA失败: {str(e)}")
+ print(traceback.format_exc())
+
+ # 方法2: 直接从HTML中提取MP4链接
+ if not mp4_url:
+ mp4_pattern = r'(https?://[^"\']+\.mp4)'
+ mp4_matches = re.findall(mp4_pattern, html)
+ if mp4_matches:
+ # 查找含有chapter_id的链接
+ matched_mp4 = False
+ for url in mp4_matches:
+ if chapter_id in url:
+ mp4_url = url
+ matched_mp4 = True
+ print(f"从HTML直接提取章节MP4链接: {mp4_url}")
+ break
+
+ # 如果没找到包含chapter_id的链接,使用第一个
+ if not matched_mp4 and mp4_matches:
+ mp4_url = mp4_matches[0]
+ print(f"从HTML直接提取MP4链接: {mp4_url}")
+
+ if mp4_url and ".mp4" in mp4_url:
+ print(f"最终找到的MP4链接: {mp4_url}")
+ result["parse"] = 0
+ result["url"] = mp4_url
+ result["header"] = json.dumps(headers)
+ return result
+ else:
+ print(f"未找到有效的MP4链接,尝试再次解析页面内容")
+ # 再尝试一次从HTML中广泛搜索所有可能的MP4链接
+ all_mp4_pattern = r'(https?://[^"\']+\.mp4)'
+ all_mp4_matches = re.findall(all_mp4_pattern, html)
+ if all_mp4_matches:
+ mp4_url = all_mp4_matches[0]
+ print(f"从HTML广泛搜索找到MP4链接: {mp4_url}")
+ result["parse"] = 0
+ result["url"] = mp4_url
+ result["header"] = json.dumps(headers)
+ return result
+
+ print(f"未找到视频链接,返回原episode URL: {episode_url}")
+ result["parse"] = 0
+ result["url"] = episode_url
+ result["header"] = json.dumps(headers)
+ return result
+ except Exception as e:
+ print(f"请求或解析失败: {str(e)}")
+ print(traceback.format_exc())
+ result["parse"] = 0
+ result["url"] = id
+ result["header"] = json.dumps(headers)
+ return result
+
+ def localProxy(self, param):
+ # 本地代理处理,此处简单返回传入的参数
+ return [200, "video/MP2T", {}, param]
+
+ def destroy(self):
+ # 资源回收
+ pass
\ No newline at end of file
diff --git a/py/浴火社APP.py b/py/浴火社APP.py
new file mode 100644
index 0000000..4698b09
--- /dev/null
+++ b/py/浴火社APP.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import threading
+import time
+from base64 import b64decode, b64encode
+import requests
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.did = self.getdid()
+ self.token=self.gettoken()
+ domain=self.domain()
+ self.phost=self.host_late(domain['domain_preview'])
+ self.bhost=domain['domain_original']
+ self.names=domain['name_original']
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://lulu-api-92mizw.jcdwn.com'
+
+ headers = {
+ 'User-Agent': 'okhttp/4.11.0',
+ 'referer': 'https://app.nova-traffic-1688.com',
+ }
+
+ def homeContent(self, filter):
+ BASE_CATEGORIES = [
+ {'type_name': '片商', 'type_id': 'makers'},
+ {'type_name': '演员', 'type_id': 'actor'}
+ ]
+
+ SORT_OPTIONS = {
+ 'key': 'sortby',
+ 'name': 'sortby',
+ 'value': [
+ {'n': '最新', 'v': 'on_shelf_at'},
+ {'n': '最热', 'v': 'hot'}
+ ]
+ }
+
+ tags = self.getdata('/api/v1/video/tag?current=1&pageSize=100&level=1')
+ producers = self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
+ regions = self.getdata('/api/v1/video/region?current=1&pageSize=100')
+ result = {'class': [], 'filters': {}}
+ result['class'].extend(BASE_CATEGORIES)
+ for category in BASE_CATEGORIES:
+ result['filters'][category['type_id']] = [SORT_OPTIONS]
+ if tags.get('data'):
+ main_tag = tags['data'][0]
+ result['class'].append({
+ 'type_name': '发现',
+ 'type_id': f'{main_tag["id"]}_tag'
+ })
+ tag_values = [
+ {'n': tag['name'], 'v': f"{tag['id']}_tag"}
+ for tag in tags['data'][1:]
+ if tag.get('id')
+ ]
+ result['filters'][f'{main_tag["id"]}_tag'] = [
+ {'key': 'tagtype', 'name': 'tagtype', 'value': tag_values},
+ SORT_OPTIONS
+ ]
+
+ region_filter = {
+ 'key': 'region_ids',
+ 'name': 'region_ids',
+ 'value': [
+ {'n': region['name'], 'v': region['id']}
+ for region in regions['data'][1:]
+ if region.get('id')
+ ]
+ }
+ self.aid=regions['data'][0]['id']
+ result['filters']['actor'].append({
+ 'key': 'region_id',
+ 'name': 'region_id',
+ 'value': region_filter['value'][:2]
+ })
+ complex_sort = {
+ 'key': 'sortby',
+ 'name': 'sortby',
+ 'value': [
+ {'n': '综合', 'v': 'complex'},
+ *SORT_OPTIONS['value']
+ ]
+ }
+ producer_filters = [region_filter, complex_sort]
+ for producer in producers['data']:
+ result['class'].append({
+ 'type_name': producer['name'],
+ 'type_id': f'{producer["id"]}_sx'
+ })
+ result['filters'][f'{producer["id"]}_sx'] = producer_filters
+ return result
+
+ def homeVideoContent(self):
+ data=self.getdata('/api/v1/video?current=1&pageSize=60®ion_ids=&sortby=complex')
+ return {'list':self.getlist(data)}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ if 'act' in tid:
+ data=self.getact(tid, pg, filter, extend)
+ elif 'tag' in tid:
+ data=self.gettag(tid, pg, filter, extend)
+ elif 'sx' in tid:
+ data=self.getsx(tid, pg, filter, extend)
+ elif 'make' in tid:
+ data=self.getmake(tid, pg, filter, extend)
+ result = {}
+ result['list'] = data
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ v=self.getdata(f'/api/v1/video?current=1&pageSize=1&id={ids[0]}&detail=1')
+ v=v['data'][0]
+ vod = {
+ 'vod_name': v.get('title'),
+ 'type_name': '/'.join(v.get('tag_names',[])),
+ 'vod_play_from': '浴火社',
+ 'vod_play_url': ''
+ }
+ p=[]
+ for i,j in enumerate(self.bhost):
+ p.append(f'{self.names[i]}${j}{v.get("highres_url") or v.get("preview_url")}@@@{v["id"]}')
+ vod['vod_play_url'] = '#'.join(p)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.getdata(f'/api/v1/video?current={pg}&pageSize=30&title={key}')
+ return {'list':self.getlist(data),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ url=f'{self.getProxyUrl()}&url={self.e64(id)}&type=m3u8'
+ return {'parse': 0, 'url': url, 'header': self.headers}
+
+ def localProxy(self, param):
+ if param.get('type')=='image':
+ data=self.fetch(param.get('url'), headers=self.headers).text
+ content=b64decode(data.encode('utf-8'))
+ return [200, 'image/png', content]
+ if param.get('type')=='m3u8':
+ ids=self.d64(param.get('url')).split('@@@')
+ data=self.fetch(ids[0], headers=self.headers).text
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if 'URI=' in string:
+ replacement = f'URI="{self.getProxyUrl()}&id={ids[1]}&type=mkey"'
+ lines[index]=re.sub(r'URI="[^"]+"', replacement, string)
+ continue
+ if '#EXT' not in string and 'http' not in string:
+ last_slash_index = ids[0].rfind('/')
+ lpath = ids[0][:last_slash_index + 1]
+ lines[index] = f'{lpath}{string}'
+ data = '\n'.join(lines)
+ return [200, 'audio/x-mpegurl', data]
+ if param.get('type')=='mkey':
+ id=param.get('id')
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ 'authdog': self.token
+ }
+ response = self.fetch(f'{self.host}/api/v1/video/key/{id}', headers=headers)
+ type=response.headers.get('Content-Type')
+ return [200, type, response.content]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def getdid(self):
+ did = self.md5(str(int(time.time() * 1000)))
+ try:
+ if self.getCache('did'):
+ return self.getCache('did')
+ else:
+ self.setCache('did', did)
+ return did
+ except Exception as e:
+ self.setCache('did', did)
+ return did
+
+ def host_late(self, url_list):
+ if isinstance(url_list, str):
+ urls = [u.strip() for u in url_list.split(',')]
+ else:
+ urls = url_list
+ if len(urls) <= 1:
+ return urls[0] if urls else ''
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ response = requests.head(url, timeout=1.0, allow_redirects=False)
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ except Exception as e:
+ results[url] = float('inf')
+
+ for url in urls:
+ t = threading.Thread(target=test_host, args=(url,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+ return min(results.items(), key=lambda x: x[1])[0]
+
+ def domain(self):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ }
+ response = self.fetch(f'{self.host}/api/v1/system/domain', headers=headers)
+ return self.aes(response.content)
+
+ def aes(self, word):
+ key = b64decode("amtvaWc5ZnJ2Ym5taml1eQ==")
+ iv = b64decode("AAEFAwQFCQcICQoLDA0ODw==")
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ decrypted = unpad(cipher.decrypt(word), AES.block_size)
+ return json.loads(decrypted.decode('utf-8'))
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+ def gettoken(self):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ 'cookei': self.md5(f'{self.did}+android'),
+ 'siteid': '11',
+ 'siteauthority': 'lls888.tv'
+ }
+
+ json_data = {
+ 'app_id': 'jukjoe.zqgpi.hfzvde.sdot',
+ 'phone_device': 'Redmi M2012K10C',
+ 'device_id': self.did,
+ 'device_type': 'android',
+ 'invite_code': 'oi1o',
+ 'is_first': 1,
+ 'os_version': '11',
+ 'version': '8.59',
+ }
+ response = self.post(f'{self.host}/api/v1/member/device', headers=headers, json=json_data)
+ tdata = self.aes(response.content)
+ return f'{tdata["token_type"]} {tdata["access_token"]}'
+
+ def getdata(self, path):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ 'authdog': self.token
+ }
+ response = self.fetch(f'{self.host}{path}', headers=headers)
+ return self.aes(response.content)
+
+ def getimg(self, path):
+ if not path.startswith('/'):
+ path = f'/{path}'
+ return f'{self.getProxyUrl()}&url={self.phost}{path}&type=image'
+
+ def getlist(self,data):
+ videos = []
+ for i in data['data']:
+ videos.append({
+ 'vod_id': i['id'],
+ 'vod_name': i['title'],
+ 'vod_pic': self.getimg(i.get('coverphoto_h' or i.get('coverphoto_v'))),
+ 'style': {"type": "rect", "ratio": 1.33}})
+ return videos
+
+ def geticon(self, data, st='',style=None):
+ if style is None:style = {"type": "oval"}
+ videos = []
+ for i in data['data']:
+ videos.append({
+ 'vod_id': f'{i["id"]}{st}',
+ 'vod_name': i['name'],
+ 'vod_pic': self.getimg(i.get('icon_path')),
+ 'vod_tag': 'folder',
+ 'style': style})
+ return videos
+
+ def getact(self, tid, pg, filter, extend):
+ if tid == 'actor' and pg=='1':
+ data = self.getdata(f'/api/v1/video/actor?current=1&pageSize=999®ion_id={extend.get("region_id",self.aid)}&discover_page={pg}')
+ return self.geticon(data, '_act')
+ elif '_act' in tid:
+ data = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&actor_ids={tid.split("_")[0]}&sortby={extend.get("sortby","on_shelf_at")}')
+ return self.getlist(data)
+
+ def gettag(self, tid, pg, filter, extend):
+ if '_tag' in tid:
+ tid=extend.get('tagtype',tid)
+ data=self.getdata(f'/api/v1/video/tag?current={pg}&pageSize=100&level=2&parent_id={tid.split("_")[0]}')
+ return self.geticon(data, '_stag',{"type": "rect", "ratio": 1.33})
+ elif '_stag' in tid:
+ data = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&tag_ids={tid.split("_")[0]}&sortby={extend.get("sortby","on_shelf_at")}')
+ return self.getlist(data)
+
+ def getsx(self, tid, pg, filter, extend):
+ data=self.getdata(f'/api/v1/video?current={pg}&pageSize=20&producer_ids={tid.split("_")[0]}®ion_ids={extend.get("region_ids","")}&sortby={extend.get("sortby","complex")}')
+ return self.getlist(data)
+
+ def getmake(self, tid, pg, filter, extend):
+ if pg=='1':
+ data=self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
+ return self.geticon(data, '_sx',{"type": "rect", "ratio": 1.33})
+
diff --git a/py/海马影视.py b/py/海马影视.py
new file mode 100644
index 0000000..801ffaf
--- /dev/null
+++ b/py/海马影视.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import string
+import sys
+from base64 import b64decode, b64encode
+from urllib.parse import quote, unquote
+sys.path.append('..')
+import concurrent.futures
+from base.spider import Spider
+
+
class Spider(Spider):
    # TVBox-style spider for the 海马影视 app API (w.dcmovie.top).
    # Lifecycle hooks below are required by the base class but unused here.

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin for every request.
    host='http://w.dcmovie.top'

    # Request headers imitating the official Android client.
    headers = {
        'User-Agent': 'okhttp/4.9.1',
        'mark-time': 'null',
        'fn-api-version': '1.3.2',
        'versionCode': '5',
        'product': 'gysg',
    }

    def homeContent(self, filter):
        """Home page: category list, per-category filters (fetched
        concurrently via self.fts) and banner videos of the first category."""
        data=self.fetch(f"{self.host}/api.php/vod/type", headers=self.headers).json()
        result,filters,videos = {},{},[]
        # The first entry of data['list'] is the recommendation channel; it is
        # used for the banner below and excluded from the class list.
        classes = [{'type_id': i['type_name'], 'type_name': i['type_name']} for i in data['list'][1:]]
        body={'token':'', 'type_id':data['list'][0]['type_id']}
        ldata=self.post(f"{self.host}/api.php/vod/category", data=body, headers=self.headers).json()
        for i in ldata['data']['banner']:
            videos.append({
                'vod_id':i.get('vod_id'),
                'vod_name':i.get('vod_name'),
                'vod_pic':i.get('vod_pic_thumb')
            })
        # Fetch every class's filter definition in parallel.
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(classes)) as executor:
            future_to_aid = {executor.submit(self.fts, aid): aid for aid in classes}
            for future in concurrent.futures.as_completed(future_to_aid):
                aid = future_to_aid[future]
                try:
                    aid_id, fts = future.result()
                    filters[aid_id] = fts
                except Exception as e:
                    print(f"Error processing aid {aid}: {e}")
        result['class'] = classes
        result['filters'] = filters
        result['list'] = videos
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing; tid is the type name, extend carries the chosen
        filter values ('全部' = "all" is the API's neutral default)."""
        params={'state':extend.get('state',tid) or tid,'class':extend.get('classes','全部'),'area':extend.get('area','全部'),'year':extend.get('year','全部'),'lang':extend.get('lang','全部'),'version':extend.get('version','全部'),'pg':pg}
        data=self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        result = {}
        videos = []
        for i in data['data']['list']:
            # Items whose vod_id is 0 appear to be placeholder rows; drop them.
            if str(i.get('vod_id', 0)) != '0':
                videos.append({
                    'vod_id': i.get('vod_id'),
                    'vod_name': i.get('vod_name'),
                    'vod_pic': i.get('vod_pic'),
                    'vod_year': f"{i.get('vod_score')}分",
                    'vod_remarks': i.get('vod_remarks')
                })
        result['list'] = videos
        result['page'] = pg
        # The API does not report totals; advertise effectively endless paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Video detail plus play sources. Each episode is encoded as
        base64(JSON {parse,url,headers}) so playerContent can rebuild it."""
        body={'ids':ids[0],'uni_code':self.getunc(),'ac':'detail','token':''}
        data=self.post(f"{self.host}/api.php/vod/detail2", data=body, headers=self.headers).json()
        v=data['data']
        vod = {
            'type_name': v.get('type_name'),
            'vod_year': v.get('vod_year'),
            'vod_area': v.get('vod_area'),
            'vod_lang': v.get('vod_lang'),
            'vod_remarks': v.get('vod_remarks'),
            'vod_actor': v.get('vod_actor'),
            'vod_director': v.get('vod_director'),
            'vod_content': v.get('vod_content')
        }
        n,p=[],[]  # n: source names; p: '#'-joined episode lists per source
        for i in v['vod_play_list']:
            pp=i['player_info']
            n.append(pp['show'])
            np=[]
            for j in i['urls']:
                cd={'parse':pp.get('parse'),'url':j['url'],'headers':pp.get('headers')}
                np.append(f"{j['name']}${self.e64(json.dumps(cd))}")
            p.append('#'.join(np))
        vod.update({'vod_play_from':'$$$'.join(n),'vod_play_url':'$$$'.join(p)})
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        # The API already returns vod_*-shaped items; pass them through as-is.
        data=self.fetch(f"{self.host}/api.php/vod/search", params={'keywords':key,'type':'1','pg':pg}, headers=self.headers).json()
        return {'list':data['list'],'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Decode the base64 episode payload produced by detailContent.
        If 'parse' is a non-empty list of parser prefixes, emit one proxied
        URL per parser so the player can try them in turn."""
        ids=json.loads(self.d64(id))
        headers = {}
        urls=ids['url']
        if ids.get('headers'):
            # Headers are serialized as a single 'Key=>Value' string.
            hs=ids['headers'].split('=>',1)
            headers[hs[0].strip()]=hs[-1].strip()
        if isinstance(ids.get('parse'), list) and len(ids['parse']) > 0:
            urls=[]
            for i,x in enumerate(ids['parse']):
                su=f"{self.getProxyUrl()}&url={quote(x+ids['url'])}"
                urls.extend([f'解析{i+1}',su])
        return {'parse': 0, 'url': urls, 'header': headers}

    def localProxy(self, param):
        """Resolve a proxied URL through the site's jie-xi endpoint and
        302-redirect the player to the real stream; best effort ([] on error)."""
        try:
            body = {'url':unquote(param['url'])}
            data=self.post(f"{self.host}/api.php/vod/m_jie_xi", data=body, headers=self.headers).json()
            url=data.get('url') or data['data'].get('url')
            return [302,'video/MP2T',None,{'Location':url}]
        except:
            return []

    def liveContent(self, url):
        pass

    def fts(self, tdata):
        """Fetch one category's filter definition (runs in a worker thread).
        Returns (type_id, list-of-filter-dicts)."""
        params={'state':tdata['type_id'],'pg':'1'}
        data = self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        # Only these extend fields are surfaced as filters.
        ftks = ["classes", "area", "lang", "year", "version", "state"]
        filter = [
            {
                'name': k,
                'key': k,
                'value': [{'n': i, 'v': i} for i in v.split(',')]
            }
            for k, v in data['data']['classes']["type_extend"].items()
            if k in ftks and v
        ]
        return tdata['type_id'],filter

    def getunc(self):
        # Random 16-char [a-z0-9] code, base64-encoded, sent as 'uni_code'.
        chars = string.ascii_lowercase + string.digits
        data = ''.join(random.choice(chars) for _ in range(16))
        return self.e64(data)

    def e64(self, text):
        # Base64-encode text; returns '' on any failure.
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            return ""

    def d64(self,encoded_text):
        # Base64-decode text; returns '' on any failure.
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            return ""
diff --git a/py/火车太顺APP.py b/py/火车太顺APP.py
new file mode 100644
index 0000000..6a80179
--- /dev/null
+++ b/py/火车太顺APP.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from urllib.parse import urlparse
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """TVBox-style spider for the 火车 app API (fuqiyun backend).

    Every request carries an RSA-encrypted payload ('pack') and an HMAC-MD5
    signature (see url()); the /app/config response body is additionally
    AES-128-CBC encrypted (see aes()).
    """

    def init(self, extend=""):
        # Fresh random device id per session; API host resolved via DoH.
        self.device = self.device_id()
        self.host = self.gethost()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build the class list and per-class filter UI from the app config."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        # API filter field name -> display label.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        # Drop the leading "all" sort entry, then rename to TVBox's n/v shape.
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        item[dkey].pop(0)  # drop the "all" placeholder
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Home recommendations, flattened across all card groups; items that
        fail normalization (missing keys) are skipped."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        videos.append(self.voides(it))
                    except Exception as e:
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered class listing. Unset filters fall back to the API's label
        defaults (e.g. '类型' acts as "any class")."""
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        list = []
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            list.append(self.voides(item))
        result["list"] = list
        result["page"] = pg
        # Totals are not reported; advertise effectively endless paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Video detail plus play lists. Sources whose episode list is not
        inlined in the detail response are fetched concurrently (playlist)."""
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        video = {'vod_name': data.get('name'), 'type_name': data.get('type_name'), 'vod_year': data.get('year'),
                 'vod_area': data.get('area'), 'vod_remarks': data.get('dynami'), 'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                # Episode list missing inline: resolve it via movie_addr/list.
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        # Robustness: 'list' may be absent on empty results; treat None as [].
        for it in data or []:
            try:
                videos.append(self.voides(it))
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id: direct m3u8/mp4 ids play as-is; otherwise the id
        is base64 JSON resolved through parse_url. URLs disguised as images
        are routed through the local proxy."""
        url = id
        if not re.search(r"\.m3u8|\.mp4", url):
            try:
                data = json.loads(b64decode(id.encode('utf-8')).decode('utf-8'))
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                url = data2.get('play_url') or data2.get('download_url')
            except Exception as e:
                pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def Mproxy(self, url):
        # Wrap url so the player fetches it via localProxy (base64 in query).
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Fetch an m3u8 (following at most one redirect) and absolutize
        relative segment URIs against the playlist's origin."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, line in enumerate(lines):
            if '#EXT' not in line and 'http' not in line:
                # Bug fix: keep the whole relative URI. The original rebuilt
                # the line from its directory part only (string[:rfind('/')+1]),
                # dropping the segment filename — and shadowed the imported
                # 'string' module with its loop variable.
                lines[index] = durl + ('' if line.startswith('/') else '/') + line
        data = '\n'.join(lines)
        # Bug fix: correct HLS MIME type (was 'application/vnd.apple.mpegur').
        return [200, "application/vnd.apple.mpegurl", data]

    def device_id(self):
        # 32-char random [a-z0-9] device identifier.
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the API host via AliDNS DNS-over-HTTPS (the 'dns' value is
        a pre-built base64url DNS query); falls back to a known host."""
        try:
            url = 'https://dns.alidns.com/dns-query'
            headers = {
                'User-Agent': 'okhttp/4.9.2',
                'Accept': 'application/dns-message'
            }
            params = {
                'dns': 'AAABAAABAAAAAAAACWJmbTExYXM5ZgdmdXFpeXVuAmNuAAAcAAE'
            }
            response = self.fetch(url, headers=headers, params=params)
            host = self.parse_dns_name(response.content, 12)
            return f"https://{host}"
        except:
            return "https://bfm11as9f.fuqiyun.cn"

    def parse_dns_name(self, data, offset):
        """Decode a label-length-prefixed DNS name from a raw DNS message
        starting at 'offset'. NOTE(review): compression pointers (0xC0) are
        not handled — fine for this fixed query, not general-purpose."""
        parts = []
        while True:
            length = data[offset]
            if length == 0:
                break
            offset += 1
            parts.append(data[offset:offset + length].decode('utf-8'))
            offset += length
        return '.'.join(parts)

    def header(self):
        """Common request headers mimicking the official Android client."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': str(int(time.time())),
            'x-client-setting': '{"pure-mode":0}',
            # Bug fix: the original concatenation emitted malformed JSON
            # ('{"device_id":<id>}, "type":1,...}' — stray brace, unquoted id).
            'x-client-uuid': '{"device_id":"' + self.device + '","type":1,"brand":"Redmi","model":"M2012K10C","system_version":30,"sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """RSA-encrypt the request payload and HMAC-MD5 sign it.

        Returns (pack, signature): pack is base64url (unpadded) PKCS#1 v1.5
        ciphertext of the JSON payload (with a 'timestamp' added); signature
        is HMAC-MD5 over pack with a static key.
        """
        if not id:
            id = {}
        id["timestamp"] = str(int(time.time()))
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one play source's episode list; returns [] on any error so
        detailContent can substitute an empty source."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """Join episodes as 'name$url'#...; non-direct URLs are wrapped as
        base64 JSON for later resolution in playerContent."""
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                vd = {"from_code": it['from_code'], "play_url": it['play_url'], "episode_id": it['episode_id'], "type": "play"}
                play_urls.append(
                    f"{it['episode_name']}${b64encode(json.dumps(vd).encode('utf-8')).decode('utf-8')}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Normalize an API item to the vod_* card shape.

        May raise KeyError when 'name'/'title' are absent — callers rely on
        that to skip malformed items; returns None when both are falsy.
        """
        if item['name'] or item['title']:
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide

    def aes(self, text):
        """Decrypt the base64url AES-128-CBC config blob (static key/iv);
        the appended '==' restores the stripped base64 padding."""
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
diff --git a/py/火车影视.py b/py/火车影视.py
new file mode 100644
index 0000000..6a80179
--- /dev/null
+++ b/py/火车影视.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from urllib.parse import urlparse
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """TVBox-style spider for the 火车 app API (fuqiyun backend).

    Every request carries an RSA-encrypted payload ('pack') and an HMAC-MD5
    signature (see url()); the /app/config response body is additionally
    AES-128-CBC encrypted (see aes()).
    """

    def init(self, extend=""):
        # Fresh random device id per session; API host resolved via DoH.
        self.device = self.device_id()
        self.host = self.gethost()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build the class list and per-class filter UI from the app config."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        # API filter field name -> display label.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        # Drop the leading "all" sort entry, then rename to TVBox's n/v shape.
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        item[dkey].pop(0)  # drop the "all" placeholder
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Home recommendations, flattened across all card groups; items that
        fail normalization (missing keys) are skipped."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        videos.append(self.voides(it))
                    except Exception as e:
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered class listing. Unset filters fall back to the API's label
        defaults (e.g. '类型' acts as "any class")."""
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        list = []
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            list.append(self.voides(item))
        result["list"] = list
        result["page"] = pg
        # Totals are not reported; advertise effectively endless paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Video detail plus play lists. Sources whose episode list is not
        inlined in the detail response are fetched concurrently (playlist)."""
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        video = {'vod_name': data.get('name'), 'type_name': data.get('type_name'), 'vod_year': data.get('year'),
                 'vod_area': data.get('area'), 'vod_remarks': data.get('dynami'), 'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                # Episode list missing inline: resolve it via movie_addr/list.
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        # Robustness: 'list' may be absent on empty results; treat None as [].
        for it in data or []:
            try:
                videos.append(self.voides(it))
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id: direct m3u8/mp4 ids play as-is; otherwise the id
        is base64 JSON resolved through parse_url. URLs disguised as images
        are routed through the local proxy."""
        url = id
        if not re.search(r"\.m3u8|\.mp4", url):
            try:
                data = json.loads(b64decode(id.encode('utf-8')).decode('utf-8'))
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                url = data2.get('play_url') or data2.get('download_url')
            except Exception as e:
                pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def Mproxy(self, url):
        # Wrap url so the player fetches it via localProxy (base64 in query).
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Fetch an m3u8 (following at most one redirect) and absolutize
        relative segment URIs against the playlist's origin."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, line in enumerate(lines):
            if '#EXT' not in line and 'http' not in line:
                # Bug fix: keep the whole relative URI. The original rebuilt
                # the line from its directory part only (string[:rfind('/')+1]),
                # dropping the segment filename — and shadowed the imported
                # 'string' module with its loop variable.
                lines[index] = durl + ('' if line.startswith('/') else '/') + line
        data = '\n'.join(lines)
        # Bug fix: correct HLS MIME type (was 'application/vnd.apple.mpegur').
        return [200, "application/vnd.apple.mpegurl", data]

    def device_id(self):
        # 32-char random [a-z0-9] device identifier.
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the API host via AliDNS DNS-over-HTTPS (the 'dns' value is
        a pre-built base64url DNS query); falls back to a known host."""
        try:
            url = 'https://dns.alidns.com/dns-query'
            headers = {
                'User-Agent': 'okhttp/4.9.2',
                'Accept': 'application/dns-message'
            }
            params = {
                'dns': 'AAABAAABAAAAAAAACWJmbTExYXM5ZgdmdXFpeXVuAmNuAAAcAAE'
            }
            response = self.fetch(url, headers=headers, params=params)
            host = self.parse_dns_name(response.content, 12)
            return f"https://{host}"
        except:
            return "https://bfm11as9f.fuqiyun.cn"

    def parse_dns_name(self, data, offset):
        """Decode a label-length-prefixed DNS name from a raw DNS message
        starting at 'offset'. NOTE(review): compression pointers (0xC0) are
        not handled — fine for this fixed query, not general-purpose."""
        parts = []
        while True:
            length = data[offset]
            if length == 0:
                break
            offset += 1
            parts.append(data[offset:offset + length].decode('utf-8'))
            offset += length
        return '.'.join(parts)

    def header(self):
        """Common request headers mimicking the official Android client."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': str(int(time.time())),
            'x-client-setting': '{"pure-mode":0}',
            # Bug fix: the original concatenation emitted malformed JSON
            # ('{"device_id":<id>}, "type":1,...}' — stray brace, unquoted id).
            'x-client-uuid': '{"device_id":"' + self.device + '","type":1,"brand":"Redmi","model":"M2012K10C","system_version":30,"sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """RSA-encrypt the request payload and HMAC-MD5 sign it.

        Returns (pack, signature): pack is base64url (unpadded) PKCS#1 v1.5
        ciphertext of the JSON payload (with a 'timestamp' added); signature
        is HMAC-MD5 over pack with a static key.
        """
        if not id:
            id = {}
        id["timestamp"] = str(int(time.time()))
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one play source's episode list; returns [] on any error so
        detailContent can substitute an empty source."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """Join episodes as 'name$url'#...; non-direct URLs are wrapped as
        base64 JSON for later resolution in playerContent."""
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                vd = {"from_code": it['from_code'], "play_url": it['play_url'], "episode_id": it['episode_id'], "type": "play"}
                play_urls.append(
                    f"{it['episode_name']}${b64encode(json.dumps(vd).encode('utf-8')).decode('utf-8')}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Normalize an API item to the vod_* card shape.

        May raise KeyError when 'name'/'title' are absent — callers rely on
        that to skip malformed items; returns None when both are falsy.
        """
        if item['name'] or item['title']:
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide

    def aes(self, text):
        """Decrypt the base64url AES-128-CBC config blob (static key/iv);
        the appended '==' restores the stripped base64 padding."""
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
diff --git a/py/爱奇艺.py b/py/爱奇艺.py
new file mode 100644
index 0000000..2b0af3d
--- /dev/null
+++ b/py/爱奇艺.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import urlencode
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """TVBox-style spider for iqiyi.com web/mini-app endpoints.

    Video ids are '<qipuId>@<base64(pageUrl)>' so playerContent can hand the
    page URL to an external parser (jx).
    """

    def init(self, extend=""):
        # Random hex device id used by the mesh endpoints.
        self.did = self.random_str(32)
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Web origin, also used for Origin/Referer headers.
    rhost = 'https://www.iqiyi.com'

    # "mesh" recommendation/library/search API.
    hhost='https://mesh.if.iqiyi.com'

    # Baidu mini-app API (detail and episode lists).
    dhost='https://miniapp.iqiyi.com'

    headers = {
        'Origin': rhost,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
        'Referer': f'{rhost}/',
    }

    def homeContent(self, filter):
        """Static channel list; per-channel filter tags fetched concurrently."""
        result = {}
        cateManual = {
            "全部": "1009",
            "电影": "1",
            "剧集": "2",
            "综艺": "6",
            "动漫": "4",
            "儿童": "15",
            "微剧": "35",
            "纪录片": "3"
        }
        classes = []
        filters = {}
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            for id, ft in results:
                if len(ft): filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home recommendations; the first item group (banner) is skipped."""
        data = self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
        vlist = []
        for i in data['items'][1:]:
            for j in i['video'][0]['data']:
                id = j.get('firstId')
                pic = j.get('prevue', {}).get('image_url') or j.get('album_image_url_hover')
                if id and pic:
                    # Robustness: page_url may be missing even when firstId
                    # is present; fall back to an empty string.
                    pu = j.get('prevue', {}).get('page_url') or (j.get('page_url') or '').split('?')[0]
                    id = f'{id}@{self.e64(pu)}'
                    vlist.append({
                        'vod_id': id,
                        'vod_name': j.get('display_name'),
                        'vod_pic': pic,
                        'vod_year': j.get('sns_score'),
                        'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
                    })
        return {'list': vlist}

    def categoryContent(self, tid, pg, filter, extend):
        """Channel library listing. Filter values are base64 'k=v[,k=v]'
        strings (see getf); the API pages via an opaque 'session' token that
        must be carried between page requests."""
        if pg == "1":
            self.sid = ''
        new_data = {'mode': '24'}
        for key, value in extend.items():
            if value:
                key_value_pairs = self.d64(value).split(',')
                for pair in key_value_pairs:
                    k, v = pair.split('=')
                    if k in new_data:
                        new_data[k] += "," + v
                    else:
                        new_data[k] = v
        path = f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
        data = self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
        self.sid = data['session']
        videos = []
        for i in data['data']:
            id = i.get('firstId') or i.get('tv_id')
            if not id:
                id = i.get('play_url').split(';')[0].split('=')[-1]
            if id and not i.get('h'):
                id = f'{id}@{self.e64(i.get("page_url"))}'
                videos.append({
                    'vod_id': id,
                    'vod_name': i.get('display_name'),
                    'vod_pic': i.get('album_image_url_hover'),
                    'vod_year': i.get('sns_score'),
                    'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
                })
        result = {}
        result['list'] = videos
        result['page'] = pg
        # Totals are not reported; advertise effectively endless paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Album detail: episode list paged through the mini-app API (extra
        pages fetched concurrently); single videos fall back to the 'head'
        endpoint and play via their page URL."""
        ids = ids[0].split('@')
        ids[-1] = self.d64(ids[-1])
        data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
        v = data['data']['playInfo']
        vod = {
            'vod_name': v.get('albumName'),
            'type_name': v.get('tags'),
            'vod_year': v.get('albumYear'),
            'vod_remarks': v.get('updateStrategy'),
            'vod_actor': v.get('mainActors'),
            'vod_director': v.get('directors'),
            'vod_content': v.get('albumDesc'),
            'vod_play_from': '爱奇艺',
            'vod_play_url': ''
        }
        if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
            purl = [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
            pg = data['data']['videoList'].get('totalPages')
            if pg and pg > 1:
                id = v['albumId']
                pages = list(range(2, pg + 1))
                page_results = {}
                with ThreadPoolExecutor(max_workers=10) as executor:
                    future_to_page = {
                        executor.submit(self.fetch_page_data, page, id): page
                        for page in pages
                    }
                    for future in as_completed(future_to_page):
                        page = future_to_page[future]
                        try:
                            result = future.result()
                            page_results[page] = result
                        except Exception as e:
                            print(f"Error fetching page {page}: {e}")
                # Reassemble in page order regardless of completion order.
                for page in sorted(page_results.keys()):
                    purl.extend(page_results[page])
            vod['vod_play_url'] = '#'.join(purl)
        else:
            vdata = self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
            v = vdata['data']['playInfo']
            vod = {
                'vod_name': v.get('shortTitle'),
                'type_name': v.get('channelName'),
                'vod_year': v.get('year'),
                'vod_remarks': v.get('focus'),
                'vod_actor': v.get('mainActors'),
                'vod_director': v.get('directors'),
                'vod_content': v.get('desc'),
                'vod_play_from': '爱奇艺',
                'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
            }
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        # Bug fix: the query contained '¤t_page' — HTML-entity mojibake
        # ('&curren;' rendered as '¤') for '&current_page'.
        data = self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
        videos = []
        vdata = data['data']['templates']
        # Intent cards (exact-title hits) are promoted to the front.
        for i in data['data']['templates']:
            if i.get('intentAlbumInfos'):
                vdata = [{'albumInfo': c} for c in i['intentAlbumInfos']] + vdata

        for i in vdata:
            if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
                b = i['albumInfo']
                id = f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
                videos.append({
                    'vod_id': id,
                    'vod_name': b.get('title'),
                    'vod_pic': b.get('img'),
                    'vod_year': (b.get('year',{}) or {}).get('value'),
                    'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
                })
        return {'list': videos, 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        # id is the episode page URL; delegate to the configured jx parser.
        return {'jx': 1, 'parse': 1, 'url': id, 'header': ''}

    def localProxy(self, param):
        pass

    def fetch_page_data(self, page, id):
        """Fetch one episode-list page; [] on any error (worker thread)."""
        try:
            url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
            data = self.fetch(url, headers=self.headers).json()
            return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
        except:
            return []

    def getf(self, body):
        """Fetch one channel's filter tag groups; returns (type_id, filters).
        Tag params are base64-encoded so categoryContent can decode them."""
        data = self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
        ft = []
        for i in data:
            try:
                value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
                               value.get('tag_param')]
                ft.append({"key": i['group'], "name": i['group'], "value": value_array})
            except:
                print(i)
        return (body['type_id'], ft)

    def e64(self, text):
        # Base64-encode text; logs and returns '' on failure.
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text: str):
        # Base64-decode text; logs and returns '' on failure.
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def random_str(self, length=16):
        # Random lowercase-hex string of the requested length.
        hex_chars = '0123456789abcdef'
        return ''.join(random.choice(hex_chars) for _ in range(length))
diff --git a/py/爱瓜TVAPP.py b/py/爱瓜TVAPP.py
new file mode 100644
index 0000000..5432413
--- /dev/null
+++ b/py/爱瓜TVAPP.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+# 温馨提示:搜索只能搜拼音联想
+import sys
+import time
+import uuid
+from Crypto.Hash import MD5
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.uid = self.getuid()
+ self.token, self.code = self.getuserinfo()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://tvapi211.magicetech.com'
+
+ headers = {'User-Agent': 'okhttp/3.11.0'}
+
+ def homeContent(self, filter):
+ body = {'token': self.token, 'authcode': self.code}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-header', json=self.getbody(body),
+ headers=self.headers).json()
+ result = {}
+ classes = []
+ filters = {}
+ for k in data['data']:
+ classes.append({
+ 'type_name': k['channel_name'],
+ 'type_id': str(k['channel_id']),
+ })
+ filters[str(k['channel_id'])] = []
+ for i in k['search_box']:
+ if len(i['list']):
+ filters[str(k['channel_id'])].append({
+ 'key': i['field'],
+ 'name': i['label'],
+ 'value': [{'n': j['display'], 'v': str(j['value'])} for j in i['list'] if j['value']]
+ })
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ body = {'token': self.token, 'authcode': self.code}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/index-tv', json=self.getbody(body),
+ headers=self.headers).json()
+ return {'list': self.getlist(data['data'][0]['banner'])}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {'token': self.token, 'authcode': self.code, 'channel_id': tid, 'area': extend.get('area', '0'),
+ 'year': extend.get('year', '0'), 'sort': extend.get('sort', '0'), 'tag': extend.get('tag', 'hot'),
+ 'status': extend.get('status', '0'), 'page_num': pg, 'page_size': '24'}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-video', json=self.getbody(body),
+ headers=self.headers).json()
+ result = {}
+ result['list'] = self.getlist(data['data']['list'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@')
+ body = {'token': self.token, 'authcode': self.code, 'channel_id': ids[0], 'video_id': ids[1]}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/detail', json=self.getbody(body),
+ headers=self.headers).json()
+ vdata = {}
+ for k in data['data']['chapters']:
+ i = k['sourcelist']
+ for j in i:
+ if j['source_name'] not in vdata: vdata[j['source_name']] = []
+ vdata[j['source_name']].append(f"{k['title']}${j['source_url']}")
+ plist, names = [], []
+ for key, value in vdata.items():
+ names.append(key)
+ plist.append('#'.join(value))
+ vod = {
+ 'vod_play_from': '$$$'.join(names),
+ 'vod_play_url': '$$$'.join(plist),
+ }
+ return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ body = {'token': self.token, 'authcode': self.code, 'keyword': key, 'page_num': pg}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/search/letter-result', json=self.getbody(body),
+ headers=self.headers).json()
+ return {'list': self.getlist(data['data']['list'])}
+
+ def playerContent(self, flag, id, vipFlags):
+ # https://rysp.tv
+ # https://aigua.tv
+ result = {
+ "parse": 0,
+ "url": "id",
+ "header": {
+ "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)",
+ "Origin": "https://aigua.tv",
+ "Referer": "https://aigua.tv/"
+ }
+ }
+ return result
+
+ def localProxy(self, param):
+ pass
+
+ def getuserinfo(self):
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/user/auth-login', json=self.getbody(),
+ headers=self.headers).json()
+ v = data['data']
+ return v['user_token'], v['authcode']
+
+ def getuid(self):
+ uid = self.getCache('uid')
+ if not uid:
+ uid = str(uuid.uuid4())
+ self.setCache('uid', uid)
+ return uid
+
+ def getbody(self, json_data=None):
+ if json_data is None: json_data = {}
+ params = {"product": "4", "ver": "1.1.0", "debug": "1", "appId": "1", "osType": "3", "marketChannel": "tv",
+ "sysVer": "11", "time": str(int(time.time())), "packageName": "com.gzsptv.gztvvideo",
+ "udid": self.uid, }
+ json_data.update(params)
+ sorted_json = dict(sorted(json_data.items(), key=lambda item: item[0]))
+ text = '&'.join(f"{k}={v}" for k, v in sorted_json.items() if v != '')
+ md5_hash = self.md5(f"jI7POOBbmiUZ0lmi{text}D9ShYdN51ksWptpkTu11yenAJu7Zu3cR").upper()
+ json_data.update({'sign': md5_hash})
+ return json_data
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+ def getlist(self, data):
+ videos = []
+ for i in data:
+ if type(i.get('video')) == dict: i = i['video']
+ videos.append({
+ 'vod_id': f"{i.get('channel_id')}@{i.get('video_id')}",
+ 'vod_name': i.get('video_name'),
+ 'vod_pic': i.get('cover'),
+ 'vod_year': i.get('score'),
+ 'vod_remarks': i.get('flag'),
+ })
+ return videos
+
diff --git a/py/爱瓜影视.py b/py/爱瓜影视.py
new file mode 100644
index 0000000..e3b1e51
--- /dev/null
+++ b/py/爱瓜影视.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+# 温馨提示:搜索只能搜拼音联想
+# 播放需要挂代理
+import sys
+import time
+import uuid
+from Crypto.Hash import MD5
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+    def init(self, extend=""):
+        # One-time setup: persistent device id, then the anonymous-login
+        # token/authcode pair used by every subsequent call.
+        self.uid = self.getuid()
+        self.token, self.code = self.getuserinfo()
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    # Upstream API origin and the client UA it expects.
+    host = 'https://tvapi211.magicetech.com'
+
+    headers = {'User-Agent': 'okhttp/3.11.0'}
+
+    def homeContent(self, filter):
+        # Categories plus per-channel filter groups from the filter-header API.
+        body = {'token': self.token, 'authcode': self.code}
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-header', json=self.getbody(body),
+                         headers=self.headers).json()
+        result = {}
+        classes = []
+        filters = {}
+        for k in data['data']:
+            classes.append({
+                'type_name': k['channel_name'],
+                'type_id': str(k['channel_id']),
+            })
+            filters[str(k['channel_id'])] = []
+            for i in k['search_box']:
+                if len(i['list']):
+                    filters[str(k['channel_id'])].append({
+                        'key': i['field'],
+                        'name': i['label'],
+                        'value': [{'n': j['display'], 'v': str(j['value'])} for j in i['list'] if j['value']]
+                    })
+        result['class'] = classes
+        result['filters'] = filters
+        return result
+
+    def homeVideoContent(self):
+        # Home page: first banner strip becomes the recommended list.
+        body = {'token': self.token, 'authcode': self.code}
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/index-tv', json=self.getbody(body),
+                         headers=self.headers).json()
+        return {'list': self.getlist(data['data'][0]['banner'])}
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # Filtered listing for one channel; unselected filters fall back to
+        # server defaults ('0' / 'hot'). No totals upstream, so paging is
+        # advertised as effectively endless.
+        body = {'token': self.token, 'authcode': self.code, 'channel_id': tid, 'area': extend.get('area', '0'),
+                'year': extend.get('year', '0'), 'sort': extend.get('sort', '0'), 'tag': extend.get('tag', 'hot'),
+                'status': extend.get('status', '0'), 'page_num': pg, 'page_size': '24'}
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-video', json=self.getbody(body),
+                         headers=self.headers).json()
+        result = {}
+        result['list'] = self.getlist(data['data']['list'])
+        result['page'] = pg
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        # vod_id is 'channel_id@video_id'; episodes are grouped by their
+        # source_name so each source becomes one play-from tab.
+        ids = ids[0].split('@')
+        body = {'token': self.token, 'authcode': self.code, 'channel_id': ids[0], 'video_id': ids[1]}
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/detail', json=self.getbody(body),
+                         headers=self.headers).json()
+        vdata = {}
+        for k in data['data']['chapters']:
+            i = k['sourcelist']
+            for j in i:
+                if j['source_name'] not in vdata: vdata[j['source_name']] = []
+                vdata[j['source_name']].append(f"{k['title']}${j['source_url']}")
+        plist, names = [], []
+        for key, value in vdata.items():
+            names.append(key)
+            plist.append('#'.join(value))
+        vod = {
+            'vod_play_from': '$$$'.join(names),
+            'vod_play_url': '$$$'.join(plist),
+        }
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        # NOTE: upstream only matches pinyin-initial keywords (see file header).
+        body = {'token': self.token, 'authcode': self.code, 'keyword': key, 'page_num': pg}
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/search/letter-result', json=self.getbody(body),
+                         headers=self.headers).json()
+        return {'list': self.getlist(data['data']['list'])}
+
+    def playerContent(self, flag, id, vipFlags):
+        # https://rysp.tv
+        # https://aigua.tv
+        # Direct-play the stored URL with the headers the CDN expects.
+        result = {
+            "parse": 0,
+            "url": id,
+            "header": {
+                "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)",
+                "Origin": "https://aigua.tv",
+                "Referer": "https://aigua.tv/"
+            }
+        }
+        return result
+
+    def localProxy(self, param):
+        pass
+
+    def getuserinfo(self):
+        # Anonymous login; returns (user_token, authcode).
+        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/user/auth-login', json=self.getbody(),
+                         headers=self.headers).json()
+        v = data['data']
+        return v['user_token'], v['authcode']
+
+    def getuid(self):
+        # Stable per-install device id, persisted in the spider cache.
+        uid = self.getCache('uid')
+        if not uid:
+            uid = str(uuid.uuid4())
+            self.setCache('uid', uid)
+        return uid
+
+    def getbody(self, json_data=None):
+        # Attach common device fields and sign the request:
+        # sign = MD5(prefix + '&'-joined sorted 'k=v' pairs + suffix), upper.
+        if json_data is None: json_data = {}
+        params = {"product": "4", "ver": "1.1.0", "debug": "1", "appId": "1", "osType": "3", "marketChannel": "tv",
+                  "sysVer": "11", "time": str(int(time.time())), "packageName": "com.gzsptv.gztvvideo",
+                  "udid": self.uid, }
+        json_data.update(params)
+        sorted_json = dict(sorted(json_data.items(), key=lambda item: item[0]))
+        text = '&'.join(f"{k}={v}" for k, v in sorted_json.items() if v != '')
+        md5_hash = self.md5(f"jI7POOBbmiUZ0lmi{text}D9ShYdN51ksWptpkTu11yenAJu7Zu3cR").upper()
+        json_data.update({'sign': md5_hash})
+        return json_data
+
+    def md5(self, text):
+        # Hex MD5 digest of a UTF-8 string.
+        h = MD5.new()
+        h.update(text.encode('utf-8'))
+        return h.hexdigest()
+
+    def getlist(self, data):
+        # Normalize list/banner payloads into vod dicts; some entries nest
+        # the payload under a 'video' key.
+        videos = []
+        for i in data:
+            if type(i.get('video')) == dict: i = i['video']
+            videos.append({
+                'vod_id': f"{i.get('channel_id')}@{i.get('video_id')}",
+                'vod_name': i.get('video_name'),
+                'vod_pic': i.get('cover'),
+                'vod_year': i.get('score'),
+                'vod_remarks': i.get('flag'),
+            })
+        return videos
+
diff --git a/py/爱看短剧.py b/py/爱看短剧.py
new file mode 100644
index 0000000..638f407
--- /dev/null
+++ b/py/爱看短剧.py
@@ -0,0 +1,314 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import binascii
+import json
+import random
+import sys
+import time
+import uuid
+from base64 import b64decode, b64encode
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad, pad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.ut = False
+ # self.did, self.ntid =self.getdid()
+ self.did, self.ntid = 'e59eb2465f61b9ca','65a0de19b3a2ec93fa479ad6'
+ self.token, self.uid = self.gettoken()
+ self.phost, self.phz,self.mphost=self.getpic()
+ # self.phost, self.phz,self.mphost = ('https://dbtp.tgydy.com','.log','https://dplay.nbzsmc.com')
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='http://192.151.245.34:8089'
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+ def uuid(self):
+ return str(uuid.uuid4())
+
+ def getdid(self):
+ did = self.random_str(16)
+ ntid = self.random_str(24)
+ return did, ntid
+ # try:
+ # if self.getCache('did'):
+ # return self.getCache('did'), self.getCache('ntid')
+ # else:
+ # self.setCache('did', did)
+ # self.setCache('ntid', ntid)
+ # return did, ntid
+ # except Exception as e:
+ # self.setCache('did', did)
+ # self.setCache('ntid', ntid)
+ # return did, ntid
+
+ def aes(self, text, bool=True):
+ key = b64decode('c0k4N1RfKTY1U1cjJERFRA==')
+ iv = b64decode('VzIjQWRDVkdZSGFzSEdEVA==')
+ if bool:
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
+ ct = b64encode(ct_bytes).decode("utf-8")
+ return ct
+ else:
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+ ptt=json.loads(pt.decode("utf-8"))
+ return ptt
+
+ def random_str(self,length=24):
+ hex_chars = '0123456789abcdef'
+ return ''.join(random.choice(hex_chars) for _ in range(length))
+
+ def gettoken(self):
+ params={"deviceId":self.did,"deviceModel":"8848钛晶手机","devicePlatform":"1","tenantId":self.ntid}
+ data=self.getdata('/supports/anonyLogin',params)
+ self.ut=True
+ return data['data']['token'], data['data']['userId']
+
+ def getdata(self,path,params=None):
+ t = int(time.time()*1000)
+ n=self.md5(f'{self.uuid()}{t}')
+ if params:
+ ct=self.aes(json.dumps(params))
+ else:
+ ct=f'{t}{n}'
+ s=self.md5(f'{ct}8j@78m.367HGDF')
+ headers = {
+ 'User-Agent': 'okhttp-okgo/jeasonlzy',
+ 'Connection': 'Keep-Alive',
+ 'Accept-Language': 'zh-CN,zh;q=0.8',
+ 'tenantId': self.ntid,
+ 'n': n,
+ 't': str(int(t/1000)),
+ 's': s,
+ }
+ if self.ut:
+ headers['ta-token'] = self.token
+ headers['userId'] = self.uid
+ if params:
+ params={'ct':ct}
+ response = self.post(f'{self.host}{path}', headers=headers, json=params).text
+ else:
+ response = self.fetch(f'{self.host}{path}', headers=headers).text
+ data=self.aes(response[1:-1],False)
+ return data
+
+ def getpic(self):
+ try:
+ at = int(time.time() * 1000)
+ t=str(int(at/ 1000))
+ n = self.md5(f'{self.uuid()}{at}')
+ headers = {
+ 'Host': '192.151.245.34:8089',
+ 'User-Agent': 'okhttp-okgo/jeasonlzy',
+ 'Connection': 'Keep-Alive',
+ 'Accept-Language': 'zh-CN,zh;q=0.8',
+ 'tenantId': self.ntid,
+ 'userId': self.uid,
+ 'ta-token': self.token,
+ 'n': n,
+ 't': t,
+ 's': self.md5(f'{t}{n}8j@78m.367HGDF')
+ }
+ params = {
+ 'tenantId': self.ntid,
+ }
+ response = self.fetch(f'{self.host}/supports/configs', params=params, headers=headers).text
+ data=self.aes(response[1:-1],False)
+ config = {
+ 'image_cdn': '',
+ 'image_cdn_path': '',
+ 'cdn-domain': ''
+ }
+ for item in data.get('data', []):
+ name = item.get('name')
+ records = item.get('records', [])
+
+ if name in config and records:
+ value = records[0].get('value', '')
+ if name == 'cdn-domain':
+ value = value.split('#')[0]
+ config[name] = value
+
+ return config['image_cdn'], config['image_cdn_path'], config['cdn-domain']
+
+ except Exception as e:
+ print(f"Error in getpic: {e}")
+ return 'https://dbtp.tgydy.com', '.log', 'https://dplay.nbzsmc.com'
+
+ def getlist(self,data):
+ vod=[]
+ for i in data:
+ vod.append({
+ 'vod_id': f'{i.get("movieId")}@{i.get("entryNum")}',
+ 'vod_name': i.get('title'),
+ 'vod_pic': f'{self.getProxyUrl()}&path={i.get("thumbnail")}',
+ 'vod_year': i.get('score'),
+ 'vod_remarks': f'{i.get("entryNum")}集'
+ })
+ return vod
+
+ def homeContent(self, filter):
+ data=self.getdata('/movies/classifies')
+ result = {}
+ cateManual = {
+ "榜单": "ranking/getTodayHotRank",
+ "专辑": "getTMovieFolderPage",
+ "剧场": "getClassMoviePage2",
+ "演员": "follow/getRecommendActorPage",
+ }
+ classes = []
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ filters = {}
+ if data.get('data'):
+ filters["getClassMoviePage2"] = [
+ {
+ "key": "type",
+ "name": "分类",
+ "value": [
+ {"n": item["name"], "v": item["classifyId"]}
+ for item in data["data"]
+ ]
+ }
+ ]
+ filters["ranking/getTodayHotRank"] = [
+ {
+ "key": "type",
+ "name": "榜单",
+ "value": [
+ {"n": "播放榜", "v": "getWeekHotPlayRank"},
+ {"n": "高赞榜", "v": "getWeekStarRank"},
+ {"n": "追剧榜", "v": "getSubTMoviePage"},
+ {"n": "高分榜", "v": "ranking/getScoreRank"}
+ ]
+ }
+ ]
+ filters["follow/getRecommendActorPage"] = [
+ {
+ "key": "type",
+ "name": "性别",
+ "value": [
+ {"n": "男", "v": "0"},
+ {"n": "女", "v": "1"}
+ ]
+ }
+ ]
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ params = {"pageNo":"1","pageSize":"30","platform":"1","deviceId":self.did,"tenantId":self.ntid}
+ data=self.getdata('/news/getRecommendTMoviePage',params)
+ vod=self.getlist(data['data']['records'])
+ return {'list':vod}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ params={}
+ path = f'/news/{tid}'
+ if tid=='getClassMoviePage2':
+ parama={"pageNo":pg,"pageSize":"30","orderFlag":"0","haveActor":"-1","classifyId":extend.get('type','-1'),"tagId":""}
+ elif 'rank' in tid:
+ path=f'/news/{extend.get("type") or tid}'
+ parama={"pageNo":pg,"pageSize":"30"}
+ elif 'follow' in tid:
+ parama={"pageNo":pg,"pageSize":"20"}
+ if extend.get('type'):
+ path=f'/news/getActorPage'
+ parama={"pageNo":pg,"pageSize":"50","sex":extend.get('type')}
+ elif tid=='getTMovieFolderPage':
+ parama={"pageNo":pg,"pageSize":"20"}
+ elif '@' in tid:
+ path='/news/getActorTMoviePage'
+ parama={"id":tid.split('@')[0],"pageNo":pg,"pageSize":"30"}
+ params['platform'] = '1'
+ params['deviceId'] = self.did
+ params['tenantId'] = self.ntid
+ data=self.getdata(path,parama)
+ vods=[]
+ if 'follow' in tid:
+ for i in data['data']['records']:
+ vods.append({
+ 'vod_id': f'{i.get("id")}@',
+ 'vod_name': i.get('name'),
+ 'vod_pic': i.get('avatar'),
+ 'vod_tag': 'folder',
+ 'vod_remarks': f'作品{i.get("movieNum")}',
+ 'style': {"type": "oval"}
+ })
+ else:
+ vdata=data['data']['records']
+ if tid=='getTMovieFolderPage':
+ vdata=[j for i in data['data']['records'] for j in i['movieList']]
+ vods=self.getlist(vdata)
+ result = {}
+ result['list'] = vods
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ ids=ids[0].split('@')
+ params = {"pageNo": "1", "pageSize": ids[1], "movieId": ids[0], "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
+ data = self.getdata('/news/getEntryPage', params)
+ print(data)
+ plist=[f'第{i.get("entryNum")}集${i.get("mp4PlayAddress") or i.get("playAddress")}' for i in data['data']['records']]
+ vod = {
+ 'vod_play_from': '爱看短剧',
+ 'vod_play_url': '#'.join(plist),
+ }
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ params = {"pageNo": pg, "pageSize": "20", "keyWord": key, "orderFlag": "0", "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
+ data = self.getdata('/news/searchTMoviePage', params)
+ vod = self.getlist(data['data']['records'])
+ return {'list':vod,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'parse': 0, 'url': f'{self.mphost}{id}', 'header': {'User-Agent':'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'}}
+
+ def localProxy(self, param):
+ type=param.get('path').split('.')[-1]
+ data=self.fetch(f'{self.phost}{param.get("path")}{self.phz}',headers={'User-Agent':'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'})
+ def decrypt(encrypted_text):
+ try:
+ key = base64.urlsafe_b64decode("iM41VipvCFtToAFFRExEXw==")
+ iv = base64.urlsafe_b64decode("0AXRTXzmMSrlRSemWb4sVQ==")
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ decrypted_padded = cipher.decrypt(encrypted_text)
+ decrypted_data = unpad(decrypted_padded, AES.block_size)
+ return decrypted_data
+ except (binascii.Error, ValueError):
+ return None
+ return [200, f'image/{type}', decrypt(data.content)]
+
diff --git a/py/猎手影视.py b/py/猎手影视.py
new file mode 100644
index 0000000..1a6a4d7
--- /dev/null
+++ b/py/猎手影视.py
@@ -0,0 +1,279 @@
+# coding=utf-8
+# !/usr/bin/python
+# by嗷呜(finally)
+import sys
+import os
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def getName(self):
+ return "电影猎手"
+
+ def init(self, extend=""):
+ self.device = self.device_id()
+ self.host = self.gethost()
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ t = str(int(time.time()))
+
+ def homeContent(self, filter):
+ result = {}
+ filters = {}
+ classes = []
+ bba = self.url()
+ data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
+ data1 = self.aes(data)
+ dy = {"class":"类型","area":"地区","lang":"语言","year":"年份","letter":"字母","by":"排序","sort":"排序"}
+ data1['data']['movie_screen']['sort'].pop(0)
+ for item in data1['data']['movie_screen']['sort']:
+ item['n'] = item.pop('name')
+ item['v'] = item.pop('value')
+ for item in data1['data']['movie_screen']['filter']:
+ has_non_empty_field = False
+ classes.append({"type_name": item["name"], "type_id": str(item["id"])})
+ for key in dy:
+ if key in item and item[key]:
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["id"])] = []
+ filters[str(item["id"])].append(
+ {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
+ for dkey in item:
+ if dkey in dy and item[dkey]:
+ item[dkey].pop(0)
+ value_array = [
+ {"n": value.strip(), "v": value.strip()}
+ for value in item[dkey]
+ if value.strip() != ""
+ ]
+ filters[str(item["id"])].append(
+ {"key": dkey, "name": dy[dkey], "value": value_array}
+ )
+ result["class"] = classes
+ result["filters"] = filters
+ return result
+
+ def homeVideoContent(self):
+ bba = self.url()
+ url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
+ data = self.fetch(url, headers=self.header()).json()
+ videos = []
+ for item in data['data']:
+ if len(item['list']) > 0:
+ for it in item['list']:
+ try:
+ videos.append(self.voides(it))
+ except Exception as e:
+ continue
+ result = {"list": videos}
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
+ "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
+ "pageSize": "21"}
+ result = {}
+ list = []
+ bba = self.url(body)
+ url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
+ data = self.fetch(url, headers=self.header()).json()['data']['list']
+ for item in data:
+ list.append(self.voides(item))
+ result["list"] = list
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = {"id": ids[0]}
+ bba = self.url(body)
+ url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
+ data = self.fetch(url, headers=self.header()).json()['data']
+ video = {'vod_name': data.get('name'),'type_name': data.get('type_name'),'vod_year': data.get('year'),'vod_area': data.get('area'),'vod_remarks': data.get('dynami'),'vod_content': data.get('content')}
+ play = []
+ names = []
+ tasks = []
+ for itt in data["play_from"]:
+ name = itt["name"]
+ a = []
+ if len(itt["list"]) > 0:
+ names.append(name)
+ play.append(self.playeach(itt['list']))
+ else:
+ tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
+ names.append(name)
+ if tasks:
+ with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
+ results = executor.map(self.playlist, tasks)
+ for result in results:
+ if result:
+ play.append(result)
+ else:
+ play.append("")
+ video["vod_play_from"] = "$$$".join(names)
+ video["vod_play_url"] = "$$$".join(play)
+ result = {"list": [video]}
+ return result
+
+ def searchContent(self, key, quick, pg=1):
+ body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
+ "res_type": "by_movie_name"}
+ bba = self.url(body)
+ url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
+ data = self.fetch(url, headers=self.header()).json()['data'].get('list')
+ videos = []
+ for it in data:
+ try:
+ videos.append(self.voides(it))
+ except Exception as e:
+ continue
+ result = {"list": videos, "page": pg}
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ url = id
+ if "m3u8" not in url and "mp4" not in url:
+ try:
+ add = id.split('|||')
+ data = {"from_code": add[0], "play_url": add[1], "episode_id": add[2], "type": "play"}
+ bba = self.url(data)
+ data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
+ headers=self.header()).json()['data']
+ url = data2.get('play_url') or data2.get('download_url')
+ try:
+ url1 = self.fetch(url, headers=self.header(), allow_redirects=False).headers['Location']
+ if url1 and "http" in url1:
+ url = url1
+ except:
+ pass
+ except Exception as e:
+ pass
+ if '.jpg' in url or '.jpeg' in url or '.png' in url:
+ url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
+ result = {}
+ result["parse"] = 0
+ result["url"] = url
+ result["header"] = {'user-agent': 'okhttp/4.9.2'}
+ return result
+
+ def localProxy(self, param):
+ url = b64decode(param["url"]).decode('utf-8')
+ durl = url[:url.rfind('/')]
+ data = self.fetch(url, headers=self.header()).content.decode("utf-8")
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ # if 'URI="' in string and 'http' not in string:
+ # lines[index] = index
+ # 暂时预留,貌似用不到
+ if '#EXT' not in string and 'http' not in string:
+ lines[index] = durl + ('' if string.startswith('/') else '/') + string
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def device_id(self):
+ characters = string.ascii_lowercase + string.digits
+ random_string = ''.join(random.choices(characters, k=32))
+ return random_string
+
+ def gethost(self):
+ headers = {
+ 'User-Agent': 'okhttp/4.9.2',
+ 'Connection': 'Keep-Alive',
+ }
+ response = self.fetch('https://app-site.ecoliving168.com/domain_v5.json', headers=headers).json()
+ url = response['api_service'].replace('/api/', '')
+ return url
+
+ def header(self):
+ headers = {
+ 'User-Agent': 'Android',
+ 'Accept': 'application/prs.55App.v2+json',
+ 'timestamp': self.t,
+ 'x-client-setting': '{"pure-mode":1}',
+ 'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
+ 'x-client-version': '3096 '
+ }
+ return headers
+
+ def url(self, id=None):
+ if not id:
+ id = {}
+ id["timestamp"] = self.t
+ public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
+ encrypted_text = json.dumps(id)
+ public_key = RSA.import_key(b64decode(public_key))
+ cipher = PKCS1_v1_5.new(public_key)
+ encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
+ encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
+ result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
+ key = '635a580fcb5dc6e60caa39c31a7bde48'
+ sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
+ return result, sign
+
+ def playlist(self, body):
+ try:
+ bba = self.url(body)
+ url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
+ data = self.fetch(url, headers=self.header()).json()['data']
+ return self.playeach(data)
+ except Exception:
+ return []
+
+ def playeach(self,data):
+ play_urls = []
+ for it in data:
+ if re.search(r"mp4|m3u8", it["play_url"]):
+ play_urls.append(f"{it['episode_name']}${it['play_url']}")
+ else:
+ play_urls.append(
+ f"{it['episode_name']}${it['from_code']}|||{it['play_url']}|||{it['episode_id']}"
+ )
+ return '#'.join(play_urls)
+
+ def voides(self, item):
+ if item['name'] or item['title']:
+ voide = {
+ "vod_id": item.get('id') or item.get('click'),
+ 'vod_name': item.get('name') or item.get('title'),
+ 'vod_pic': item.get('cover') or item.get('image'),
+ 'vod_year': item.get('year') or item.get('label'),
+ 'vod_remarks': item.get('dynamic') or item.get('sub_title')
+ }
+ return voide
+
+ def aes(self, text):
+ text = text.replace('-', '+').replace('_', '/') + '=='
+ key = b"e6d5de5fcc51f53d"
+ iv = b"2f13eef7dfc6c613"
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
+ return json.loads(pt)
diff --git a/py/球球.py b/py/球球.py
new file mode 100644
index 0000000..ffcd9c0
--- /dev/null
+++ b/py/球球.py
@@ -0,0 +1,330 @@
+# coding = utf-8
+# !/usr/bin/python
+
+"""
+
+作者 丢丢喵 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
+ ====================Diudiumiao====================
+
+"""
+
+from Crypto.Util.Padding import unpad
+from Crypto.Util.Padding import pad
+from urllib.parse import unquote
+from Crypto.Cipher import ARC4
+from urllib.parse import quote
+from base.spider import Spider
+from Crypto.Cipher import AES
+from datetime import datetime
+from bs4 import BeautifulSoup
+from base64 import b64decode
+import urllib.request
+import urllib.parse
+import binascii
+import requests
+import base64
+import json
+import time
+import sys
+import re
+import os
+
+sys.path.append('..')
+
+# Upstream site root for the sports API.
+xurl = "https://kzb29rda.com"
+
+# Default desktop-browser headers used for every HTTP request in this module.
+headerx = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
+    }
+
+# Module-level scratch value; declared here, not observed to be written in this file.
+pm = ''
+
+class Spider(Spider):
+    # Sports live/replay source ("球球"): lists football leagues and matches
+    # from kzb29rda.com and resolves per-match stream URLs.
+    global xurl
+    global headerx
+
+    def getName(self):
+        return "首页"
+
+    def init(self, extend):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
+        # Generic scraping helper; the mode is selected by `pl`:
+        #   3 -> repeatedly cut every start/end span, regex each span with
+        #        `start_index1` (expects 2-group matches: url, name) and build
+        #        "name$number+url" play strings joined by '#' per span and
+        #        '$$$' between spans
+        #   0 -> first span, with backslashes removed
+        #   1 -> regex matches of the first span joined by spaces
+        #   2 -> regex matches of the first span joined by '$$$'
+        # Returns '' when nothing is found. `end_index2` is accepted but unused.
+        if pl == 3:
+            plx = []
+            while True:
+                start_index = text.find(start_str)
+                if start_index == -1:
+                    break
+                end_index = text.find(end_str, start_index + len(start_str))
+                if end_index == -1:
+                    break
+                middle_text = text[start_index + len(start_str):end_index]
+                plx.append(middle_text)
+                # Remove the consumed span so find() advances.
+                text = text.replace(start_str + middle_text + end_str, '')
+            if len(plx) > 0:
+                purl = ''
+                for i in range(len(plx)):
+                    matches = re.findall(start_index1, plx[i])
+                    output = ""
+                    for match in matches:
+                        # Pull the first standalone digit run out of the name.
+                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
+                        if match3:
+                            number = match3.group(1)
+                        else:
+                            number = 0
+                        # Relative links are prefixed with the site root.
+                        if 'http' not in match[0]:
+                            output += f"#{match[1]}${number}{xurl}{match[0]}"
+                        else:
+                            output += f"#{match[1]}${number}{match[0]}"
+                    output = output[1:]
+                    purl = purl + output + "$$$"
+                purl = purl[:-3]
+                return purl
+            else:
+                return ""
+        else:
+            start_index = text.find(start_str)
+            if start_index == -1:
+                return ""
+            end_index = text.find(end_str, start_index + len(start_str))
+            if end_index == -1:
+                return ""
+
+            if pl == 0:
+                middle_text = text[start_index + len(start_str):end_index]
+                return middle_text.replace("\\", "")
+
+            if pl == 1:
+                middle_text = text[start_index + len(start_str):end_index]
+                matches = re.findall(start_index1, middle_text)
+                if matches:
+                    jg = ' '.join(matches)
+                    return jg
+
+            if pl == 2:
+                middle_text = text[start_index + len(start_str):end_index]
+                matches = re.findall(start_index1, middle_text)
+                if matches:
+                    new_list = [f'{item}' for item in matches]
+                    jg = '$$$'.join(new_list)
+                    return jg
+
+    def homeContent(self, filter):
+        # Static category table: league name -> upstream cid.
+        result = {}
+        result = {"class": [{"type_id": "82", "type_name": "英超"},
+                            {"type_id": "120", "type_name": "西甲"},
+                            {"type_id": "129", "type_name": "德甲"},
+                            {"type_id": "108", "type_name": "意甲"},
+                            {"type_id": "142", "type_name": "法甲"},
+                            {"type_id": "46", "type_name": "欧冠"},
+                            {"type_id": "542", "type_name": "中超"},
+                            {"type_id": "567", "type_name": "日职联"},
+                            {"type_id": "590", "type_name": "澳超"},
+                            {"type_id": "83", "type_name": "英冠"},
+                            {"type_id": "457", "type_name": "美职业"},
+                            {"type_id": "543", "type_name": "中甲"},
+                            {"type_id": "168", "type_name": "荷甲"},
+                            {"type_id": "581", "type_name": "韩K联"},
+                            {"type_id": "465", "type_name": "墨西超"},
+                            {"type_id": "546", "type_name": "中女超"},
+                            {"type_id": "568", "type_name": "日职乙"},
+                            {"type_id": "575", "type_name": "日联杯"},
+                            {"type_id": "614", "type_name": "沙特联"},
+                            {"type_id": "629", "type_name": "阿联酋杯"},
+                            {"type_id": "238", "type_name": "俄超"},
+                            {"type_id": "151", "type_name": "葡超"},
+                            {"type_id": "209", "type_name": "丹麦超"},
+                            {"type_id": "121", "type_name": "西乙"},
+                            {"type_id": "1722", "type_name": "牙买超"},
+                            {"type_id": "34", "type_name": "国际友谊"},
+                            {"type_id": "2115", "type_name": "墨女超"},
+                            {"type_id": "130", "type_name": "德乙"},
+                            {"type_id": "1788", "type_name": "印尼甲"},
+                            {"type_id": "462", "type_name": "智利甲"},
+                            {"type_id": "143", "type_name": "法乙"},
+                            {"type_id": "466", "type_name": "墨西甲"},
+                            {"type_id": "592", "type_name": "澳维超"},
+                            {"type_id": "475", "type_name": "哥伦甲"},
+                            {"type_id": "589", "type_name": "韩国杯"},
+                            {"type_id": "586", "type_name": "韩女联"},
+                            {"type_id": "582", "type_name": "韩K2联"},
+                            {"type_id": "461", "type_name": "美公开赛"},
+                            {"type_id": "315", "type_name": "土超"},
+                            {"type_id": "429", "type_name": "阿甲"},
+                            {"type_id": "602", "type_name": "澳女联"},
+                            {"type_id": "316", "type_name": "土甲"},
+                            {"type_id": "97", "type_name": "英乙U21"},
+                            {"type_id": "332", "type_name": "保甲"},
+                            {"type_id": "55", "type_name": "欧女冠"},
+                            {"type_id": "84", "type_name": "英甲"},
+                            {"type_id": "169", "type_name": "荷乙"}],
+                  }
+
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, cid, pg, filter, ext):
+        # Lists matches for one league; the API filters from today's date.
+        result = {}
+        videos = []
+
+        current_timestamp = int(datetime.now().timestamp())
+        dt_object = datetime.fromtimestamp(current_timestamp)
+        formatted_date = dt_object.strftime('%Y-%m-%d')
+
+        url = f'{xurl}/prod-api/match/list/new?isfanye=1&type=1&cid={cid}&ishot=-1&pn=1&ps=20&level=&name=&langtype=zh&starttime={formatted_date}&pid=4&zoneId=Asia%2FShanghai&zhuboType=0'
+
+        detail = requests.get(url=url, headers=headerx)
+        detail.encoding = "utf-8"
+        if detail.status_code == 200:
+            data = detail.json()
+
+            js = data['data']['dataList']
+
+            for vod in js:
+
+                nameq = vod['hteam_name']
+                nameh = vod['ateam_name']
+                name = nameq + ' - ' + nameh
+
+                # NOTE(review): shadows the builtin `id`; harmless locally.
+                id = vod['id']
+
+                # NOTE(review): poster uses the away-team logo only.
+                pic = vod['ateam_logo']
+
+                # Finished matches ("完场") are relabelled as replays ("回看").
+                zhuangtai = vod['status_up_name']
+                zhuangtai = zhuangtai.replace('完场', '回看')
+                shijian = vod['matchtime']
+                bifen = vod['score']
+                remark = zhuangtai + ' ' + shijian + ' 比分' + bifen
+
+                video = {
+                    "vod_id": id,
+                    "vod_name": name,
+                    "vod_pic": pic,
+                    "vod_remarks": remark
+                }
+                videos.append(video)
+
+        # Single-page source: pagecount is fixed at 1.
+        result = {'list': videos}
+        result['page'] = pg
+        result['pagecount'] = 1
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        global pm
+        did = ids[0]
+        result = {}
+        videos = []
+        xianlu = ''
+        bofang = ''
+
+        url = f'{xurl}/prod-api/match/detail?mid={did}&type=1&isnew=1&pid=4&langtype=zh&test=1&zoneId=Asia%2FShanghai'
+        detail = requests.get(url=url, headers=headerx)
+        detail.encoding = "utf-8"
+        if detail.status_code == 200:
+            data = detail.json()
+
+            # Remote control file: s1 is a marker string, s2 a jump link.
+            # NOTE(review): this acts as a remotely operated kill-switch /
+            # redirect — if s1 is absent from the intro text below, the play
+            # URL is replaced by the remote s2 link. Treat as untrusted.
+            url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
+            response = requests.get(url)
+            response.encoding = 'utf-8'
+            code = response.text
+            name = self.extract_middle_text(code, "s1='", "'", 0)
+            Jumps = self.extract_middle_text(code, "s2='", "'", 0)
+
+            kaichang = data['data']['matchinfo']['matchtime']
+            bifen = data['data']['matchinfo']['score']
+            diqun = data['data']['matchinfo']['name']
+            zhuangtai = data['data']['matchinfo']['status_up_name']
+            duizhanq = data['data']['matchinfo']['ateam_name']
+            duizhanh = data['data']['matchinfo']['hteam_name']
+
+            content = '😸集多为您介绍一场' + diqun +'的比赛 参赛双方是 ' + duizhanq +' - '+ duizhanh + ' 比赛时间是 ' + kaichang + ' 现在是 ' + zhuangtai + ' 比分是 ' + bifen + ' 请勿相信任何广告 免费分享 收费死全家'
+
+            director = data['data']['matchinfo']['name']
+
+            actor = duizhanq + ' - ' + duizhanh
+
+            remarks = data['data']['matchinfo']['status_up_name']
+
+            year = data['data']['matchinfo']['matchtime']
+
+            if name not in content:
+                # Remote marker missing: play the remote jump link instead.
+                bofang = Jumps
+                xianlu = '1'
+            else:
+                soups = data['data']['matchinfo']['video_url']
+
+                if soups:
+                    # Finished match: a single replay URL.
+                    bofang = soups
+                    xianlu = '集多回看专线'
+                else:
+                    # Live match: join all live lines as "name$url#...".
+                    js = data['data']['matchinfo']['global_live_urls']
+
+                    for sou in js:
+
+                        id = sou['url']
+
+                        name = sou['name']
+
+                        bofang = bofang + name + '$' + id + '#'
+
+                    bofang = bofang[:-1]
+
+                    xianlu = '集多现场专线'
+
+            videos.append({
+                "vod_id": did,
+                "vod_director": director,
+                "vod_actor": actor,
+                "vod_remarks": remarks,
+                "vod_year": year,
+                "vod_content": content,
+                "vod_play_from": xianlu,
+                "vod_play_url": bofang
+            })
+
+        result['list'] = videos
+        return result
+
+    def playerContent(self, flag, id, vipFlags):
+        # `id` is already a direct stream URL; no parsing needed.
+        result = {}
+        result["parse"] = 0
+        result["playUrl"] = ''
+        result["url"] = id
+        result["header"] = headerx
+        return result
+
+    def searchContentPage(self, key, quick, page):
+        # Search is not implemented for this source.
+        pass
+
+    def searchContent(self, key, quick, pg="1"):
+        # NOTE(review): delegates to searchContentPage, which is a stub and
+        # returns None regardless of `pg`.
+        return self.searchContentPage(key, quick, '1')
+
+    def localProxy(self, params):
+        # Dispatch local-proxy requests by type to the base Spider helpers.
+        if params['type'] == "m3u8":
+            return self.proxyM3u8(params)
+        elif params['type'] == "media":
+            return self.proxyMedia(params)
+        elif params['type'] == "ts":
+            return self.proxyTs(params)
+        return None
+
+
+
+
+
+
+
+
+
diff --git a/py/甜圈短剧.py b/py/甜圈短剧.py
new file mode 100644
index 0000000..40cac38
--- /dev/null
+++ b/py/甜圈短剧.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+    # 甜圈短剧: short-drama source backed by the mov.cenguigui.cn JSON API.
+
+    def init(self, extend=""):
+        pass
+
+    def getName(self):
+        return "甜圈短剧"
+
+    def isVideoFormat(self, url):
+        return True
+
+    def manualVideoCheck(self):
+        return False
+
+    def destroy(self):
+        pass
+
+    # Updated to the new domain.
+    ahost = 'https://mov.cenguigui.cn'
+
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+        'DNT': '1',
+        'sec-ch-ua-mobile': '?0',
+        'Sec-Fetch-Site': 'cross-site',
+        'Sec-Fetch-Mode': 'no-cors',
+        'Sec-Fetch-Dest': 'video',
+        'Sec-Fetch-Storage-Access': 'active',
+        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+    }
+
+    def homeContent(self, filter):
+        # Static category list; type_id doubles as the API's `classname`.
+        result = {'class': [{'type_id': '推荐榜', 'type_name': '🔥 推荐榜'},
+                            {'type_id': '新剧', 'type_name': '🎬 新剧'},
+                            {'type_id': '逆袭', 'type_name': '🎬 逆袭'},
+                            {'type_id': '霸总', 'type_name': '🎬 霸总'},
+                            {'type_id': '现代言情', 'type_name': '🎬 现代言情'},
+                            {'type_id': '打脸虐渣', 'type_name': '🎬 打脸虐渣'},
+                            {'type_id': '豪门恩怨', 'type_name': '🎬 豪门恩怨'},
+                            {'type_id': '神豪', 'type_name': '🎬 神豪'},
+                            {'type_id': '马甲', 'type_name': '🎬 马甲'},
+                            {'type_id': '都市日常', 'type_name': '🎬 都市日常'},
+                            {'type_id': '战神归来', 'type_name': '🎬 战神归来'},
+                            {'type_id': '小人物', 'type_name': '🎬 小人物'},
+                            {'type_id': '女性成长', 'type_name': '🎬 女性成长'},
+                            {'type_id': '大女主', 'type_name': '🎬 大女主'},
+                            {'type_id': '穿越', 'type_name': '🎬 穿越'},
+                            {'type_id': '都市修仙', 'type_name': '🎬 都市修仙'},
+                            {'type_id': '强者回归', 'type_name': '🎬 强者回归'},
+                            {'type_id': '亲情', 'type_name': '🎬 亲情'},
+                            {'type_id': '古装', 'type_name': '🎬 古装'},
+                            {'type_id': '重生', 'type_name': '🎬 重生'},
+                            {'type_id': '闪婚', 'type_name': '🎬 闪婚'},
+                            {'type_id': '赘婿逆袭', 'type_name': '🎬 赘婿逆袭'},
+                            {'type_id': '虐恋', 'type_name': '🎬 虐恋'},
+                            {'type_id': '追妻', 'type_name': '🎬 追妻'},
+                            {'type_id': '天下无敌', 'type_name': '🎬 天下无敌'},
+                            {'type_id': '家庭伦理', 'type_name': '🎬 家庭伦理'},
+                            {'type_id': '萌宝', 'type_name': '🎬 萌宝'},
+                            {'type_id': '古风权谋', 'type_name': '🎬 古风权谋'},
+                            {'type_id': '职场', 'type_name': '🎬 职场'},
+                            {'type_id': '奇幻脑洞', 'type_name': '🎬 奇幻脑洞'},
+                            {'type_id': '异能', 'type_name': '🎬 异能'},
+                            {'type_id': '无敌神医', 'type_name': '🎬 无敌神医'},
+                            {'type_id': '古风言情', 'type_name': '🎬 古风言情'},
+                            {'type_id': '传承觉醒', 'type_name': '🎬 传承觉醒'},
+                            {'type_id': '现言甜宠', 'type_name': '🎬 现言甜宠'},
+                            {'type_id': '奇幻爱情', 'type_name': '🎬 奇幻爱情'},
+                            {'type_id': '乡村', 'type_name': '🎬 乡村'},
+                            {'type_id': '历史古代', 'type_name': '🎬 历史古代'},
+                            {'type_id': '王妃', 'type_name': '🎬 王妃'},
+                            {'type_id': '高手下山', 'type_name': '🎬 高手下山'},
+                            {'type_id': '娱乐圈', 'type_name': '🎬 娱乐圈'},
+                            {'type_id': '强强联合', 'type_name': '🎬 强强联合'},
+                            {'type_id': '破镜重圆', 'type_name': '🎬 破镜重圆'},
+                            {'type_id': '暗恋成真', 'type_name': '🎬 暗恋成真'},
+                            {'type_id': '民国', 'type_name': '🎬 民国'},
+                            {'type_id': '欢喜冤家', 'type_name': '🎬 欢喜冤家'},
+                            {'type_id': '系统', 'type_name': '🎬 系统'},
+                            {'type_id': '真假千金', 'type_name': '🎬 真假千金'},
+                            {'type_id': '龙王', 'type_name': '🎬 龙王'},
+                            {'type_id': '校园', 'type_name': '🎬 校园'},
+                            {'type_id': '穿书', 'type_name': '🎬 穿书'},
+                            {'type_id': '女帝', 'type_name': '🎬 女帝'},
+                            {'type_id': '团宠', 'type_name': '🎬 团宠'},
+                            {'type_id': '年代爱情', 'type_name': '🎬 年代爱情'},
+                            {'type_id': '玄幻仙侠', 'type_name': '🎬 玄幻仙侠'},
+                            {'type_id': '青梅竹马', 'type_name': '🎬 青梅竹马'},
+                            {'type_id': '悬疑推理', 'type_name': '🎬 悬疑推理'},
+                            {'type_id': '皇后', 'type_name': '🎬 皇后'},
+                            {'type_id': '替身', 'type_name': '🎬 替身'},
+                            {'type_id': '大叔', 'type_name': '🎬 大叔'},
+                            {'type_id': '喜剧', 'type_name': '🎬 喜剧'},
+                            {'type_id': '剧情', 'type_name': '🎬 剧情'}]}
+        return result
+
+    def homeVideoContent(self):
+        return []
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # `offset` is pg-1 — presumably a zero-based page index; TODO confirm
+        # against the API's paging semantics.
+        params = {
+            'classname': tid,
+            'offset': str((int(pg) - 1)),
+        }
+        # Request path updated to /duanju/api.php.
+        data = self.fetch(f'{self.ahost}/duanju/api.php', params=params, headers=self.headers).json()
+        videos = []
+        for k in data['data']:
+            videos.append({
+                'vod_id': k.get('book_id'),
+                'vod_name': k.get('title'),
+                'vod_pic': k.get('cover'),
+                'vod_year': k.get('score'),
+                'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
+            })
+        result = {}
+        result['list'] = videos
+        result['page'] = pg
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        # Request path updated to /duanju/api.php.
+        v = self.fetch(f'{self.ahost}/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
+        vod = {
+            'vod_id': ids[0],
+            'vod_name': v.get('title'),
+            'type_name': v.get('category'),
+            'vod_year': v.get('time'),
+            'vod_remarks': v.get('duration'),
+            'vod_content': v.get('desc'),
+            'vod_play_from': '爱看短剧',
+            # One playlist entry per episode: "title$video_id".
+            'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
+        }
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        # Search reuses the category endpoint with the keyword as classname.
+        return self.categoryContent(key, pg, True, {})
+
+    def playerContent(self, flag, id, vipFlags):
+        # Request path updated to /duanju/api.php; resolves an episode
+        # video_id to a direct playable URL.
+        data = self.fetch(f'{self.ahost}/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
+        return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}
+
+    def localProxy(self, param):
+        pass
\ No newline at end of file
diff --git a/py/盘友圈.py b/py/盘友圈.py
new file mode 100644
index 0000000..8b0cfba
--- /dev/null
+++ b/py/盘友圈.py
@@ -0,0 +1,151 @@
+# coding=utf-8
+# !/usr/bin/python
+import requests
+from bs4 import BeautifulSoup
+import re
+from base.spider import Spider
+import sys
+import json
+import os
+import base64
+sys.path.append('..')
+# Upstream pan-search site root.
+xurl='https://panyq.com'
+# Default desktop-browser headers for HTTP requests in this module.
+headerx = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
+}
+
+
+
+class Spider(Spider):
+ global xurl2
+ global xurl
+ global headerx
+
+ def getName(self):
+ return "首页"
+
+ def init(self, extend):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+
+ def homeContent(self, filter):
+ pass
+ return result
+
+ def homeVideoContent(self):
+
+ pass
+
+ def categoryContent(self, cid, pg, filter, ext):
+ pass
+
+
+
+ def detailContent(self, ids):
+ try:
+ data = json.loads(bytes.fromhex(ids[0]).decode())
+ verify = requests.post(f'{xurl}/search/{data["hash"]}',
+ headers=self.getheader(-1),
+ data=json.dumps(data['data'], separators=(",", ":")).encode(),
+ )
+ if verify.status_code == 200:
+ eid = data['data'][0]['eid']
+ rdata = json.dumps([{"eid": eid}], separators=(",", ":")).encode()
+ res = requests.post(f'{xurl}/go/{eid}', headers=self.getheader(1), data=rdata)
+ purl = json.loads(res.text.strip().split('\n')[-1].split(":", 1)[-1])['data']['link']
+ if not re.search(r'pwd=|码', purl) and data['password']:
+ purl = f"{purl}{'&' if '?' in purl else '?'}pwd={data['password']}"
+ print("获取盘链接为:", purl)
+ else:
+ raise Exception('验证失败')
+ vod = {
+ 'vod_id': '',
+ 'vod_name': '',
+ 'vod_pic': '',
+ 'type_name': '',
+ 'vod_year': '',
+ 'vod_area': '',
+ 'vod_remarks': '',
+ 'vod_actor': '',
+ 'vod_director': '',
+ 'vod_content': '',
+ 'vod_play_from': '集多网盘',
+ 'vod_play_url': purl
+ }
+ params = {
+ "do": "push",
+ "url": purl
+ }
+ response = requests.post("http://127.0.0.1:9978/action", data=params, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+})
+ return {'list': [vod]}
+ except Exception as e:
+ print(e)
+ return {'list': []}
+
+ def playerContent(self, flag, id, vipFlags):
+ pass
+
+ def searchContentPage(self, key, quick, page='1'):
+ sign, sha, hash = self.getsign(key, page)
+ headers = self.getheader()
+ res = requests.get(f'{xurl}/api/search', params={'sign': sign}, headers=headers).json()
+ videos = []
+ for i in res['data']['hits']:
+ ccc = [{"eid": i.get("eid"), "sha": sha, "page_num": page}]
+ ddd = (json.dumps({'sign': sign, 'hash': hash, 'data': ccc, 'password': i.get('password')})).encode().hex()
+ if i.get('group')=='quark':
+ pic='https://android-artworks.25pp.com/fs08/2024/12/27/7/125_d45d9de77c805e17ede25e4a2d9d3444_con.png'
+ elif i.get('group')=='baidu':
+ pic='https://is4-ssl.mzstatic.com/image/thumb/Purple126/v4/dd/45/eb/dd45eb77-d21d-92f2-c46d-979797a6be4a/AppIcon-0-0-1x_U007emarketing-0-0-0-7-0-0-sRGB-0-0-0-GLES2_U002c0-512MB-85-220-0-0.png/1024x1024bb.jpg'
+ else:
+ pic='https://gimg2.baidu.com/image_search/src=http%3A%2F%2Fimg.alicdn.com%2Fbao%2Fuploaded%2Fi4%2F2213060290763%2FO1CN01joakK61HVUwob2JIJ_%21%212213060290763.jpg&refer=http%3A%2F%2Fimg.alicdn.com&app=2002&size=f9999,10000&q=a80&n=0&g=0n&fmt=auto?sec=1757745912&t=e7b98fced3a4f092c8ef26490997b004'
+ videos.append({
+ 'vod_id': ddd,
+ 'vod_name': i.get('desc').split('
')[0].replace('', ""),
+ 'vod_pic': pic,
+ 'vod_remarks': i.get('group'),
+ })
+ return {'list': videos, 'page': page}
+
+ def searchContent(self, key, quick):
+ return self.searchContentPage(key, quick, '1')
+
+ def searchContent(self, key, quick, pg):
+ return self.searchContentPage(key, quick, pg)
+
+
+ def getsign(self,key,pg):
+ headers=self.getheader()
+ data=json.dumps([{"cat":"all","query":key,"pageNum":int(pg),"enableSearchMusic":False,"enableSearchGame":False,"enableSearchEbook":False}],separators=(",", ":"),ensure_ascii= False).encode()
+ res = requests.post(xurl, headers=headers, data=data).text
+ hash=re.search(r'"hash",\s*"([^"]+)"', res).group(1)
+ sign = re.search(r'"sign":\s*"([^"]+)"', res).group(1)
+ sha= re.search(r'"sha":\s*"([^"]+)"', res).group(1)
+ return sign,sha,hash
+
+ def getheader(self,k=0):
+ kes=['ecce0904d756da58b9ea5dd03da3cacea9fa29c6','4c5c1ef8a225004ce229e9afa4cc7189eed3e6fe','c4ed62e2b5a8e3212b334619f0cdbaa77fa842ff']
+ headers = {
+ 'origin': xurl,
+ 'referer': f'{xurl}/',
+ 'next-action': kes[k],
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="136", "Google Chrome";v="136"',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.7103.48 Safari/537.36',
+ }
+ return headers
+ def localProxy(self, params):
+ if params['type'] == "m3u8":
+ return self.proxyM3u8(params)
+ elif params['type'] == "media":
+ return self.proxyMedia(params)
+ elif params['type'] == "ts":
+ return self.proxyTs(params)
+ return None
diff --git a/py/米盘搜.py b/py/米盘搜.py
new file mode 100644
index 0000000..2ee4a7e
--- /dev/null
+++ b/py/米盘搜.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# !/usr/bin/python
+import requests
+from bs4 import BeautifulSoup
+import re
+from base.spider import Spider
+import sys
+import json
+import os
+import base64
+
+sys.path.append('..')
+# Upstream net-disk search site root.
+xurl = 'http://www.misoso.cc'
+# Default desktop-browser headers for HTTP requests in this module.
+headerx = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
+}
+
+
+class Spider(Spider):
+ global xurl
+ global headerx
+
+ def getName(self):
+ return "米盘搜"
+
+ def init(self, extend):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def homeContent(self, filter):
+ pass
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, cid, pg, filter, ext):
+ pass
+
+ def detailContent(self, ids):
+ try:
+ # 解码ID获取链接信息
+ data = json.loads(bytes.fromhex(ids[0]).decode())
+ purl = data['url']
+
+ # 如果有密码且链接中不包含密码参数,则添加密码
+ if data.get('password') and not re.search(r'pwd=|密码', purl):
+ purl = f"{purl}{'&' if '?' in purl else '?'}pwd={data['password']}"
+
+ print("获取盘链接为:", purl)
+
+ vod = {
+ 'vod_id': '',
+ 'vod_name': '',
+ 'vod_pic': '',
+ 'type_name': '',
+ 'vod_year': '',
+ 'vod_area': '',
+ 'vod_remarks': '',
+ 'vod_actor': '',
+ 'vod_director': '',
+ 'vod_content': '',
+ 'vod_play_from': '米盘搜',
+ 'vod_play_url': purl
+ }
+
+ # 推送链接到本地服务
+ params = {
+ "do": "push",
+ "url": purl
+ }
+ response = requests.post("http://127.0.0.1:9978/action", data=params, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+ })
+
+ return {'list': [vod]}
+ except Exception as e:
+ print(e)
+ # 如果解码失败,尝试直接使用ID作为链接
+ purl = ids[0]
+ vod = {
+ 'vod_id': '',
+ 'vod_name': '',
+ 'vod_pic': '',
+ 'type_name': '',
+ 'vod_year': '',
+ 'vod_area': '',
+ 'vod_remarks': '',
+ 'vod_actor': '',
+ 'vod_director': '',
+ 'vod_content': '',
+ 'vod_play_from': '米盘搜',
+ 'vod_play_url': purl
+ }
+ params = {
+ "do": "push",
+ "url": purl
+ }
+ response = requests.post("http://127.0.0.1:9978/action", data=params, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+ })
+ return {'list': [vod]}
+
+ def playerContent(self, flag, id, vipFlags):
+ pass
+
+ def searchContentPage(self, key, quick, page='1'):
+ videos = []
+ data = {
+ "page": int(page),
+ "q": key,
+ "user": "",
+ "exact": False,
+ "format": [],
+ "share_time": "",
+ "size": 15,
+ "type": "",
+ "exclude_user": [],
+ "adv_params": {
+ "wechat_pwd": "",
+ "platform": "pc"
+ }
+ }
+ res = requests.post(f'{xurl}/v1/search/disk', json=data, headers=headerx).text
+ js1 = json.loads(res)
+ for i in js1['data']['list']:
+ url = i['link']
+ name = i['disk_name'].replace('', "").replace('', "")
+
+ # 根据链接类型设置不同的图标
+ if 'drive.uc' in url:
+ pic = 'https://img1.baidu.com/it/u=2031987711,74538878&fm=253&fmt=auto&app=138&f=JPEG?w=500&h=505'
+ elif 'pan.quark' in url:
+ pic = 'https://img2.baidu.com/it/u=1963522584,2950363542&fm=253&fmt=auto&app=138&f=JPEG?w=500&h=500'
+ elif 'pan.baidu' in url:
+ pic = 'https://bkimg.cdn.bcebos.com/pic/35a85edf8db1cb13b7bc9af2d354564e93584b7e'
+ else:
+ pic = 'https://gimg2.baidu.com/image_search/src=http%3A%2F%2Fimg.alicdn.com%2Fbao%2Fuploaded%2Fi4%2F2213060290763%2FO1CN01joakK61HVUwob2JIJ_%21%212213060290763.jpg&refer=http%3A%2F%2Fimg.alicdn.com&app=2002&size=f9999,10000&q=a80&n=0&g=0n&fmt=auto?sec=1757745912&t=e7b98fced3a4f092c8ef26490997b004'
+
+ # 编码链接信息,包括URL和可能的密码
+ link_data = {
+ "url": url,
+ "password": i.get('password', '')
+ }
+ vid = json.dumps(link_data).encode().hex()
+
+ videos.append({
+ 'vod_id': vid,
+ 'vod_name': name,
+ 'vod_pic': pic,
+ 'vod_remarks': i.get('shared_time', '')
+ })
+ return {'list': videos, 'page': page}
+
+ def searchContent(self, key, quick):
+ return self.searchContentPage(key, quick, '1')
+
+ def searchContent(self, key, quick, pg):
+ return self.searchContentPage(key, quick, pg)
+
+ def localProxy(self, params):
+ if params['type'] == "m3u8":
+ return self.proxyM3u8(params)
+ elif params['type'] == "media":
+ return self.proxyMedia(params)
+ elif params['type'] == "ts":
+ return self.proxyTs(params)
+ return None
diff --git a/py/红果短剧.py b/py/红果短剧.py
new file mode 100644
index 0000000..fea8d5f
--- /dev/null
+++ b/py/红果短剧.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+    # 红果短剧: scrapes hongguodj.cc list/detail/play pages with pyquery.
+
+    def init(self, extend=""):
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    host='https://www.hongguodj.cc'
+
+    headers = {
+        'Accept': '*/*',
+        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+        'Cache-Control': 'no-cache',
+        'Connection': 'keep-alive',
+        'DNT': '1',
+        'Origin': host,
+        'Pragma': 'no-cache',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'cross-site',
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+    }
+
+    def homeContent(self, filter):
+        # Categories come from the nav bar (first item skipped — presumably
+        # the "home" link); recommendations from every .wrap .rows list.
+        result = {}
+        classes = []
+        vlist = []
+        data = pq(self.fetch(self.host, headers=self.headers).text)
+        for i in list(data('.slip li').items())[1:]:
+            classes.append({
+                'type_name': i.text(),
+                # type_id is the numeric part of the category href.
+                'type_id': re.findall(r'\d+', i('a').attr('href'))[0]
+            })
+        for i in data('.wrap .rows').items():
+            vlist.extend(self.getlist(i('li')))
+        result['class'] = classes
+        result['list'] = vlist
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # Category pages follow /type/{id}-{page}.html.
+        data=pq(self.fetch(f'{self.host}/type/{tid}-{pg}.html', headers=self.headers).text)
+        result = {}
+        result['list'] = self.getlist(data('.list ul li'))
+        result['page'] = pg
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        # ids[0] is a site-relative detail path. Metadata is read from the
+        # .info block by positional <p> index (actor, director, type, year,
+        # area in that order as indexed below).
+        data=pq(self.fetch(f'{self.host}{ids[0]}', headers=self.headers).text)
+        v=data('.info')
+        p=v('p')
+        vod = {
+            'vod_name': v('h1').text(),
+            'type_name': p.eq(2).text(),
+            'vod_year': p.eq(3).text(),
+            'vod_area': p.eq(4).text(),
+            'vod_remarks': v('em').text(),
+            'vod_actor': p.eq(0).text(),
+            'vod_director': p.eq(1).text(),
+            'vod_content': data('#desc .text').text(),
+            'vod_play_from': '',
+            'vod_play_url': ''
+        }
+        # One play source per tab; episodes joined "name$href" with '#'.
+        names = [i.text() for i in data('.title.slip a').items()]
+        plist=[]
+        for i in data('.play-list ul').items():
+            plist.append('#'.join([f'{j("a").text()}${j("a").attr("href")}' for j in i('li').items()]))
+        vod['vod_play_from'] = '$$$'.join(names)
+        vod['vod_play_url'] = '$$$'.join(plist)
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        # Search uses the MacCMS-style dashed URL pattern.
+        data=pq(self.fetch(f'{self.host}/search/{key}----------{pg}---.html', headers=self.headers).text)
+        return {'list': self.getlist(data('.show.rows li')),'page':pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        # Direct stream URL lives in the player's data-play attribute; fall
+        # back to the page URL with parse=1 (webview sniffing) when absent.
+        p=0
+        uid=f'{self.host}{id}'
+        data=pq(self.fetch(uid, headers=self.headers).text)
+        url=data('.video.ratio').attr('data-play')
+        if not url:
+            url = uid
+            p = 1
+        return {'parse': p, 'url': url, 'header': self.headers}
+
+    def localProxy(self, param):
+        pass
+
+    def getlist(self,data):
+        # Map list-item elements to vod dicts.
+        # NOTE(review): attr('data-src') may be None or already absolute,
+        # which would break the string concatenation — confirm markup.
+        vlist = []
+        for j in data.items():
+            vlist.append({
+                'vod_id': j('a').attr('href'),
+                'vod_name': j('img').attr('alt'),
+                'vod_pic': self.host + j('img').attr('data-src'),
+                'vod_year': j('.bg').text(),
+                'vod_remarks': j('p').text()
+            })
+        return vlist
+
+
diff --git a/py/红果网页.py b/py/红果网页.py
new file mode 100644
index 0000000..fea8d5f
--- /dev/null
+++ b/py/红果网页.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+    # 红果网页: scrapes hongguodj.cc list/detail/play pages with pyquery.
+    # NOTE(review): this file is a byte-for-byte duplicate of 红果短剧.py
+    # (same diff index fea8d5f); consider keeping only one copy.
+
+    def init(self, extend=""):
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    host='https://www.hongguodj.cc'
+
+    headers = {
+        'Accept': '*/*',
+        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+        'Cache-Control': 'no-cache',
+        'Connection': 'keep-alive',
+        'DNT': '1',
+        'Origin': host,
+        'Pragma': 'no-cache',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'cross-site',
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+    }
+
+    def homeContent(self, filter):
+        # Categories come from the nav bar (first item skipped — presumably
+        # the "home" link); recommendations from every .wrap .rows list.
+        result = {}
+        classes = []
+        vlist = []
+        data = pq(self.fetch(self.host, headers=self.headers).text)
+        for i in list(data('.slip li').items())[1:]:
+            classes.append({
+                'type_name': i.text(),
+                # type_id is the numeric part of the category href.
+                'type_id': re.findall(r'\d+', i('a').attr('href'))[0]
+            })
+        for i in data('.wrap .rows').items():
+            vlist.extend(self.getlist(i('li')))
+        result['class'] = classes
+        result['list'] = vlist
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # Category pages follow /type/{id}-{page}.html.
+        data=pq(self.fetch(f'{self.host}/type/{tid}-{pg}.html', headers=self.headers).text)
+        result = {}
+        result['list'] = self.getlist(data('.list ul li'))
+        result['page'] = pg
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        # ids[0] is a site-relative detail path. Metadata is read from the
+        # .info block by positional <p> index (actor, director, type, year,
+        # area in that order as indexed below).
+        data=pq(self.fetch(f'{self.host}{ids[0]}', headers=self.headers).text)
+        v=data('.info')
+        p=v('p')
+        vod = {
+            'vod_name': v('h1').text(),
+            'type_name': p.eq(2).text(),
+            'vod_year': p.eq(3).text(),
+            'vod_area': p.eq(4).text(),
+            'vod_remarks': v('em').text(),
+            'vod_actor': p.eq(0).text(),
+            'vod_director': p.eq(1).text(),
+            'vod_content': data('#desc .text').text(),
+            'vod_play_from': '',
+            'vod_play_url': ''
+        }
+        # One play source per tab; episodes joined "name$href" with '#'.
+        names = [i.text() for i in data('.title.slip a').items()]
+        plist=[]
+        for i in data('.play-list ul').items():
+            plist.append('#'.join([f'{j("a").text()}${j("a").attr("href")}' for j in i('li').items()]))
+        vod['vod_play_from'] = '$$$'.join(names)
+        vod['vod_play_url'] = '$$$'.join(plist)
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        # Search uses the MacCMS-style dashed URL pattern.
+        data=pq(self.fetch(f'{self.host}/search/{key}----------{pg}---.html', headers=self.headers).text)
+        return {'list': self.getlist(data('.show.rows li')),'page':pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        # Direct stream URL lives in the player's data-play attribute; fall
+        # back to the page URL with parse=1 (webview sniffing) when absent.
+        p=0
+        uid=f'{self.host}{id}'
+        data=pq(self.fetch(uid, headers=self.headers).text)
+        url=data('.video.ratio').attr('data-play')
+        if not url:
+            url = uid
+            p = 1
+        return {'parse': p, 'url': url, 'header': self.headers}
+
+    def localProxy(self, param):
+        pass
+
+    def getlist(self,data):
+        # Map list-item elements to vod dicts.
+        # NOTE(review): attr('data-src') may be None or already absolute,
+        # which would break the string concatenation — confirm markup.
+        vlist = []
+        for j in data.items():
+            vlist.append({
+                'vod_id': j('a').attr('href'),
+                'vod_name': j('img').attr('alt'),
+                'vod_pic': self.host + j('img').attr('data-src'),
+                'vod_year': j('.bg').text(),
+                'vod_remarks': j('p').text()
+            })
+        return vlist
+
+
diff --git a/py/绝对影视.py b/py/绝对影视.py
new file mode 100644
index 0000000..820abf2
--- /dev/null
+++ b/py/绝对影视.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import re
+import sys
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://www.jdys.art'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'dnt': '1',
+ 'sec-ch-ua-mobile': '?0',
+ 'origin': host,
+ 'sec-fetch-site': 'cross-site',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-dest': 'empty',
+ 'referer': f'{host}/',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'priority': 'u=1, i',
+ }
+
+ def homeContent(self, filter):
+ data = self.getpq(self.fetch(self.host, headers=self.headers).text)
+ result = {}
+ classes = []
+ for k in list(data('.navtop .navlist li').items())[:9]:
+ classes.append({
+ 'type_name': k('a').text(),
+ 'type_id': k('a').attr('href'),
+ })
+ result['class'] = classes
+ result['list'] = self.getlist(data('.mi_btcon .bt_img ul li'))
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data = self.getpq(self.fetch(f"{tid}{'' if pg == '1' else f'page/{pg}/'}", headers=self.headers).text)
+ result = {}
+ result['list'] = self.getlist(data('.mi_cont .bt_img ul li'))
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data = self.getpq(self.fetch(ids[0], headers=self.headers).text)
+ data2 = data('.moviedteail_list li')
+ vod = {
+ 'vod_name': data('.dytext h1').text(),
+ 'type_name': data2.eq(0).text(),
+ 'vod_year': data2.eq(2).text(),
+ 'vod_area': data2.eq(1).text(),
+ 'vod_remarks': data2.eq(4).text(),
+ 'vod_actor': data2.eq(7).text(),
+ 'vod_director': data2.eq(5).text(),
+ 'vod_content': data('.yp_context').text().strip()
+ }
+ vdata = data('.paly_list_btn a')
+ play = []
+ for i in vdata.items():
+ a = i.text() + "$" + i.attr.href
+ play.append(a)
+ vod["vod_play_from"] = "在线播放"
+ vod["vod_play_url"] = "#".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ data = self.getpq(self.fetch(f"{self.host}/page/{pg}/?s={key}", headers=self.headers).text)
+ return {'list': self.getlist(data('.mi_cont .bt_img ul li')), 'page': pg}
+
def playerContent(self, flag, id, vipFlags):
    """Decrypt the AES-encrypted player config embedded in the page.

    The last <script> under .videoplay holds a base64 ciphertext plus the
    key/iv passed to `parse(...)`; on any failure fall back to letting the
    host app sniff the page itself (parse=1).
    """
    data = self.getpq(self.fetch(id, headers=self.headers).text)
    try:
        sc = data('.videoplay script').eq(-1).text()
        # First quoted var assignment is the base64 payload.
        strd = re.findall(r'var\s+[^=]*=\s*"([^"]*)";', sc)
        # parse("key", "iv") carries the AES key and iv (quotes stripped).
        kdata = re.findall(r'parse\((.*?)\);', sc)
        jm = self.aes(strd[0], kdata[0].replace('"', ''), kdata[1].replace('"', ''))
        url = re.search(r'url: "(.*?)"', jm).group(1)
        p = 0
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. The fallback is deliberately
        # best-effort, so catch only ordinary exceptions.
        p = 1
        url = id
    result = {}
    result["parse"] = p
    result["url"] = url
    result["header"] = self.headers
    return result
+
+ def localProxy(self, param):
+ pass
+
def getpq(self, text):
    """Parse HTML into a PyQuery document.

    lxml sometimes rejects an already-decoded string (e.g. one carrying an
    encoding declaration); retry with raw UTF-8 bytes in that case.
    """
    try:
        return pq(text)
    except Exception as err:
        print(f"{str(err)}")
        return pq(text.encode('utf-8'))
+
def getlist(self, data):
    """Map list-item nodes to the vod dicts the host app expects."""
    videos = []
    for item in data.items():
        videos.append({
            'vod_id': item('a').attr('href'),
            'vod_name': item('a img').attr('alt'),
            'vod_pic': item('a img').attr('src'),
            'vod_remarks': item('.dycategory').text(),
            # Play info when present, otherwise the rating text.
            'vod_year': item('.dyplayinfo').text() or item('.rating').text(),
        })
    return videos
+
def aes(self, word, key, iv):
    """AES-CBC decrypt a base64-encoded payload.

    `key`/`iv` arrive as text; the plaintext is PKCS#7-unpadded and
    returned as a UTF-8 string.
    """
    cipher = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
    padded = cipher.decrypt(base64.b64decode(word))
    return unpad(padded, AES.block_size).decode('utf-8')
diff --git a/py/网络直播.py b/py/网络直播.py
new file mode 100644
index 0000000..4e54c02
--- /dev/null
+++ b/py/网络直播.py
@@ -0,0 +1,767 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import parse_qs
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ tid = 'douyin'
+ headers = self.gethr(0, tid)
+ response = requests.head(self.hosts[tid], headers=headers)
+ ttwid = response.cookies.get('ttwid')
+ headers.update({
+ 'authority': self.hosts[tid].split('//')[-1],
+ 'cookie': f'ttwid={ttwid}' if ttwid else ''
+ })
+ self.dyheaders = headers
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = [
+ {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
+ },
+ {
+ "User-Agent": "Dart/3.4 (dart:io)"
+ }
+ ]
+
+ excepturl = 'https://www.baidu.com'
+
+ hosts = {
+ "huya": ["https://www.huya.com","https://mp.huya.com"],
+ "douyin": "https://live.douyin.com",
+ "douyu": "https://www.douyu.com",
+ "wangyi": "https://cc.163.com",
+ "bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
+ }
+
+ referers = {
+ "huya": "https://live.cdn.huya.com",
+ "douyin": "https://live.douyin.com",
+ "douyu": "https://m.douyu.com",
+ "bili": "https://live.bilibili.com"
+ }
+
+ playheaders = {
+ "wangyi": {
+ "User-Agent": "ExoPlayer",
+ "Connection": "Keep-Alive",
+ "Icy-MetaData": "1"
+ },
+ "bili": {
+ 'Accept': '*/*',
+ 'Icy-MetaData': '1',
+ 'referer': referers['bili'],
+ 'user-agent': headers[0]['User-Agent']
+ },
+ 'douyin': {
+ 'User-Agent': 'libmpv',
+ 'Icy-MetaData': '1'
+ },
+ 'huya': {
+ 'User-Agent': 'ExoPlayer',
+ 'Connection': 'Keep-Alive',
+ 'Icy-MetaData': '1'
+ },
+ 'douyu': {
+ 'User-Agent': 'libmpv',
+ 'Icy-MetaData': '1'
+ }
+ }
+
+ def process_bili(self):
+ try:
+ self.blfdata = self.fetch(
+ f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
+ headers=self.gethr(0, 'bili')
+ ).json()
+ return ('bili', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['name'], 'v': str(i['id'])}
+ for i in self.blfdata['data']]}])
+ except Exception as e:
+ print(f"bili处理错误: {e}")
+ return 'bili', None
+
+ def process_douyin(self):
+ try:
+ data = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
+ for i in data.items():
+ if 'categoryData' in i.text():
+ content = i.text()
+ start = content.find('{')
+ end = content.rfind('}') + 1
+ if start != -1 and end != -1:
+ json_str = content[start:end]
+ json_str = json_str.replace('\\"', '"')
+ try:
+ self.dyifdata = json.loads(json_str)
+ return ('douyin', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['partition']['title'],
+ 'v': f"{i['partition']['id_str']}@@{i['partition']['title']}"}
+ for i in self.dyifdata['categoryData']]}])
+ except json.JSONDecodeError as e:
+ print(f"douyin解析错误: {e}")
+ return 'douyin', None
+ except Exception as e:
+ print(f"douyin请求或处理错误: {e}")
+ return 'douyin', None
+
+ def process_douyu(self):
+ try:
+ self.dyufdata = self.fetch(
+ f'{self.referers["douyu"]}/api/cate/list',
+ headers=self.headers[1]
+ ).json()
+ return ('douyu', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['cate1Name'], 'v': str(i['cate1Id'])}
+ for i in self.dyufdata['data']['cate1Info']]}])
+ except Exception as e:
+ print(f"douyu错误: {e}")
+ return 'douyu', None
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "虎牙": "huya",
+ "抖音": "douyin",
+ "斗鱼": "douyu",
+ "网易": "wangyi"
+ }
+ classes = []
+ filters = {
+ 'huya': [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
+ {'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
+ }
+
+ with ThreadPoolExecutor(max_workers=3) as executor:
+ futures = {
+ executor.submit(self.process_bili): 'bili',
+ executor.submit(self.process_douyin): 'douyin',
+ executor.submit(self.process_douyu): 'douyu'
+ }
+
+ for future in futures:
+ platform, filter_data = future.result()
+ if filter_data:
+ filters[platform] = filter_data
+
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ pass
+
def categoryContent(self, tid, pg, filter, extend):
    """Dispatch the category listing to the per-platform handler.

    `tid` is either a bare platform key ('wangyi', 'bili', ...) or a
    drill-down id of the form 'click_<platform>@@...', hence the
    substring matching for every platform except wangyi.
    """
    result = {'page': pg, 'limit': 90, 'total': 999999}
    vdata = []
    pagecount = 9999
    if tid == 'wangyi':
        vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
    else:
        handlers = (
            ('bili', self.biliContent),
            ('huya', self.huyaContent),
            ('douyin', self.douyinContent),
            ('douyu', self.douyuContent),
        )
        for key, handler in handlers:
            if key in tid:
                vdata, pagecount = handler(tid, pg, filter, extend, vdata)
                break
    result['list'] = vdata
    result['pagecount'] = pagecount
    return result
+
+ def wyccContent(self, tid, pg, filter, extend, vdata):
+ params = {
+ 'format': 'json',
+ 'start': (int(pg) - 1) * 20,
+ 'size': '20',
+ }
+ response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
+ for i in response['lives']:
+ if i.get('cuteid'):
+ bvdata = self.buildvod(
+ vod_id=f"{tid}@@{i['cuteid']}",
+ vod_name=i.get('title'),
+ vod_pic=i.get('cover'),
+ vod_remarks=i.get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(bvdata)
+ return vdata, 9999
+
+ def biliContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ for i in self.blfdata['data']:
+ if str(i['id']) == extend['cate']:
+ for j in i['list']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{i['id']}@@{j['id']}",
+ vod_name=j.get('name'),
+ vod_pic=j.get('pic'),
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
+ data = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
+ for i in data['data']['list']:
+ if i.get('roomid'):
+ data = self.buildvod(
+ f"{tid}@@{i['roomid']}",
+ i.get('title'),
+ i.get('cover'),
+ i.get('watched_show', {}).get('text_large'),
+ 0,
+ i.get('uname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(data)
+ return vdata, 9999
+
+ def huyaContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ id = extend.get('cate')
+ data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={id}',
+ headers=self.headers[1]).json()
+ for i in data['data']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{int(i['gid'])}",
+ vod_name=i.get('gameFullName'),
+ vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(i["gid"])}-MS.jpg',
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ gid = ''
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ gid = f'&gameId={ids[1]}'
+ data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
+ headers=self.headers[1]).json()
+ for i in data['data']['datas']:
+ if i.get('profileRoom'):
+ v = self.buildvod(
+ f"{tid}@@{i['profileRoom']}",
+ i.get('introduction'),
+ i.get('screenshot'),
+ str(int(i.get('totalCount', '1')) / 10000) + '万',
+ 0,
+ i.get('nick'),
+ style={"type": "rect", "ratio": 1.33}
+
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def douyinContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ ids = extend.get('cate').split('@@')
+ for i in self.dyifdata['categoryData']:
+ c = i['partition']
+ if c['id_str'] == ids[0] and c['title'] == ids[1]:
+ vlist = i['sub_partition'].copy()
+ vlist.insert(0, {'partition': c})
+ for j in vlist:
+ j = j['partition']
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{j['id_str']}@@{j['type']}",
+ vod_name=j.get('title'),
+ vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
+ data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
+ for i in data['data']['data']:
+ v = self.buildvod(
+ vod_id=f"{tid}@@{i['web_rid']}",
+ vod_name=i['room'].get('title'),
+ vod_pic=i['room']['cover'].get('url_list')[0],
+ vod_year=i.get('user_count_str'),
+ vod_remarks=i['room']['owner'].get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def douyuContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ for i in self.dyufdata['data']['cate2Info']:
+ if str(i['cate1Id']) == extend['cate']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{i['cate2Id']}",
+ vod_name=i.get('cate2Name'),
+ vod_pic=i.get('icon'),
+ vod_remarks=i.get('count'),
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/japi/weblist/apinc/allpage/6/{pg}'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
+ url = f'{self.hosts[tid]}{path}'
+ data = self.fetch(url, headers=self.headers[1]).json()
+ for i in data['data']['rl']:
+ v = self.buildvod(
+ vod_id=f"{tid}@@{i['rid']}",
+ vod_name=i.get('rn'),
+ vod_pic=i.get('rs16'),
+ vod_year=str(int(i.get('ol', 1)) / 10000) + '万',
+ vod_remarks=i.get('nn'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@@')
+ if ids[0] == 'wangyi':
+ vod = self.wyccDetail(ids)
+ elif ids[0] == 'bili':
+ vod = self.biliDetail(ids)
+ elif ids[0] == 'huya':
+ vod = self.huyaDetail(ids)
+ elif ids[0] == 'douyin':
+ vod = self.douyinDetail(ids)
+ elif ids[0] == 'douyu':
+ vod = self.douyuDetail(ids)
+ return {'list': [vod]}
+
+ def wyccDetail(self, ids):
+ try:
+ vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()
+
+ def get_quality_name(vbr):
+ if vbr <= 600:
+ return "标清"
+ elif vbr <= 1000:
+ return "高清"
+ elif vbr <= 2000:
+ return "超清"
+ else:
+ return "蓝光"
+
+ data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
+ name = data['live'].get('title', ids[0])
+ vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
+ vod_content=data.get('description_suffix'))
+ resolution_data = data['live']['quickplay']['resolution']
+ all_streams = {}
+ sorted_qualities = sorted(resolution_data.items(),
+ key=lambda x: x[1]['vbr'],
+ reverse=True)
+ for quality, data in sorted_qualities:
+ vbr = data['vbr']
+ quality_name = get_quality_name(vbr)
+ for cdn_name, url in data['cdn'].items():
+ if cdn_name not in all_streams and type(url) == str and url.startswith('http'):
+ all_streams[cdn_name] = []
+ if isinstance(url, str) and url.startswith('http'):
+ all_streams[cdn_name].extend([quality_name, url])
+ plists = []
+ names = []
+ for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
+ names.append(f'线路{i}')
+ pstr = f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}"
+ plists.append(pstr)
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plists)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def biliDetail(self, ids):
+ try:
+ vdata = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
+ headers=self.gethr(0, ids[0])).json()
+ v = vdata['data']['room_info']
+ vod = self.buildvod(
+ vod_name=v.get('title'),
+ type_name=v.get('parent_area_name') + '/' + v.get('area_name'),
+ vod_remarks=v.get('tags'),
+ vod_play_from=v.get('title'),
+ )
+ data = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
+ headers=self.gethr(0, ids[0])).json()
+ vdnams = data['data']['playurl_info']['playurl']['g_qn_desc']
+ all_accept_qns = []
+ streams = data['data']['playurl_info']['playurl']['stream']
+ for stream in streams:
+ for format_item in stream['format']:
+ for codec in format_item['codec']:
+ if 'accept_qn' in codec:
+ all_accept_qns.append(codec['accept_qn'])
+ max_accept_qn = max(all_accept_qns, key=len) if all_accept_qns else []
+ quality_map = {
+ item['qn']: item['desc']
+ for item in vdnams
+ }
+ quality_names = [f"{quality_map.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in max_accept_qn]
+ vod['vod_play_url'] = "#".join(quality_names)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def huyaDetail(self, ids):
+ try:
+ vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
+ headers=self.headers[0]).json()
+ v = vdata['data']['liveData']
+ vod = self.buildvod(
+ vod_name=v.get('introduction'),
+ type_name=v.get('gameFullName'),
+ vod_director=v.get('nick'),
+ vod_remarks=v.get('contentIntro'),
+ )
+ data = dict(reversed(list(vdata['data']['stream'].items())))
+ names = []
+ plist = []
+
+ for stream_type, stream_data in data.items():
+ if isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data:
+ names.append(f"线路{len(names) + 1}")
+ qualities = sorted(
+ stream_data['rateArray'],
+ key=lambda x: (x['iBitRate'], x['sDisplayName']),
+ reverse=True
+ )
+ cdn_urls = []
+ for cdn in stream_data['multiLine']:
+ quality_urls = []
+ for quality in qualities:
+ quality_name = quality['sDisplayName']
+ bit_rate = quality['iBitRate']
+ base_url = cdn['url']
+ if bit_rate > 0:
+ if '.m3u8' in base_url:
+ new_url = base_url.replace(
+ 'ratio=2000',
+ f'ratio={bit_rate}'
+ )
+ else:
+ new_url = base_url.replace(
+ 'imgplus.flv',
+ f'imgplus_{bit_rate}.flv'
+ )
+ else:
+ new_url = base_url
+ quality_urls.extend([quality_name, new_url])
+ encoded_urls = self.e64(json.dumps(quality_urls))
+ cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{encoded_urls}")
+
+ if cdn_urls:
+ plist.append('#'.join(cdn_urls))
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plist)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def douyinDetail(self, ids):
+ url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
+ data = self.fetch(url, headers=self.dyheaders).json()
+ try:
+ vdata = data['data']['data'][0]
+ vod = self.buildvod(
+ vod_name=vdata['title'],
+ vod_remarks=vdata['user_count_str'],
+ )
+ resolution_data = vdata['stream_url']['live_core_sdk_data']['pull_data']['options']['qualities']
+ stream_json = vdata['stream_url']['live_core_sdk_data']['pull_data']['stream_data']
+ stream_json = json.loads(stream_json)
+ available_types = []
+ if any(sdk_key in stream_json['data'] and 'main' in stream_json['data'][sdk_key] for sdk_key in
+ stream_json['data']):
+ available_types.append('main')
+ if any(sdk_key in stream_json['data'] and 'backup' in stream_json['data'][sdk_key] for sdk_key in
+ stream_json['data']):
+ available_types.append('backup')
+ plist = []
+ for line_type in available_types:
+ format_arrays = {'flv': [], 'hls': [], 'lls': []}
+ qualities = sorted(resolution_data, key=lambda x: x['level'], reverse=True)
+ for quality in qualities:
+ sdk_key = quality['sdk_key']
+ if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
+ stream_info = stream_json['data'][sdk_key][line_type]
+ if stream_info.get('flv'):
+ format_arrays['flv'].extend([quality['name'], stream_info['flv']])
+ if stream_info.get('hls'):
+ format_arrays['hls'].extend([quality['name'], stream_info['hls']])
+ if stream_info.get('lls'):
+ format_arrays['lls'].extend([quality['name'], stream_info['lls']])
+ format_urls = []
+ for format_name, url_array in format_arrays.items():
+ if url_array:
+ encoded_urls = self.e64(json.dumps(url_array))
+ format_urls.append(f"{format_name}${ids[0]}@@{encoded_urls}")
+
+ if format_urls:
+ plist.append('#'.join(format_urls))
+
+ names = ['线路1', '线路2'][:len(plist)]
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plist)
+ return vod
+
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def douyuDetail(self, ids):
+ headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
+ try:
+ data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
+ vname = data['room']['room_name']
+ vod = self.buildvod(
+ vod_name=vname,
+ vod_remarks=data['room'].get('second_lvl_name'),
+ vod_director=data['room'].get('nickname'),
+ )
+ vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
+ json_body = vdata['data']
+ json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
+ sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
+ body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
+ body=self.params_to_json(body)
+ nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
+ plist = []
+ names = []
+ for i,x in enumerate(nubdata['data']['cdnsWithName']):
+ names.append(f'线路{i+1}')
+ d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
+ plist.append(
+ f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plist)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
def douyu_text(self, text):
    """Trim the douyu homeH5Enc script payload.

    Cuts the text (minus its first 4 chars) at the middle 'function'
    occurrence and rewrites `eval(strc)(...)` to `strc` so the remaining
    script yields the string instead of executing it. Returned unchanged
    when there are not enough 'function' occurrences.
    """
    spots = [m.start() for m in re.finditer('function', text)]
    count = len(spots)
    # Index (1-based) of the 'function' at which to cut.
    if count % 2 == 0:
        half = count // 2 + 1
    else:
        half = (count - 1) // 2 + 1
    if count >= half:
        trimmed = text[4:spots[half - 1]]
        return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', trimmed)
    return text
+
+ def searchContent(self, key, quick, pg="1"):
+ pass
+
def playerContent(self, flag, id, vipFlags):
    """Decode the play id ('platform@@payload...') into a playable url.

    wangyi/douyin/huya payloads are base64 JSON [name, url, ...] arrays;
    bili and douyu need an extra API round trip. On any error fall back
    to a parse=1 page the host app can sniff.
    """
    try:
        ids = id.split('@@')
        p = 1
        if ids[0] in ['wangyi', 'douyin', 'huya']:
            p, url = 0, json.loads(self.d64(ids[1]))
        elif ids[0] == 'bili':
            p, url = self.biliplay(ids)
        elif ids[0] == 'douyu':
            p, url = self.douyuplay(ids)
        # Bug fix: the original had a second `elif ids[0] == 'huya'`
        # branch that was unreachable ('huya' is handled in the first
        # membership test above); removed.
        return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
    except Exception:
        return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
+
+ def biliplay(self, ids):
+ try:
+ data = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
+ headers=self.gethr(0, ids[0])).json()
+ urls = []
+ line_index = 1
+ for stream in data['data']['playurl_info']['playurl']['stream']:
+ for format_item in stream['format']:
+ for codec in format_item['codec']:
+ for url_info in codec['url_info']:
+ full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
+ urls.extend([f"线路{line_index}", full_url])
+ line_index += 1
+ return 0, urls
+ except Exception as e:
+ return 1, self.excepturl
+
+ def douyuplay(self, ids):
+ try:
+ sdata = json.loads(self.d64(ids[1]))
+ headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
+ ldata = json.loads(self.d64(ids[2]))
+ result_obj = {}
+ with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
+ futures = [
+ executor.submit(
+ self.douyufp,
+ sdata,
+ quality,
+ headers,
+ self.hosts[ids[0]],
+ result_obj
+ ) for quality in ldata
+ ]
+ for future in futures:
+ future.result()
+
+ result = []
+ for bit in sorted(result_obj.keys(), reverse=True):
+ result.extend(result_obj[bit])
+
+ if result:
+ return 0, result
+ return 1, self.excepturl
+
+ except Exception as e:
+ return 1, self.excepturl
+
+ def douyufp(self, sdata, quality, headers, host, result_obj):
+ try:
+ body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
+ body=self.params_to_json(body)
+ data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
+ data=body, headers=headers).json()
+ if data.get('data'):
+ play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
+ bit = quality.get('bit', 0)
+ if bit not in result_obj:
+ result_obj[bit] = []
+ result_obj[bit].extend([quality['name'], play_url])
+ except Exception as e:
+ print(f"Error fetching {quality['name']}: {str(e)}")
+
+ def localProxy(self, param):
+ pass
+
def e64(self, text):
    """Encode UTF-8 text to a base64 string; return '' on failure."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""
+
def d64(self, encoded_text):
    """Decode a base64 string back to UTF-8 text; return '' on failure."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
+
def josn_to_params(self, params, skip_empty=False):
    """Serialise a dict to 'k=v&k2=v2' (no URL-encoding is applied).

    With skip_empty, falsy values are dropped. (The 'josn' typo in the
    name is kept for caller compatibility.)
    """
    pairs = (f"{k}={v}" for k, v in params.items() if not (skip_empty and not v))
    return "&".join(pairs)
+
def params_to_json(self, query_string):
    """Parse a 'k=v&...' query string into a flat dict (first value per key)."""
    return {key: values[0] for key, values in parse_qs(query_string).items()}
+
def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
             type_name='', vod_area='', vod_actor='', vod_director='',
             vod_content='', vod_play_from='', vod_play_url=''):
    """Assemble a vod dict, dropping empty fields.

    A truthy vod_tag marks the entry as a drill-down folder.
    """
    fields = {
        'vod_id': vod_id,
        'vod_name': vod_name,
        'vod_pic': vod_pic,
        'vod_year': vod_year,
        'vod_tag': 'folder' if vod_tag else '',
        'vod_remarks': vod_remarks,
        'style': style,
        'type_name': type_name,
        'vod_area': vod_area,
        'vod_actor': vod_actor,
        'vod_director': vod_director,
        'vod_content': vod_content,
        'vod_play_from': vod_play_from,
        'vod_play_url': vod_play_url,
    }
    return {name: value for name, value in fields.items() if value}
+
def getpq(self, url, headers=None, cookies=None):
    """GET `url` and parse the body into a PyQuery document, retrying with
    raw UTF-8 bytes when lxml rejects the decoded string."""
    body = self.fetch(url, headers=headers, cookies=cookies).text
    try:
        return pq(body)
    except Exception as e:
        print(f"解析页面错误: {str(e)}")
        return pq(body.encode('utf-8'))
+
def gethr(self, index, rf='', zr=''):
    """Build request headers with a referer.

    `index` selects the base header set, `zr` is an explicit referer url,
    otherwise `rf` names a platform whose referer base is used.

    Bug fix: the original assigned into self.headers[index] itself, so
    every call permanently mutated the shared class-level dict and the
    referer from one platform leaked into later requests. Work on a copy.
    """
    headers = dict(self.headers[index])
    headers['referer'] = zr if zr else f"{self.referers[rf]}/"
    return headers
+
def handle_exception(self, e):
    # Log the failure and return a placeholder vod whose play entry points
    # at self.excepturl, so the UI surfaces the error instead of crashing.
    print(f"报错: {str(e)}")
    return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'翻车啦${self.excepturl}'}
+
diff --git a/py/美帕APP.py b/py/美帕APP.py
new file mode 100644
index 0000000..bcb3a51
--- /dev/null
+++ b/py/美帕APP.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """CatVodSpider for the 美帕 app JSON API at g.c494.com.

    Every endpoint lives under /api.php/app/ and is called with an empty
    `token` parameter; most responses are already in the vod-dict shape
    the host app expects, so several methods pass them through unchanged.
    """

    def getName(self):
        return "mp"

    def init(self, extend=""):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin for all requests.
    host = 'https://g.c494.com'

    # Headers mimicking the Android app client.
    # NOTE(review): client_name looks base64-encoded — confirm what the
    # server expects before changing any of these values.
    header = {
        'User-Agent': 'Dart/2.10 (dart:io)',
        'platform_version': 'RP1A.200720.011',
        'version': '2.2.3',
        'copyright': 'xiaogui',
        'platform': 'android',
        'client_name': '576O5p+P5b2x6KeG',
    }

    def homeContent(self, filter):
        """Fetch nav categories and convert each category's `type_extend`
        comma-separated strings into the host app's filter structure."""
        data = self.fetch(f'{self.host}/api.php/app/nav?token=', headers=self.header).json()
        # Maps recognised filter keys to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["list"]
        for item in json_data:
            has_non_empty_field = False
            jsontype_extend = item["type_extend"]
            classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
            # Only build a filter group when at least one known key is non-empty.
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        # Comma-separated options become {n, v} pairs.
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Flatten the grouped home-page recommendations into one list."""
        rsp = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.header)
        root = rsp.json()['list']
        videos = [item for vodd in root for item in vodd['vlist']]
        return {'list': videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Return one listing page; the API response is already in the
        expected shape, so it is returned untouched."""
        parms = {"pg": pg, "tid": tid, "class": extend.get("class", ""), "area": extend.get("area", ""),
                 "lang": extend.get("lang", ""), "year": extend.get("year", ""), "token": ""}
        data = self.fetch(f'{self.host}/api.php/app/video', params=parms, headers=self.header).json()
        return data

    def detailContent(self, ids):
        """Fetch one title's detail, stripping advert/player fields the
        host app must not see."""
        parms = {"id": ids[0], "token": ""}
        data = self.fetch(f'{self.host}/api.php/app/video_detail', params=parms, headers=self.header).json()
        vod = data['data']
        vod.pop('pause_advert_list', None)
        vod.pop('init_advert_list', None)
        vod.pop('vod_url_with_player', None)
        return {"list": [vod]}

    def searchContent(self, key, quick, pg='1'):
        """Keyword search; the API response is passed through unchanged."""
        parms = {'pg': pg, 'text': key, 'token': ''}
        data = self.fetch(f'{self.host}/api.php/app/search', params=parms, headers=self.header).json()
        return data

    def playerContent(self, flag, id, vipFlags):
        # ids are direct play urls; no parsing needed.
        return {"parse": 0, "url": id, "header": {'User-Agent': 'User-Agent: Lavf/58.12.100'}}

    def localProxy(self, param):
        pass
diff --git a/py/美帕影视.py b/py/美帕影视.py
new file mode 100644
index 0000000..bcb3a51
--- /dev/null
+++ b/py/美帕影视.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+ def getName(self):
+ return "mp"
+
+ def init(self, extend=""):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://g.c494.com'
+
+ header = {
+ 'User-Agent': 'Dart/2.10 (dart:io)',
+ 'platform_version': 'RP1A.200720.011',
+ 'version': '2.2.3',
+ 'copyright': 'xiaogui',
+ 'platform': 'android',
+ 'client_name': '576O5p+P5b2x6KeG',
+ }
+
+ def homeContent(self, filter):
+ data = self.fetch(f'{self.host}/api.php/app/nav?token=', headers=self.header).json()
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["list"]
+ for item in json_data:
+ has_non_empty_field = False
+ jsontype_extend = item["type_extend"]
+ classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ return result
+
+ def homeVideoContent(self):
+ rsp = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.header)
+ root = rsp.json()['list']
+ videos = [item for vodd in root for item in vodd['vlist']]
+ return {'list': videos}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ parms = {"pg": pg, "tid": tid, "class": extend.get("class", ""), "area": extend.get("area", ""),
+ "lang": extend.get("lang", ""), "year": extend.get("year", ""), "token": ""}
+ data = self.fetch(f'{self.host}/api.php/app/video', params=parms, headers=self.header).json()
+ return data
+
+ def detailContent(self, ids):
+ parms = {"id": ids[0], "token": ""}
+ data = self.fetch(f'{self.host}/api.php/app/video_detail', params=parms, headers=self.header).json()
+ vod = data['data']
+ vod.pop('pause_advert_list', None)
+ vod.pop('init_advert_list', None)
+ vod.pop('vod_url_with_player', None)
+ return {"list": [vod]}
+
+ def searchContent(self, key, quick, pg='1'):
+ parms = {'pg': pg, 'text': key, 'token': ''}
+ data = self.fetch(f'{self.host}/api.php/app/search', params=parms, headers=self.header).json()
+ return data
+
+ def playerContent(self, flag, id, vipFlags):
+        return {"parse": 0, "url": id, "header": {'User-Agent': 'Lavf/58.12.100'}}
+
+ def localProxy(self, param):
+ pass
diff --git a/py/胖虎APP.py b/py/胖虎APP.py
new file mode 100644
index 0000000..a76035f
--- /dev/null
+++ b/py/胖虎APP.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+sys.path.append('..')
+from base.spider import Spider
+from Cryptodome.Cipher import AES
+from Cryptodome.Util.Padding import pad, unpad
+from base64 import b64encode, b64decode
+import json
+import time
+
+
+class Spider(Spider):
+ def getName(self):
+ return "py_胖虎"
+
+ def init(self, extend=""):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ def aes(self, operation, text):
+ key = "ihIwTbt2YAe9TGea".encode('utf-8')
+ iv = key
+
+ if operation == 'encrypt':
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ ct_bytes = cipher.encrypt(pad(text.encode('utf-8'), AES.block_size))
+ ct = b64encode(ct_bytes).decode('utf-8')
+ return ct
+ elif operation == 'decrypt':
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+ return pt.decode('utf-8')
+
+ host = "http://sm.physkan.top:3389"
+ t = str(int(time.time()))
+
+ def homeContent(self, filter):
+ self.header = {
+ 'User-Agent': 'okhttp/3.14.9',
+ 'app-version-code': '402',
+ 'app-ui-mode': 'light',
+ 'app-user-device-id': '25f869d32598d3d3089a929453dff0bb7',
+ 'app-api-verify-time': self.t,
+ 'app-api-verify-sign': self.aes('encrypt', self.t),
+ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
+ }
+ data = self.fetch("{0}/api.php/getappapi.index/initV119".format(self.host), headers=self.header).content.decode(
+ 'utf-8')
+ data1 = json.loads(data)['data']
+ print(data1)
+ data2 = self.aes('decrypt', data1)
+ dy = {
+ "class": "类型",
+ "area": "地区",
+ "lang": "语言",
+ "year": "年份",
+ "letter": "字母",
+ "by": "排序",
+ "sort": "排序"
+ }
+
+ filter = {}
+ classes = []
+ json_data = json.loads(data2)['type_list']
+ self.homedata = json.loads(data2)['banner_list']
+
+ for item in json_data:
+ if item['type_name'] == '全部':
+ continue
+
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item['type_extend'])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+
+ classes.append({
+ "type_name": item['type_name'],
+ "type_id": item['type_id']
+ })
+
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+
+ if has_non_empty_field:
+ filter[str(item['type_id'])] = []
+
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(',')
+ value_array = [
+ {"n": value.strip(), "v": value.strip()}
+ for value in values if value.strip() != ''
+ ]
+
+ filter[str(item['type_id'])].append({
+ "key": dkey,
+ "name": dy[dkey],
+ "value": value_array
+ })
+ result = {}
+ result['class'] = classes
+ result['filter'] = filter
+ return result
+
+ def homeVideoContent(self):
+ result = {
+ 'list': self.homedata
+ }
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = f"area={extend.get('area', '全部')}&year={extend.get('year', '全部')}&type_id={tid}&page={pg}&sort={extend.get('sort', '最新')}&lang={extend.get('lang', '全部')}&class={extend.get('class', '全部')}"
+ result = {}
+ url = '{0}/api.php/getappapi.index/typeFilterVodList'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = self.aes('decrypt', data1)
+ result['list'] = json.loads(data2)['recommend_list']
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = f"vod_id={ids[0]}"
+ print(body)
+ url = '{0}/api.php/getappapi.index/vodDetail'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = json.loads(self.aes('decrypt', data1))
+ print(data2)
+ vod = data2['vod']
+ print(vod)
+ play = []
+ names = []
+ for itt in data2['vod_play_list']:
+ a = []
+ names.append(itt['player_info']['show'])
+ parse = itt['player_info']['parse']
+ for it in itt['urls']:
+ if re.search(r'mp4|m3u8', it['url']):
+ a.append(f"{it['name']}${it['url']}")
+ elif re.search(r'www.yemu.xyz', it['parse_api_url']):
+ a.append(f"{it['name']}${it['parse_api_url']}")
+ else:
+ a.append(
+ f"{it['name']}${'parse_api=' + parse + '&url=' + self.aes('encrypt', it['url']) + '&token=' + it['token']}")
+ play.append('#'.join(a))
+ vod['vod_play_from'] = '$$$'.join(names)
+ vod['vod_play_url'] = '$$$'.join(play)
+ result = {
+ 'list': [
+ vod
+ ]
+ }
+ return result
+
+ def searchContent(self, key, quick, pg='1'):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ url = '{0}/api.php/getappapi.index/searchList'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = self.aes('decrypt', data1)
+ result = {
+ 'list': json.loads(data2)['search_list']
+ }
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ def edu(str):
+ def replacer(match):
+ from urllib.parse import quote_plus
+ return match.group(1) + quote_plus(match.group(2)) + match.group(3)
+
+ return re.sub(r'(url=)(.*?)(&token)', replacer, str)
+
+ url = id
+ parse = 0
+ if 'm3u8' not in url and 'mp4' not in url:
+ try:
+ body = edu(url)
+ print(body)
+ data = self.post('{0}/api.php/getappapi.index/vodParse'.format(self.host), headers=self.header,
+ data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = json.loads(self.aes('decrypt', data1))['json']
+ url = json.loads(data2)['url']
+ except:
+ url = id
+ parse = 1
+ if not id.startswith('https://www.yemu.xyz'):
+ url = 'https://www.yemu.xyz/?url={0}'.format(id)
+ result = {}
+ print(url)
+ headers = self.header.copy()
+        del headers['Content-Type']
+ result["parse"] = parse
+ result["url"] = url
+ result["header"] = headers
+ return result
+
+ def localProxy(self, param):
+ pass
diff --git a/py/腾讯视频.py b/py/腾讯视频.py
new file mode 100644
index 0000000..7a5218f
--- /dev/null
+++ b/py/腾讯视频.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import uuid
+import copy
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.dbody = {
+ "page_params": {
+ "channel_id": "",
+ "filter_params": "sort=75",
+ "page_type": "channel_operation",
+ "page_id": "channel_list_second_page"
+ }
+ }
+ self.body = self.dbody
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://v.qq.com'
+
+ apihost = 'https://pbaccess.video.qq.com'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
+ 'origin': host,
+ 'referer': f'{host}/'
+ }
+
+ def homeContent(self, filter):
+ cdata = {
+ "电视剧": "100113",
+ "电影": "100173",
+ "综艺": "100109",
+ "纪录片": "100105",
+ "动漫": "100119",
+ "少儿": "100150",
+ "短剧": "110755"
+ }
+ result = {}
+ classes = []
+ filters = {}
+ for k in cdata:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cdata[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
+ for future in futures:
+ cid, data = future.result()
+ if not data.get('data', {}).get('module_list_datas'):
+ continue
+ filter_dict = {}
+ try:
+ items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
+ for item in items:
+ if not item.get('item_params', {}).get('index_item_key'):
+ continue
+ params = item['item_params']
+ filter_key = params['index_item_key']
+ if filter_key not in filter_dict:
+ filter_dict[filter_key] = {
+ 'key': filter_key,
+ 'name': params['index_name'],
+ 'value': []
+ }
+ filter_dict[filter_key]['value'].append({
+ 'n': params['option_name'],
+ 'v': params['option_value']
+ })
+ except (IndexError, KeyError):
+ continue
+ filters[cid] = list(filter_dict.values())
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
+ data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
+ vlist = []
+ for it in data['data']['CardList'][0]['children_list']['list']['cards']:
+ if it.get('params'):
+ p = it['params']
+ tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
+ id = it.get('id') or p.get('cid')
+ name = p.get('mz_title') or p.get('title')
+                if name and id and 'http' not in id:
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': name,
+ 'vod_pic': p.get('image_url'),
+ 'vod_year': tag.get('tag_2', {}).get('text'),
+ 'vod_remarks': tag.get('tag_4', {}).get('text')
+ })
+ return {'list': vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ params = {
+ "sort": extend.get('sort', '75'),
+ "attr": extend.get('attr', '-1'),
+ "itype": extend.get('itype', '-1'),
+ "ipay": extend.get('ipay', '-1'),
+ "iarea": extend.get('iarea', '-1'),
+ "iyear": extend.get('iyear', '-1'),
+ "theater": extend.get('theater', '-1'),
+ "award": extend.get('award', '-1'),
+ "recommend": extend.get('recommend', '-1')
+ }
+ if pg == '1':
+            self.body = copy.deepcopy(self.dbody)
+ self.body['page_params']['channel_id'] = tid
+ self.body['page_params']['filter_params'] = self.josn_to_params(params)
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=self.body, headers=self.headers).json()
+ ndata = data['data']
+ if ndata['has_next_page']:
+ result['pagecount'] = 9999
+ self.body['page_context'] = ndata['next_page_context']
+ else:
+ result['pagecount'] = int(pg)
+ vlist = []
+ for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
+ id = its.get('item_params', {}).get('cid')
+ if id:
+ p = its['item_params']
+ tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
+ name = p.get('mz_title') or p.get('title')
+ pic = p.get('new_pic_hz') or p.get('new_pic_vt')
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': name,
+ 'vod_pic': pic,
+ 'vod_year': tag.get('tag_2', {}).get('text'),
+ 'vod_remarks': tag.get('tag_4', {}).get('text')
+ })
+ result['list'] = vlist
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
+ body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ future_detail = executor.submit(self.get_vdata, vbody)
+ future_episodes = executor.submit(self.get_vdata, body)
+ vdata = future_detail.result()
+ data = future_episodes.result()
+
+ pdata = self.process_tabs(data, body, ids)
+ if not pdata:
+ return self.handle_exception(None, "No pdata available")
+
+ try:
+ star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
+ 0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
+ actors = [star['item_params']['name'] for star in star_list]
+ names = ['腾讯视频', '预告片']
+ plist, ylist = self.process_pdata(pdata, ids)
+ if not plist:
+ del names[0]
+ if not ylist:
+ del names[1]
+ vod = self.build_vod(vdata, actors, plist, ylist, names)
+ return {'list': [vod]}
+ except Exception as e:
+ return self.handle_exception(e, "Error processing detail")
+
+ def searchContent(self, key, quick, pg="1"):
+ headers = self.headers.copy()
+ headers.update({'Content-Type': 'application/json'})
+ body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
+ data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
+ json=body, headers=headers).json()
+ vlist = []
+ vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
+ v=data['data']['normalList']['itemList']
+ d=data['data']['areaBoxList'][0]['itemList']
+ q=v+d
+        if v and v[0].get('doc') and v[0]['doc'].get('id') == 'MainNeed': q = d + v
+ for k in q:
+            if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle', '') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
+ img_tag = k.get('videoInfo', {}).get('imgTag')
+ if img_tag is not None and isinstance(img_tag, str):
+ try:
+ tag = json.loads(img_tag)
+ except json.JSONDecodeError as e:
+ tag = {}
+ else:
+ tag = {}
+ pic = k.get('videoInfo', {}).get('imgUrl')
+ vlist.append({
+ 'vod_id': k['doc']['id'],
+ 'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
+ 'vod_pic': pic,
+ 'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
+ 'vod_remarks': tag.get('tag_4', {}).get('text', '')
+ })
+ return {'list': vlist, 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ ids = id.split('@')
+ url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
+ return {'jx':1,'parse': 1, 'url': url, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def get_filter_data(self, cid):
+        hbody = copy.deepcopy(self.dbody)
+ hbody['page_params']['channel_id'] = cid
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=hbody, headers=self.headers).json()
+ return cid, data
+
+ def get_vdata(self, body):
+ try:
+ vdata = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
+ json=body, headers=self.headers
+ ).json()
+ return vdata
+ except Exception as e:
+ print(f"Error in get_vdata: {str(e)}")
+ return {'data': {'module_list_datas': []}}
+
+ def process_pdata(self, pdata, ids):
+ plist = []
+ ylist = []
+ for k in pdata:
+ if k.get('item_id'):
+ pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
+ if '预告' in k['item_params']['union_title']:
+ ylist.append(pid)
+ else:
+ plist.append(pid)
+ return plist, ylist
+
+ def build_vod(self, vdata, actors, plist, ylist, names):
+ d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
+ urls = []
+ if plist:
+ urls.append('#'.join(plist))
+ if ylist:
+ urls.append('#'.join(ylist))
+ vod = {
+ 'type_name': d.get('sub_genre', ''),
+ 'vod_name': d.get('title', ''),
+ 'vod_year': d.get('year', ''),
+ 'vod_area': d.get('area_name', ''),
+ 'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
+ 'vod_actor': ','.join(actors),
+ 'vod_content': d.get('cover_description', ''),
+ 'vod_play_from': '$$$'.join(names),
+ 'vod_play_url': '$$$'.join(urls)
+ }
+ return vod
+
+ def handle_exception(self, e, message):
+ print(f"{message}: {str(e)}")
+ return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}
+
+ def process_tabs(self, data, body, ids):
+ try:
+ pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
+ tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
+ if tabs and len(json.loads(tabs)):
+ tabs = json.loads(tabs)
+ remaining_tabs = tabs[1:]
+ task_queue = []
+ for tab in remaining_tabs:
+ nbody = copy.deepcopy(body)
+ nbody['page_params']['page_context'] = tab['page_context']
+ task_queue.append(nbody)
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
+ results = [None] * len(task_queue)
+ for future in as_completed(future_map.keys()):
+ idx = future_map[future]
+ results[idx] = future.result()
+ for result in results:
+ if result:
+ page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
+ 'item_datas']
+ pdata.extend(page_data)
+ return pdata
+ except Exception as e:
+ print(f"Error processing episodes: {str(e)}")
+ return []
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+
diff --git a/py/芒果视频.py b/py/芒果视频.py
new file mode 100644
index 0000000..6ba8e34
--- /dev/null
+++ b/py/芒果视频.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ rhost='https://www.mgtv.com'
+
+ host='https://pianku.api.mgtv.com'
+
+ vhost='https://pcweb.api.mgtv.com'
+
+ mhost='https://dc.bz.mgtv.com'
+
+ shost='https://mobileso.bz.mgtv.com'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'origin': rhost,
+ 'referer': f'{rhost}/'
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "电影": "3",
+ "电视剧": "2",
+ "综艺": "1",
+ "动画": "50",
+ "少儿": "10",
+ "纪录片": "51",
+ "教育": "115"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ results = executor.map(self.getf, classes)
+ for id, ft in results:
+ if len(ft):filters[id] = ft
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
+ videoList = []
+ for i in data['data']:
+ if i.get('DSLList') and len(i['DSLList']):
+ for j in i['DSLList']:
+ if j.get('data') and j['data'].get('items') and len(j['data']['items']):
+ for k in j['data']['items']:
+ videoList.append({
+ 'vod_id': k["videoId"],
+ 'vod_name': k['videoName'],
+ 'vod_pic': k['img'],
+ 'vod_year': k.get('cornerTitle'),
+ 'vod_remarks': k.get('time') or k.get('desc'),
+ })
+ return {'list':videoList}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body={
+ 'allowedRC': '1',
+ 'platform': 'pcweb',
+ 'channelId': tid,
+ 'pn': pg,
+ 'pc': '80',
+ 'hudong': '1',
+ '_support': '10000000'
+ }
+ body.update(extend)
+ data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
+ videoList = []
+ for i in data['data']['hitDocs']:
+ videoList.append({
+ 'vod_id': i["playPartId"],
+ 'vod_name': i['title'],
+ 'vod_pic': i['img'],
+ 'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
+ 'vod_remarks': i['updateInfo']
+ })
+ result = {}
+ result['list'] = videoList
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
+ vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
+ d=vdata['data']['info']['detail']
+ vod = {
+ 'vod_name': vdata['data']['info']['title'],
+ 'type_name': d.get('kind'),
+ 'vod_year': d.get('releaseTime'),
+ 'vod_area': d.get('area'),
+ 'vod_lang': d.get('language'),
+ 'vod_remarks': d.get('updateInfo'),
+ 'vod_actor': d.get('leader'),
+ 'vod_director': d.get('director'),
+ 'vod_content': d.get('story'),
+ 'vod_play_from': '芒果TV',
+ 'vod_play_url': ''
+ }
+ data,pdata=self.fetch_page_data('1', ids[0],True)
+ pagecount=data['data'].get('total_page') or 1
+ if int(pagecount)>1:
+            pages = list(range(2, int(pagecount) + 1))
+ page_results = {}
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_to_page = {
+ executor.submit(self.fetch_page_data, page, ids[0]): page
+ for page in pages
+ }
+ for future in as_completed(future_to_page):
+ page = future_to_page[future]
+ try:
+ result = future.result()
+ page_results[page] = result
+ except Exception as e:
+ print(f"Error fetching page {page}: {e}")
+ for page in sorted(page_results.keys()):
+ pdata.extend(page_results[page])
+ vod['vod_play_url'] = '#'.join(pdata)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
+ videoList = []
+ for i in data['data']['contents']:
+ if i.get('data') and len(i['data']):
+ k = i['data'][0]
+ if k.get('vid') and k.get('img'):
+ try:
+ videoList.append({
+ 'vod_id': k['vid'],
+ 'vod_name': k['title'],
+ 'vod_pic': k['img'],
+ 'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
+ 'vod_remarks': '/'.join(i.get('desc',[])),
+ })
+ except:
+ print(k)
+ return {'list':videoList,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ id=f'{self.rhost}{id}'
+ return {'jx':1,'parse': 1, 'url': id, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def getf(self, body):
+ params = {
+ 'allowedRC': '1',
+ 'channelId': body['type_id'],
+ 'platform': 'pcweb',
+ '_support': '10000000',
+ }
+ data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
+ ft = []
+ for i in data['data']['listItems']:
+ try:
+ value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
+ value.get('tagName')]
+ ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
+ except:
+ print(i)
+ return body['type_id'], ft
+
+ def fetch_page_data(self, page, id, b=False):
+ body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
+ 'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
+ data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
+ ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
+ if b:
+ return data, ldata
+ else:
+ return ldata
diff --git a/py/若惜影视.py b/py/若惜影视.py
new file mode 100644
index 0000000..1b33d5d
--- /dev/null
+++ b/py/若惜影视.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor
+import json
+import time
+from base.spider import Spider
+
+class Spider(Spider):
+
+ '''
+ sites照常配置,
+ lives配置:
+ {
+ "name": "xxxx",
+ "type": 3,
+ "api": "路径/若惜追剧APP.py",
+ "ext": ""
+ }
+ '''
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def getName(self):
+ pass
+
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ data = self.getdata("/api.php/getappapi.index/initV119")
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["type_list"]
+ homedata = data["banner_list"][8:]
+ for item in json_data:
+ if item["type_name"] == "全部":
+ continue
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item["type_extend"])
+ homedata.extend(item["recommend_list"])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+ classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ result["list"] = homedata[1:]
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
+ "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
+ "class": extend.get('class', '全部')}
+ result = {}
+ data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
+ result["list"] = data["recommend_list"]
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = f"vod_id={ids[0]}"
+ data = self.getdata("/api.php/getappapi.index/vodDetail", body)
+ vod = data["vod"]
+ play = []
+ names = []
+ for itt in data["vod_play_list"]:
+ a = []
+ names.append(itt["player_info"]["show"])
+ for it in itt['urls']:
+ it['user_agent']=itt["player_info"].get("user_agent")
+ it["parse"]=itt["player_info"].get("parse")
+ a.append(f"{it['name']}${self.e64(json.dumps(it))}")
+ play.append("#".join(a))
+ vod["vod_play_from"] = "$$$".join(names)
+ vod["vod_play_url"] = "$$$".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ data = self.getdata("/api.php/getappapi.index/searchList", body)
+ result = {"list": data["search_list"], "page": pg}
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ ids = json.loads(self.d64(id))
+ h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
+ try:
+ if re.search(r'url=', ids['parse_api_url']):
+ data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
+ url=data.get('url') or data['data'].get('url')
+ else:
+ body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'],True))}&token={ids.get('token')}"
+ b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
+ url = json.loads(b)['url']
+ if 'error' in url:raise ValueError(f"解析失败: {url}")
+ p=0
+ except Exception as e:
+ print('错误信息:',e)
+ url, p = ids['url'], 1
+
+ if re.search(r'\.jpg|\.png|\.jpeg', url):
+ url = self.Mproxy(url)
+ result = {}
+ result["parse"] = p
+ result["url"] = url
+ result["header"] = h
+ return result
+
+ def liveContent(self, url):
+ id=self.homeContent(True)['class'][-1]['type_id']
+ vlist=self.categoryContent(id,1,False,{})['list']
+ results = []
+ with ThreadPoolExecutor(max_workers=len(vlist)) as executor:
+ futures = [executor.submit(self.livedetailContent, item['vod_name'], item['vod_id']) for item in vlist]
+ for future in futures:
+ try:
+ detail = future.result()
+ if detail:
+ results.append(detail)
+ except Exception as e:
+ print(f"处理详情数据失败: {str(e)}")
+ return '\n'.join(results)
+
+ def livedetailContent(self, name,id):
+ try:
+ print(f"获取直播源:{name}")
+ body = f"vod_id={id}"
+ data = self.getdata("/api.php/getappapi.index/vodDetail", body)
+ play = [f"{name},#genre#"]
+ for itt in data["vod_play_list"]:
+ for it in itt['urls']:
+ play.append(f"{it['name']}, {it['url']}")
+ except Exception as e:
+ print(f"获取直播源失败:{str(e)}")
+ play=[]
+ return '\n'.join(play)
+
+ def localProxy(self, param):
+ return self.Mlocal(param)
+
+ def gethost(self):
+ headers = {
+ 'User-Agent': 'okhttp/3.14.9'
+ }
+ host = self.fetch('https://rxysyyds.oss-cn-chengdu.aliyuncs.com/getapp.txt', headers=headers).text
+ return host.strip()
+
+ def aes(self, text,b=None):
+ key = b"ebad3f1a58b13933"
+ cipher = AES.new(key, AES.MODE_CBC, key)
+ if b:
+ ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
+ ct = b64encode(ct_bytes).decode("utf-8")
+ return ct
+ else :
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+ return pt.decode("utf-8")
+
+ def header(self):
+ t = str(int(time.time()))
+ header = {"Referer":self.host,
+ "User-Agent": "okhttp/3.14.9", "app-version-code": "140", "app-ui-mode": "light",
+ "app-api-verify-time": t, "app-user-device-id": self.md5(t),
+ "app-api-verify-sign": self.aes(t,True),
+ "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
+ return header
+
+ def getdata(self, path, data=None):
+ vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
+ data1 = self.aes(vdata)
+ return json.loads(data1)
+
+ def Mproxy(self, url):
+ return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"
+
+ def Mlocal(self, param,header=None):
+ url = self.d64(param["url"])
+ ydata = self.fetch(url, headers=header, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = self.fetch(url, headers=header).content.decode('utf-8')
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if '#EXT' not in string and 'http' not in string:
+ last_slash_index = string.rfind('/')
+ lpath = string[:last_slash_index + 1]
+                lines[index] = durl + ('' if string.startswith('/') else '/') + string
+ data = '\n'.join(lines)
+        return [200, "application/vnd.apple.mpegurl", data]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
diff --git a/py/茶杯狐影视.py b/py/茶杯狐影视.py
new file mode 100644
index 0000000..2c879bf
--- /dev/null
+++ b/py/茶杯狐影视.py
@@ -0,0 +1,359 @@
+# coding = utf-8
+# !/usr/bin/python
+
+"""
+
+作者 繁华 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
+ ====================fanhua====================
+
+"""
+
+from Crypto.Util.Padding import unpad
+from urllib.parse import unquote
+from Crypto.Cipher import ARC4
+from urllib.parse import quote
+from base.spider import Spider
+from bs4 import BeautifulSoup
+import urllib.request
+import urllib.parse
+import binascii
+import requests
+import base64
+import json
+import time
+import sys
+import re
+import os
+
sys.path.append('..')

# Site base URL: every relative link scraped below is joined onto this.
xurl = "https://cupfoxys.cc"

# Default desktop User-Agent for all requests.
headerx = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
    }

# headerx = {
#     'User-Agent': 'Linux; Android 12; Pixel 3 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36'
# }

# Shared scratch global (declared `global pm` in detailContent; currently unused).
pm = ''
+
+class Spider(Spider):
+ global xurl
+ global headerx
+
    def getName(self):
        # Display name shown by the host app.
        return "首页"
+
    def init(self, extend):
        # No extra initialisation needed for this spider.
        pass

    def isVideoFormat(self, url):
        # Unused: direct-link detection is left to the host app.
        pass

    def manualVideoCheck(self):
        # Unused.
        pass
+
+ def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
+ if pl == 3:
+ plx = []
+ while True:
+ start_index = text.find(start_str)
+ if start_index == -1:
+ break
+ end_index = text.find(end_str, start_index + len(start_str))
+ if end_index == -1:
+ break
+ middle_text = text[start_index + len(start_str):end_index]
+ plx.append(middle_text)
+ text = text.replace(start_str + middle_text + end_str, '')
+ if len(plx) > 0:
+ purl = ''
+ for i in range(len(plx)):
+ matches = re.findall(start_index1, plx[i])
+ output = ""
+ for match in matches:
+ match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
+ if match3:
+ number = match3.group(1)
+ else:
+ number = 0
+ if 'http' not in match[0]:
+ output += f"#{ match[1]}${number}{xurl}{match[0]}"
+ else:
+ output += f"#{ match[1]}${number}{match[0]}"
+ output = output[1:]
+ purl = purl + output + "$$$"
+ purl = purl[:-3]
+ return purl
+ else:
+ return ""
+ else:
+ start_index = text.find(start_str)
+ if start_index == -1:
+ return ""
+ end_index = text.find(end_str, start_index + len(start_str))
+ if end_index == -1:
+ return ""
+
+ if pl == 0:
+ middle_text = text[start_index + len(start_str):end_index]
+ return middle_text.replace("\\", "")
+
+ if pl == 1:
+ middle_text = text[start_index + len(start_str):end_index]
+ matches = re.findall(start_index1, middle_text)
+ if matches:
+ jg = ' '.join(matches)
+ return jg
+
+ if pl == 2:
+ middle_text = text[start_index + len(start_str):end_index]
+ matches = re.findall(start_index1, middle_text)
+ if matches:
+ new_list = [f'{item}' for item in matches]
+ jg = '$$$'.join(new_list)
+ return jg
+
    def homeContent(self, filter):
        """Return the static category tree plus per-category filter groups.

        The whole payload is a hard-coded constant (categories 电影/电视剧/动漫/综艺,
        each with 类型/地区/年代/语言 filter lists); no network request is made.
        Keep the values in sync with categoryContent()'s URL template.
        """
        result = {}
        result ={"class":[{"type_id":"dy","type_name":"电影"},{"type_id":"dsj","type_name":"电视剧"},{"type_id":"dm","type_name":"动漫"},{"type_id":"zy","type_name":"综艺"}],"list":[],"filters":{"dy":[{"key":"类型","name":"类型","value":[{"n":"全部","v":""},{"n":"喜剧","v":"喜剧"},{"n":"爱情","v":"爱情"},{"n":"动作","v":"动作"},{"n":"科幻","v":"科幻"},{"n":"剧情","v":"剧情"},{"n":"战争","v":"战争"},{"n":"警匪","v":"警匪"},{"n":"犯罪","v":"犯罪"},{"n":"动画","v":"动画"},{"n":"奇幻","v":"奇幻"},{"n":"武侠","v":"武侠"},{"n":"冒险","v":"冒险"},{"n":"枪战","v":"枪战"},{"n":"恐怖","v":"恐怖"},{"n":"悬疑","v":"悬疑"},{"n":"惊悚","v":"惊悚"},{"n":"经典","v":"经典"},{"n":"青春","v":"青春"},{"n":"文艺","v":"文艺"},{"n":"微电影","v":"微电影"},{"n":"古装","v":"古装"},{"n":"历史","v":"历史"},{"n":"运动","v":"运动"},{"n":"农村","v":"农村"},{"n":"儿童","v":"儿童"},{"n":"网络电影","v":"网络电影"}]},{"key":"地区","name":"地区","value":[{"n":"全部","v":""},{"n":"大陆","v":"大陆"},{"n":"香港","v":"香港"},{"n":"台湾","v":"台湾"},{"n":"美国","v":"美国"},{"n":"法国","v":"法国"},{"n":"英国","v":"英国"},{"n":"日本","v":"日本"},{"n":"韩国","v":"韩国"},{"n":"德国","v":"德国"},{"n":"泰国","v":"泰国"},{"n":"印度","v":"印度"},{"n":"意大利","v":"意大利"},{"n":"西班牙","v":"西班牙"},{"n":"加拿大","v":"加拿大"},{"n":"其他","v":"其他"}]},{"key":"年代","name":"年代","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"}]},{"key":"语言","name":"语言","value":[{"n":"全部","v":""},{"n":"国语","v":"国语"},{"n":"英语","v":"英语"},{"n":"粤语","v":"粤语"},{"n":"闽南语","v":"闽南语"},{"n":"韩语","v":"韩语"},{"n":"日语","v":"日语"},{"n":"法语","v":"法语"},{"n":"德语","v":"德语"},{"n":"其它","v":"其它"}]}],"dsj":[{"key":"类型","name":"类型","value":[{"n":"全部","v":""},{"n":"爱情","v":"爱情"},{"n":"古装","v":"古装"},{"n":"悬疑","v":"悬疑"},{"n":"都市","v":"都市"},{"n":"武侠","v":"武侠"},{"n":"战争","v":"战争"},{"n":"军旅","v":"军旅"},{"n":"权谋","v":"权谋"},{"n":"青春偶像","v":"青春偶像"},{"n":"喜剧","v":"喜剧"},{"n":"家庭","v":"家庭"},{"n":"犯罪","v":"犯罪"},{"n":"动作","v":"动作"},{"n":"科幻","v":"科幻"},{"n":"竞技","v":"竞技"},{"n":"玄幻","v":"玄幻"},{"n":"奇幻","v":"奇幻"},{"n":"剧情","v":"剧情"},{"n":"历史","v":"历史"},{"n":"经典","v":"经典"},{"n":"乡村","v":"乡村"},{"n":"情景","v":"情景"},{"n":"商战","v":"商战"},{"n":"网剧","v":"网剧"},{"n":"其他","v":"其他"}]},{"key":"地区","name":"地区","value":[{"n":"全部","v":""},{"n":"大陆","v":"大陆"},{"n":"韩国","v":"韩国"},{"n":"香港","v":"香港"},{"n":"台湾","v":"台湾"},{"n":"日本","v":"日本"},{"n":"美国","v":"美国"},{"n":"泰国","v":"泰国"},{"n":"英国","v":"英国"},{"n":"新加坡","v":"新加坡"},{"n":"其他","v":"其他"}]},{"key":"年代","name":"年代","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"}]},{"key":"语言","name":"语言","value":[{"n":"全部","v":""},{"n":"国语","v":"国语"},{"n":"英语","v":"英语"},{"n":"粤语","v":"粤语"},{"n":"闽南语","v":"闽南语"},{"n":"韩语","v":"韩语"},{"n":"日语","v":"日语"},{"n":"其它","v":"其它"}]}],"dm":[{"key":"类型","name":"类型","value":[{"n":"全部","v":""},{"n":"武侠","v":"武侠"},{"n":"战斗","v":"战斗"},{"n":"情感","v":"情感"},{"n":"科幻","v":"科幻"},{"n":"热血","v":"热血"},{"n":"玄幻","v":"玄幻"},{"n":"推理","v":"推理"},{"n":"魔幻","v":"魔幻"},{"n":"搞笑","v":"搞笑"},{"n":"冒险","v":"冒险"},{"n":"萝莉","v":"萝莉"},{"n":"校园","v":"校园"},{"n":"恋爱","v":"恋爱"},{"n":"悬疑","v":"悬疑"},{"n":"日常","v":"日常"},{"n":"真人","v":"真人"},{"n":"历史","v":"历史"},{"n":"经典","v":"经典"},{"n":"动作","v":"动作"},{"n":"机战","v":"机战"},{"n":"竞技","v":"竞技"},{"n":"运动","v":"运动"},{"n":"战争","v":"战争"},{"n":"少年","v":"少年"},{"n":"少女","v":"少女"},{"n":"社会","v":"社会"},{"n":"原创","v":"原创"},{"n":"亲子","v":"亲子"},{"n":"益智","v":"益智"},{"n":"励志","v":"励志"},{"n":"其他","v":"其他"}]},{"key":"地区","name":"地区","value":[{"n":"全部","v":""},{"n":"大陆","v":"大陆"},{"n":"日本","v":"日本"},{"n":"美国","v":"美国"},{"n":"其他","v":"其他"}]},{"key":"年代","name":"年代","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"}]},{"key":"语言","name":"语言","value":[{"n":"全部","v":""},{"n":"国语","v":"国语"},{"n":"英语","v":"英语"},{"n":"粤语","v":"粤语"},{"n":"闽南语","v":"闽南语"},{"n":"韩语","v":"韩语"},{"n":"日语","v":"日语"},{"n":"其它","v":"其它"}]}],"zy":[{"key":"类型","name":"类型","value":[{"n":"全部","v":""},{"n":"真人秀","v":"真人秀"},{"n":"游戏","v":"游戏"},{"n":"竞技","v":"竞技"},{"n":"电竞","v":"电竞"},{"n":"推理","v":"推理"},{"n":"影视","v":"影视"},{"n":"脱口秀","v":"脱口秀"},{"n":"选秀","v":"选秀"},{"n":"情感","v":"情感"},{"n":"访谈","v":"访谈"},{"n":"播报","v":"播报"},{"n":"旅游","v":"旅游"},{"n":"音乐","v":"音乐"},{"n":"喜剧","v":"喜剧"},{"n":"美食","v":"美食"},{"n":"潮流运动","v":"潮流运动"},{"n":"亲子","v":"亲子"},{"n":"文化","v":"文化"},{"n":"互动","v":"互动"},{"n":"晚会","v":"晚会"},{"n":"资讯","v":"资讯"},{"n":"纪实","v":"纪实"},{"n":"曲艺","v":"曲艺"},{"n":"生活","v":"生活"},{"n":"职场","v":"职场"},{"n":"财经","v":"财经"},{"n":"求职","v":"求职"}]},{"key":"地区","name":"地区","value":[{"n":"全部","v":""},{"n":"大陆","v":"大陆"},{"n":"香港","v":"香港"},{"n":"台湾","v":"台湾"},{"n":"日本","v":"日本"},{"n":"韩国","v":"韩国"},{"n":"美国","v":"美国"}]},{"key":"年代","name":"年代","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"}]},{"key":"语言","name":"语言","value":[{"n":"全部","v":""},{"n":"国语","v":"国语"},{"n":"英语","v":"英语"},{"n":"粤语","v":"粤语"},{"n":"闽南语","v":"闽南语"},{"n":"韩语","v":"韩语"},{"n":"日语","v":"日语"},{"n":"其它","v":"其它"}]}]}}

        return result
+
+ def homeVideoContent(self):
+ videos = []
+
+ try:
+ detail = requests.get(url=xurl, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+ doc = BeautifulSoup(res, "lxml")
+ soups = doc.find_all('div', class_="vod-list")
+ for soup in soups:
+ vods = soup.find_all('div', class_="col-xs-4")
+ for vod in vods:
+ names = vod.find('div', class_="vod-item")
+ name = names.find('h3').text
+ id = vod.select_one('h3 a')['href']
+ pics = vod.find_all('div')
+ pic = pics[1]['data-original']
+ if 'http' not in pic:
+ pic = xurl + pic
+ remarks = vod.find('span', class_="text-row-1")
+ remark = remarks.text.strip()
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": remark
+ }
+ videos.append(video)
+ result = {'list': videos}
+ return result
+ except:
+ pass
+ def categoryContent(self, cid, pg, filter, ext):
+ result = {}
+ videos = []
+ if pg:
+ page = int(pg)
+ else:
+ page = 1
+ if '类型' in ext.keys():
+ lxType = ext['类型']
+ else:
+ lxType = ''
+ if '地区' in ext.keys():
+ DqType = ext['地区']
+ else:
+ DqType = ''
+ if '语言' in ext.keys():
+ YyType = ext['语言']
+ else:
+ YyType = ''
+ if '年代' in ext.keys():
+ NdType = ext['年代']
+ else:
+ NdType = ''
+ if '剧情' in ext.keys():
+ JqType = ext['剧情']
+ else:
+ JqType = ''
+ if '排序' in ext.keys():
+ pxType = ext['排序']
+ else:
+ pxType = ''
+ url = f'{xurl}/vod/{cid}-{DqType}--{lxType}-{YyType}----{pg}---{NdType}/'
+ # https://cupfoxys.cc/vod/dsj-%E5%A4%A7%E9%99%86--%E7%88%B1%E6%83%85-%E5%9B%BD%E8%AF%AD-------2024/
+ try:
+ print(url)
+ detail = requests.get(url=url, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+ doc = BeautifulSoup(res, "lxml")
+ soups = doc.find_all('div', class_="row")
+ for soup in soups:
+ vods = soup.find_all('div', class_="col-xs-4")
+ for vod in vods:
+ names = vod.find('div', class_="vod-item")
+ name = names.find('h3').text
+ id = vod.select_one('h3 a')['href']
+ pics = vod.find_all('div')
+ pic = pics[1]['data-original']
+ if 'http' not in pic:
+ pic = xurl + pic
+ remarks = vod.find('span', class_="text-row-1")
+ remark = remarks.text.strip()
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": remark
+ }
+ videos.append(video)
+ except:
+ pass
+ result = {'list': videos}
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
    def detailContent(self, ids):
        """Fetch a detail page and assemble the vod record (play lists, cast).

        NOTE(review): several extract_middle_text() pattern arguments below
        appear garbled (markup seems to have been stripped from the literals,
        leaving raw line breaks inside quotes); left byte-identical -- verify
        against the original source before relying on this method.
        """
        global pm
        did = ids[0]
        result = {}
        videos = []
        if 'http' not in did:
            did = xurl + did
        res = requests.get(url=did, headers=headerx)
        res.encoding = "utf-8"
        res = res.text
        # tiaozhuan: optional hop to a separate play page (disabled).
        tiaozhuan = '0'
        if tiaozhuan == '1':
            didt = self.extract_middle_text(res, 'class="play">', '', 1, 'href="(.*?)"')
            if 'http' not in didt:
                didt = xurl + didt
            ress = requests.get(url=didt, headers=headerx)
            ress.encoding = "utf-8"
            ress = ress.text
        # duoxian: optional multi-line-source aggregation (disabled).
        duoxian = '0'
        if duoxian == '1':
            doc = BeautifulSoup(ress, 'lxml')
            soups = doc.find('span', class_='animate__animated')
            vods = soups.find_all('a')[1:]
            res1 = ''
            for vod in vods:
                url = self.extract_middle_text(str(vod), 'href="', '"', 0)
                if 'http' not in url:
                    url = xurl + url
                resss = requests.get(url, headers=headerx)
                resss.encoding = 'utf-8'
                resss = resss.text
                res1 = res1 + resss
            res2 = ress + res1
        # Remote kill-switch: s1/s2 markers fetched from the author's store.
        url = 'https://9071.kstore.vip/py/yz.txt'
        response = requests.get(url)
        response.encoding = 'utf-8'
        code = response.text
        name = self.extract_middle_text(code, "s1='", "'", 0)
        Jumps = self.extract_middle_text(code, "s2='", "'", 0)
        content = '😸繁华🎉绍剧情📢本资源来源于网络🚓侵权请联系删除👉' + self.extract_middle_text(res,'', '').replace('\u3000\u3000', '')
        if name not in content:
            bofang = Jumps
        else:
            bofang = self.extract_middle_text(res, '
(.*?)')
        xianlu = self.extract_middle_text(res, 'ul class="tab-box','
',2, '(.*?)')
        actors = self.extract_middle_text(res, 'class="fa fa-user-o fa-fw">主演:', '', 1,'href=".*?" target=".*?">(.*?)')
        director = self.extract_middle_text(res, 'class="fa fa-user-o fa-fw">导演:', '', 1,'(.*?)')
        videos.append({
            "vod_id": did,
            "vod_actor": actors,
            "vod_director": director,
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result
+ def playerContent(self, flag, id, vipFlags):
+ parts = id.split("http")
+ xiutan = 0
+ if xiutan == 0:
+ if len(parts) > 1:
+ before_https, after_https = parts[0], 'http' + parts[1]
+ if '239755956819.mp4' in after_https:
+ url = after_https
+ else:
+ res = requests.get(url=after_https, headers=headerx)
+ res = res.text
+ url = self.extract_middle_text(res, '},"url":"', '"', 0).replace('\\', '')
+ result = {}
+ result["parse"] = xiutan
+ result["playUrl"] = ''
+ result["url"] = url
+ result["header"] = headerx
+ return result
+ if xiutan == 1:
+ if len(parts) > 1:
+ before_https, after_https = parts[0], 'http' + parts[1]
+ result = {}
+ result["parse"] = xiutan
+ result["playUrl"] = ''
+ result["url"] = after_https
+ result["header"] = headerx
+ return result
+ def searchContentPage(self, key, quick, page):
+ result = {}
+ videos = []
+ if not page:
+ page = '1'
+ if page == '1':
+ url = f'{xurl}/search/-------------/?wd={key}'
+ else:
+ url = f'{xurl}/search/{key}----------{str(page)}---/'
+ detail = requests.get(url=url, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+ doc = BeautifulSoup(res, "lxml")
+ soups = doc.find_all('div', class_="search-list")
+ for soup in soups:
+ vods = soup.find_all('div', class_="search-item row")
+ for vod in vods:
+ names = vod.find('h2', class_="search-item-title")
+ name = names.find('a')['title']
+ id = vod.find('a')['href']
+ pic = vod.select_one('a div')['data-original']
+ if 'http' not in pic:
+ pic = xurl + pic
+ remarks = vod.find('ul', class_="search-item-desc")
+ remark = remarks.find('li').text
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": remark
+ }
+ videos.append(video)
+ result['list'] = videos
+ result['page'] = page
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+ def searchContent(self, key, quick, pg="1"):
+ return self.searchContentPage(key, quick, '1')
    def localProxy(self, params):
        """Dispatch local-proxy requests by their `type` parameter.

        NOTE(review): proxyM3u8 / proxyMedia / proxyTs are not defined in this
        class or visible in this file -- presumably inherited from the base
        Spider; verify, otherwise every branch raises AttributeError.
        """
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None
+
+
+
diff --git a/py/蓝莓短剧.py b/py/蓝莓短剧.py
new file mode 100644
index 0000000..fd79fc0
--- /dev/null
+++ b/py/蓝莓短剧.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+import requests
+import json
+import time
+import sys
+import urllib.parse
+
sys.path.append('../../')
try:
    from base.spider import Spider
except ImportError:
    # Fallback base class so this file can be imported and tested locally
    # outside the host app.
    class Spider:
        def init(self, extend=""):
            pass
+
+class Spider(Spider):
    def __init__(self):
        # API endpoint base for all requests.
        self.siteUrl = "https://app.whjzjx.cn"
        # Category name -> theater_class_id used by /v1/theater/home_page.
        self.cateManual = {
            "古装": "5",
            "穿越": "17",
            "逆袭": "7",
            "重生": "6"
        }
        # Default request headers; both "user-agent" and "user_agent" are sent
        # -- presumably the server reads the underscored one, TODO confirm.
        self.headers = {
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded",
            "user-agent": "okhttp/4.10.0",
            "user_agent": "Mozilla/5.0 (Linux; Android 9; ASUS_I003DD Build/PI; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/68.0.3440.70 Mobile Safari/537.36",
            "Host": "app.whjzjx.cn",
            "Accept-Encoding": "gzip"
        }
        # Token cache: value + absolute expiry (epoch seconds).
        self.token = None
        self.tokenExpireTime = 0
+
    def getName(self):
        # Spider display name.
        return "蓝莓短剧"

    def init(self, extend=""):
        # Nothing to initialise beyond __init__.
        return
+
+ def isVideoFormat(self, url):
+ # 检查是否为视频格式
+ video_formats = ['.mp4', '.m3u8', '.ts']
+ for format in video_formats:
+ if format in url.lower():
+ return True
+ return False
+
    def manualVideoCheck(self):
        # No manual sniffing needed; playerContent resolves URLs directly.
        return False
+
+ def getToken(self):
+ """获取API访问Token"""
+ # 如果token有效期内,直接返回
+ current_time = time.time()
+ if self.token and current_time < self.tokenExpireTime:
+ return self.token
+
+ # 否则重新获取
+ try:
+ tkurl = 'https://app.whjzjx.cn/v1/account/login'
+ body = "device=20caaae96b3443174bf4ebdbdcc253776"
+
+ response = requests.post(
+ tkurl,
+ headers=self.headers,
+ data=body
+ )
+
+ if response.status_code == 200:
+ json_data = response.json()
+ # 修复:服务器返回的是"ok"而不是0
+ if json_data.get('code') == 0 or json_data.get('code') == "ok" or json_data.get('status') == 0:
+ self.token = json_data['data']['token']
+ # 设置token过期时间为1小时
+ self.tokenExpireTime = current_time + 3600
+ return self.token
+
+ print(f"获取token失败: {response.text}")
+ return None
+ except Exception as e:
+ print(f"获取token异常: {str(e)}")
+ return None
+
+ def fetchWithToken(self, url, method="GET", body=None):
+ """带token的网络请求"""
+ token = self.getToken()
+ if not token:
+ print("无法获取token")
+ return None
+
+ headers = self.headers.copy()
+ headers["authorization"] = token
+
+ try:
+ if method.upper() == "GET":
+ response = requests.get(url, headers=headers, timeout=10)
+ else: # POST
+ response = requests.post(url, headers=headers, data=body, timeout=10)
+
+ response.raise_for_status()
+ return response
+ except Exception as e:
+ print(f"请求失败: {url}, 错误: {str(e)}")
+ return None
+
+ def homeContent(self, filter):
+ """获取首页分类及筛选"""
+ result = {}
+ classes = []
+
+ # 添加分类
+ for k in self.cateManual:
+ classes.append({
+ 'type_id': self.cateManual[k],
+ 'type_name': k
+ })
+
+ result['class'] = classes
+
+ # 获取首页推荐视频
+ try:
+ result['list'] = self.homeVideoContent()['list']
+ except:
+ result['list'] = []
+
+ return result
+
+ def homeVideoContent(self):
+ """获取首页推荐视频内容"""
+ # 使用第一个分类的内容作为首页推荐
+ first_cate = list(self.cateManual.values())[0]
+ result = self.categoryContent(first_cate, 1, False, None)
+ # 不打印错误信息,除非列表为空
+ if not result.get('list'):
+ print("未获取到首页推荐视频")
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ """获取分类内容"""
+ result = {}
+ videos = []
+
+ try:
+ # 构建请求URL:分类页
+ url = f"{self.siteUrl}/v1/theater/home_page?theater_class_id={tid}&page_num={int(pg)-1}&page_size=24"
+
+ response = self.fetchWithToken(url)
+ if not response:
+ return result
+
+ json_data = response.json()
+
+ # 服务器正常响应状态检查,返回"ok"或status=0认为是成功
+ if not(json_data.get('code') == 0 or json_data.get('code') == "ok" or json_data.get('status') == 0):
+ print(f"获取分类数据失败: {json_data}")
+ return result
+
+ # 不再打印json_data,而是处理正常返回的数据
+ # 解析视频列表
+ data_list = json_data.get('data', {}).get('list', [])
+ for item in data_list:
+ theater = item.get('theater', {})
+ if not theater:
+ continue
+
+ video_id = theater.get('id')
+ title = theater.get('title')
+ cover = theater.get('cover_url')
+ total = theater.get('total', '')
+ play_amount = theater.get('play_amount_str', '')
+
+ videos.append({
+ "vod_id": video_id,
+ "vod_name": title,
+ "vod_pic": cover,
+ "vod_remarks": f"{total}集",
+ "vod_content": f"播放量:{play_amount}"
+ })
+
+ # 构建返回结果
+ result = {
+ 'list': videos,
+ 'page': pg,
+ 'pagecount': 9999, # 假设有很多页
+ 'limit': 24,
+ 'total': 999999 # 设置一个较大数值
+ }
+ except Exception as e:
+ print(f"获取分类内容异常: {str(e)}")
+
+ return result
+
+ def detailContent(self, ids):
+ """获取详情页内容"""
+ video_id = ids[0]
+ result = {}
+
+ try:
+ # 构建详情页请求URL
+ url = f"{self.siteUrl}/v2/theater_parent/detail?theater_parent_id={video_id}"
+
+ response = self.fetchWithToken(url)
+ if not response:
+ return {}
+
+ json_data = response.json()
+ if not(json_data.get('code') == 0 or json_data.get('code') == "ok" or json_data.get('status') == 0):
+ print(f"获取详情数据失败: {json_data}")
+ return {}
+
+ # 解析详情数据
+ data = json_data.get('data', {})
+ title = data.get('title', '')
+ cover = data.get('cover_url', '')
+ total = data.get('total', '')
+
+ # 提取剧集列表
+ theaters = data.get('theaters', [])
+ episodes = []
+
+ for index, theater in enumerate(theaters):
+ ep_name = f"第{theater.get('num', '')}集"
+ # 生成格式为 video_id_episode_index 的ID,方便playerContent提取
+ ep_url = f"{video_id}_{index}"
+ episodes.append(f"{ep_name}${ep_url}")
+
+ # 构建VOD数据
+ vod = {
+ "vod_id": video_id,
+ "vod_name": title,
+ "vod_pic": cover,
+ "vod_remarks": f"{total}集",
+ "vod_content": data.get('introduction', ''),
+ "vod_play_from": "蓝莓短剧",
+ "vod_play_url": "#".join(episodes)
+ }
+
+ result = {
+ 'list': [vod]
+ }
+ except Exception as e:
+ print(f"获取详情内容异常: {str(e)}")
+
+ return result
+
+ def searchContent(self, key, quick, pg=1):
+ """搜索功能"""
+ result = {}
+ videos = []
+
+ try:
+ # 构建搜索请求
+ url = f"{self.siteUrl}/v2/search"
+ body = f"text={urllib.parse.quote(key)}"
+
+ response = self.fetchWithToken(url, method="POST", body=body)
+ if not response:
+ return {}
+
+ json_data = response.json()
+ # 修改这里,使用与detailContent相同的条件判断
+ if not(json_data.get('code') == 0 or json_data.get('code') == "ok" or json_data.get('status') == 0):
+ print(f"搜索数据失败: {json_data}")
+ return {}
+
+ # 解析搜索结果
+ search_data = json_data.get('data', {}).get('search_data', [])
+ for item in search_data:
+ video_id = item.get('id')
+ title = item.get('title')
+ cover = item.get('cover_url')
+ score = item.get('score_str', '')
+ total = item.get('total', '')
+
+ videos.append({
+ "vod_id": video_id,
+ "vod_name": title,
+ "vod_pic": cover,
+ "vod_remarks": f"{score}|{total}集"
+ })
+
+ result = {
+ 'list': videos,
+ 'page': pg
+ }
+ except Exception as e:
+ print(f"搜索内容异常: {str(e)}")
+
+ print(11111111, result)
+ return result
+
    def searchContentPage(self, key, quick, pg=1):
        # Thin alias for host apps that call the paged search entry point.
        return self.searchContent(key, quick, pg)
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode id into a playable URL."""
        result = {}

        # Already a direct media URL? Return it unchanged.
        if self.isVideoFormat(id):
            result["parse"] = 0
            result["url"] = id
            result["playUrl"] = ""
            result["header"] = json.dumps(self.headers)
            return result

        # Otherwise decode the id into (drama id, episode index).
        try:
            if id.isdigit():
                # Bare numeric id == drama id; default to its first episode.
                video_id = id
                ep_index = 0
            elif '_' in id:
                # detailContent encodes ids as "<video_id>_<episode_index>".
                parts = id.split('_')
                if len(parts) >= 2:
                    video_id = parts[0]  # the purely numeric drama id
                    ep_index = int(parts[1])
                else:
                    video_id = id
                    ep_index = 0
            else:
                # Assume the id already is a video URL.
                result["parse"] = 0
                result["url"] = id
                result["playUrl"] = ""
                result["header"] = json.dumps(self.headers)
                return result

            # Fetch the episode list from the detail endpoint; only the pure
            # numeric id may be used as theater_parent_id.
            detail_url = f"{self.siteUrl}/v2/theater_parent/detail?theater_parent_id={video_id}"
            print(f"请求详情URL: {detail_url}")
            detail_response = self.fetchWithToken(detail_url)

            if not detail_response or detail_response.status_code != 200:
                print("获取详情数据失败")
                return result

            detail_json = detail_response.json()
            # Same tri-state success check as detailContent.
            if not(detail_json.get('code') == 0 or detail_json.get('code') == "ok" or detail_json.get('status') == 0):
                print(f"获取详情数据错误: {detail_json}")
                return result

            # Episode list for this drama.
            theaters = detail_json.get('data', {}).get('theaters', [])

            if not theaters or ep_index >= len(theaters):
                print(f"未找到剧集或索引超出范围: {ep_index}")
                return result

            # Pick the requested episode and its stream URL.
            episode = theaters[ep_index]
            video_url = episode.get('son_video_url', '')

            if not video_url:
                print(f"未找到视频URL")
                return result

            # Playback headers expected by the CDN.
            play_headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
                "Referer": "http://qcapp.xingya.com.cn/"
            }

            # Final play descriptor.
            result["parse"] = 0
            result["url"] = video_url
            result["playUrl"] = ""
            result["header"] = json.dumps(play_headers)

        except Exception as e:
            print(f"获取播放内容异常: {str(e)}")
            import traceback
            print(traceback.format_exc())

        return result
+
    def localProxy(self, param):
        """Local proxy stub: echo the parameter back as an MPEG-TS response."""
        return [200, "video/MP2T", {}, param]
\ No newline at end of file
diff --git a/py/视觉APP.py b/py/视觉APP.py
new file mode 100644
index 0000000..8d6df57
--- /dev/null
+++ b/py/视觉APP.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append("..")
+import re
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from base64 import b64encode, b64decode
+import json
+from base.spider import Spider
+from urllib.parse import quote
+
+
+class Spider(Spider):
+
    def getName(self):
        # Spider display name.
        return "视觉"
+
    def init(self, extend=""):
        # Resolve the active API domain once; after this call self.host is a
        # string (it intentionally shadows the host() method below).
        self.host = self.host()
        pass

    def isVideoFormat(self, url):
        # Unused hook.
        pass

    def manualVideoCheck(self):
        # Unused hook.
        pass

    def action(self, action):
        # Unused hook.
        pass

    def destroy(self):
        # Unused hook.
        pass
+
+ def homeContent(self, filter):
+ data = self.fetch(
+ f"{self.host}/api/v3/drama/getCategory?orderBy=type_id",
+ headers=self.headers,
+ ).json()
+ dy = {
+ "class": "类型",
+ "area": "地区",
+ "lang": "语言",
+ "year": "年份",
+ "letter": "字母",
+ "by": "排序",
+ "sort": "排序",
+ }
+ filters = {}
+ classes = []
+ for item in data["data"]:
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item["converUrl"])
+ classes.append({"type_name": item["name"], "type_id": str(item["id"])})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [
+ {"n": value.strip(), "v": value.strip()}
+ for value in values
+ if value.strip() != ""
+ ]
+ filters[str(item["id"])].append(
+ {"key": dkey, "name": dy[dkey], "value": value_array}
+ )
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ return result
+
+ def homeVideoContent(self):
+ data = self.fetch(f"{self.host}/api/ex/v3/security/tag/list", headers=self.headers).json()["data"]
+ data1 = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)
+ list = []
+ for item in data1[0]['carousels']:
+ id = item['link'].split("id=")[1]
+ list.append({
+ "vod_id": id,
+ 'vod_name': item.get("title"),
+ 'vod_pic': item.get("cover"),
+ 'vod_remarks': item.get("sort"),
+ })
+ result = {"list": list}
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ params = []
+ if extend.get('area'):
+ params.append(f"vodArea={extend['area']}")
+ if extend.get('classs'):
+ params.append(f"vodClass={extend['class']}")
+ params.append("pagesize=20")
+ params.append(f"typeId1={tid}")
+ params.append(f"page={pg}")
+ if extend.get('year'):
+ params.append(f"vodYear={extend['year']}")
+ body = '&'.join(params)
+ path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
+ data = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()[
+ "data"]
+ data = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['list']
+ list = []
+ for item in data:
+ list.append({
+ 'vod_id': item.get("id"),
+ 'vod_pic': item["coverImage"].get("path"),
+ 'vod_name': item.get("name"),
+ 'vod_year': item.get("year"),
+ 'vod_remarks': item.get("remark")
+ })
+ result = {}
+ result["list"] = list
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
    def detailContent(self, ids):
        """Fetch drama detail and group episode lists by play source."""
        url = f"{self.host}/api/v3/drama/getDetail?id={ids[0]}"
        data = self.post(url, headers=self.headers).json()["data"]
        vod = {
            'vod_name': data.get("name"),
            'vod_area': data.get("area"),
            'type_name': data.get("clazz"),
            'vod_actor': data.get("actor"),
            'vod_director': data.get("director"),
            'vod_content': data.get("brief").strip(),
        }
        play = []
        names = []
        plays = {}
        # NOTE(review): the guard checks sourceCn but the dict is keyed by
        # source -- if two distinct sources shared one sourceCn this would
        # KeyError on append; appears to assume a 1:1 mapping. Verify.
        for itt in data["videos"]:
            if itt["sourceCn"] not in names:
                plays[itt["source"]] = []
                names.append(itt["sourceCn"])
            # Direct media files bypass the encrypted-lookup URL format.
            url = f"vodPlayFrom={itt['source']}&playUrl={itt['path']}"
            if re.search(r"\.(mp4|m3u8|flv)$", itt["path"]):
                url = itt["path"]
            plays[itt["source"]].append(f"{itt['titleOld']}${url}")
        for it in plays:
            play.append("#".join(plays[it]))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result
+
+ def searchContent(self, key, quick, pg=1):
+ body = f"pagesize=20&page={pg}&searchKeys={key}"
+ path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
+ data = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()[
+ "data"]
+ data = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['list']
+ list = []
+ for item in data:
+ list.append({
+ 'vod_id': item.get("id"),
+ 'vod_pic': item["coverImage"].get("path"),
+ 'vod_name': item.get("name"),
+ 'vod_year': item.get("year"),
+ 'vod_remarks': item.get("remark")
+ })
+ result = {"list": list, "page": pg}
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ url = id
+ if "vodPlayFrom" in url:
+ try:
+ path = self.aes(self.aes(id, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
+ data = \
+ self.fetch(f"{self.host}/api/ex/v3/security/videoUsableUrl?query={path}", headers=self.headers).json()[
+ "data"]
+ url = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['playUrl']
+ # try:
+ # url1 = self.fetch(url, headers=self.headers, timeout=5, allow_redirects=False).headers['Location']
+ # if "http" in url1 and url1:
+ # url = url1
+ # except:
+ # pass
+ except Exception as e:
+ pass
+ if '.jpg' in url or '.jpeg' in url or '.png' in url:
+ url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
+ result = {}
+ result["parse"] = 0
+ result["url"] = url
+ result["header"] = {'User-Agent': 'okhttp/3.12.1'}
+ return result
+
+ def localProxy(self, param):
+ url = b64decode(param["url"]).decode('utf-8')
+ durl = url[:url.rfind('/')]
+ data = self.fetch(url, headers=self.headers).content.decode("utf-8")
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if '#EXT' not in string and 'http' not in string:
+ lines[index] = durl + ('' if string.startswith('/') else '/') + string
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
    def host(self):
        """Resolve the current API domain from a remote token file; fall back
        to a hard-coded address when the lookup fails."""
        try:
            url = self.fetch('https://www.shijue.pro/token.txt', headers=self.headers).json()['domain']
            return url
        except:
            return "http://118.25.18.217:6632"
+
    # Default API headers, and the two AES-ECB keys: key[0] = outer layer,
    # key[1] = inner layer (see aes() below).
    headers = {
        'User-Agent': 'okhttp/3.12.1',
        'Content-Type': 'application/json;'
    }
    key = ['TFLYWVJ5EG5YB1PLZLVVMGVLBGRIDCSW', 'nj6E5K4yYYT5W4ScJ3J3rJ2zrzcJkpTk']
+
+ def aes(self, word, key, mode='decrypt', bool=False):
+ key = key.encode('utf-8')
+ if mode == 'decrypt':
+ word = b64decode(word)
+ cipher = AES.new(key, AES.MODE_ECB)
+ decrypted = cipher.decrypt(word)
+ word = unpad(decrypted, AES.block_size).decode('utf-8')
+ if bool:
+ word = json.loads(word)
+ elif mode == 'encrypt':
+ cipher = AES.new(key, AES.MODE_ECB)
+ padded = pad(word.encode('utf-8'), AES.block_size)
+ encrypted = cipher.encrypt(padded)
+ word = b64encode(encrypted).decode('utf-8')
+ if bool:
+ word = quote(word)
+ return word
+
+
diff --git a/py/边缘影视.py b/py/边缘影视.py
new file mode 100644
index 0000000..0ebb1dd
--- /dev/null
+++ b/py/边缘影视.py
@@ -0,0 +1,340 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import binascii
+import json
+import os
+import re
+import sys
+import time
+import uuid
+from urllib.parse import urlparse
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+from base64 import b64encode, b64decode
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import AES, PKCS1_v1_5
+from Crypto.Util.Padding import unpad, pad
+from Crypto.Hash import MD5
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = {
+ 'AppID': '534',
+ 'app_id': '534',
+ 'version': '1.0.3',
+ 'package': 'com.hjmore.wallpaper',
+ 'user_id': '3507f394e83d2424',
+ 'user-id': '3507f394e83d2424',
+ 'app_name': 'lanlan',
+ 'app-name': 'lanlan',
+ 'Content-Type': 'application/json; charset=utf-8;',
+ 'User-Agent': 'okhttp/4.9.0'
+ }
+
    def homeContent(self, filter):
        # Home page: fetch the index, build the category list, and collect the
        # recommended videos ('tj' plus each sub-category's preview list).
        hdata=self.getdata('/api.php/provide/index',self.getbody({'tid':'0'}))
        vlist=hdata['data'].get('tj',[])
        result = {}
        classes = []
        filters = {}
        for i in hdata['data']['sub_data']:
            id=str(i['type_id'])
            classes.append({'type_id': id, 'type_name': i['type_name']})
            if len(i['data']):
                vlist.extend(i['data'])
        # Fetch each category's filter definitions concurrently; getf returns
        # (type_id, filter_list) and swallows its own errors.
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            for id, ft in results:
                if len(ft):filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        result['list'] = vlist
        return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body={
+ "tid": tid,
+ "type": extend.get('type'),
+ "lang": extend.get('lang'),
+ "area": extend.get('area'),
+ "year": extend.get('year'),
+ "pg": pg
+ }
+ body = {k: v for k, v in body.items() if v is not None and v != ""}
+ data=self.getdata('/api.php/provide/nav',self.getbody(body))
+ result = {}
+ result['list'] = data['data']['data']
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+ pass
+
+ def detailContent(self, ids):
+ data=self.getdata('/api.php/provide/vod',self.getbody({'ids':ids[0]}))
+ vod=data['data']
+ plist=[]
+ names=[]
+ for i in vod['vod_play_url']:
+ ulist=[]
+ names.append(i['name'].split(' ')[0])
+ jdata={'parse':''}
+ if i.get('parse') and isinstance(i['parse'], list) and len(i['parse']):
+ jdata['parse']=self.e64(json.dumps(i['parse']))
+ for j in i['data']:
+ jdata['url']=j['url']
+ ulist.append(f'{j["name"]}${self.e64(json.dumps(jdata))}')
+ plist.append('#'.join(ulist))
+ vod['vod_play_from']='$$$'.join(names)
+ vod['vod_play_url']='$$$'.join(plist)
+ vod.pop('cover_list', None)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ body={"wd":key,"tid":"0","pg":pg}
+ data=self.getdata('/api.php/provide/search',self.getbody(body))
+ vlist=[]
+ for i in data['data']:
+ i.pop('vod_play_from', None)
+ vlist.append(i)
+ return {'list':vlist,'page':pg}
+
    def playerContent(self, flag, id, vipFlags):
        # `id` is a base64 JSON blob built in detailContent:
        # {'parse': <b64-encoded parser list or ''>, 'url': <raw play url>}.
        data=json.loads(self.d64(id))
        parse=data.get('parse')
        url,p,head = data.get('url'),1,''
        if parse:
            parse=json.loads(self.d64(parse))
        # Direct media links (m3u8/mp4/flv) skip parsing; otherwise try each
        # parser endpoint in turn until one yields a URL.
        # NOTE(review): the pattern uses '.mp4' with an unescaped dot, so it
        # also matches e.g. 'xmp4' — confirm whether that is intended.
        if not re.search(r'\.m3u8|.mp4|\.flv', url) and parse:
            for p in parse:
                # NOTE(review): loop variable `p` is deliberately reused below
                # as the 0/1 "needs webview parse" flag — confusing but kept.
                try:
                    data=self.fetch(f'{p}{url}',self.headers).json()
                    url=data.get('data',{}).get('url') or data.get('url')
                    head=data.get('data',{}).get('header') or data.get('header')
                    p=0
                    break
                except:
                    # On failure fall back to whatever 'url' the last response
                    # (or the original blob) carried and mark as parse=1.
                    p,url=1,data.get('url')
        # The header from any parser response is discarded: a fixed UA is
        # always returned to the player.
        head = {'User-Agent': 'okhttp/4.9.0'}
        return {'parse': p, 'url': url, 'header': head}
+
+ def localProxy(self, param):
+ pass
+
+ def getf(self, map):
+ ft,id =[], map['type_id']
+ try:
+ fdata = self.getdata('/api.php/provide/nav', self.getbody({'tid': id, 'pg': '1'}))
+ dy = ['area', 'year', 'lang', 'type']
+ fd = fdata['data']['type_extend']
+ has_non_empty_field = False
+ for key in dy:
+ if key in fd and fd[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ for dkey in fd:
+ if dkey in dy and fd[dkey].strip() != "":
+ values = fd[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ ft.append({"key": dkey, "name": dkey, "value": value_array})
+ return (id, ft)
+ except:
+ return (id, ft)
+
+ def getskey(self):
+ random_bytes = os.urandom(16)
+ return binascii.hexlify(random_bytes).decode()
+
+ def getohost(self):
+ url='https://bianyuan001.oss-cn-beijing.aliyuncs.com/huidu1.0.0.json'
+ response = self.fetch(url, headers=self.headers).json()
+ return response['servers'][0]
+
+ def gethost(self):
+ body={
+ "gr_rp_size": "1080*2272",
+ "gr_app_list": "%E5%B1%8F%E5%B9%95%E5%BD%95%E5%88%B6%EF%BC%88com.miui.screenrecorder%29%0A%E5%A4%B8%E5%85%8B%EF%BC%88com.quark.browser%29%0A%E8%BE%B9%E7%BC%98%E8%A7%86%E9%A2%91%EF%BC%88com.hjmore.wallpaper%29%0A%E5%93%94%E5%93%A9%E5%93%94%E5%93%A9%EF%BC%88tv.danmaku.bili%29%0A%E7%81%AB%E6%98%9F%E6%90%9C%E9%A2%98%EF%BC%88com.fenbi.android.souti%29%0A%E6%94%AF%E4%BB%98%E5%AE%9D%EF%BC%88com.eg.android.AlipayGphone%29%0AWPS%20Office%EF%BC%88cn.wps.moffice_eng%29",
+ "gr_lal": "0.0%2C0.0",
+ "gr_system_type": "android",
+ "gr_device_imei": "3507f394e83d2424",
+ "gr_app_version": "1.0.3",
+ "gr_device_model": "Xiaomi%20M2012K10C%20%28Android%20%E7%89%88%E6%9C%AC%3A%2011%2C%20SDK%E7%89%88%E6%9C%AC%3A%2030%29",
+ "gr_city": "%E8%B4%B5%E5%B7%9E%2C%E6%9C%AA%E7%9F%A5%2C%E6%9C%AA%E7%9F%A5",
+ "requestId": self.uuid(),
+ "timeStamp": str(int(time.time() * 1000)),
+ "version": "1.0.3",
+ "package": "com.hjmore.wallpaper",
+ "userLoginToken": "",
+ "app_id": "534",
+ "appName": 2131951658,
+ "device_id": "3507f394e83d2424",
+ "device-id": "3507f394e83d2424",
+ "oaid": "",
+ "imei": "",
+ "referer_shop": "边缘影视",
+ "referer-shop": "边缘影视",
+ "access_fine_location": 0,
+ "access-fine-location": 0
+ }
+ ohost = self.getohost()
+ data=self.getdata(f'/api.php/settings/grayscale_list',body,ohost)
+ parsed_url = urlparse(data['data']['grayscale']['server_url'][0])
+ domain = parsed_url.scheme + "://" + parsed_url.netloc
+ return domain
+
    def drsa(self, encrypted_data):
        """RSA-decrypt a base64 blob (the server's 'Sign' response header).

        Uses PKCS#1 v1.5 with a hard-coded private key; the plaintext is the
        AES key needed to decrypt the response body (see getdata).
        NOTE(review): the PEM header says "RSA PRIVATE KEY" but the payload
        looks PKCS#8-shaped, and the PEM lines carry leading indentation from
        the triple-quoted string — pycryptodome's import_key appears to
        tolerate both, but verify against the installed version.
        """
        private_key_pem = """-----BEGIN RSA PRIVATE KEY-----
    MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDA5NWiAwRjH50/
    IJY1N0zLopa4jpuWE7kWMn1Qunu6SjBgTvNRmRUoPDHn54haLfbfXIa2X+/sIaMB
    /O3HhrpVsz55E5W2vpZ5fBYWh+M65bQERKTW+l72H7GR9x0yj3QPByzzfsj/QkyP
    81prpwR9i8yMe7yG9TFKqUQCPE+/GrhNU1Qf6nFmV+vMnlP9DantkwAt4fPOMZn3
    j4da65/1YQV+F5bYzaLenNVKbHf8U8fVYLZWIy4yk2Vpe4R2Z+JX/eHWsChE9hOu
    iFm02eTW5NJLZlWUxYrSE23VXi8oXSEdON3UEOrwSdAUh4SXxLZ9U7KpNVdTwWyR
    AS4GyzJ/AgMBAAECggEBAKzmcXefLLeNBu4mz30z7Go7es5DRcLoOudiqmFKRs1c
    4q/xFLj3drdx/WnZZ6ctvDPKRBYFOJF4NRz7Ekfew/c9i6oLnA8KFuceCs53T37j
    ltCclwT7t1L2ZbxovIsteuJdlDVOV+w2CVqez1Xfh27heKAT6ZEvBtfdkVBPr0uj
    oVwa2+XlJmYZw5dHeB7ySVeAQ+69zDuADB8OWxPWsv6Del+Fhf0kTHAw4WgqcYsd
    JUunCjgLdJUlDgXzH/M/Nj8NYVEuq6QpmhaktJ4fwn/F7u3lQllVCFKj5lr0Xb92
    y7lvQlGqMKX1oxf+P5c5/vie1kDx1Rj4S++flIcVlUECgYEA4BuxCZ1c8oOF98bs
    KTAONnnZniQ1BRt7rA+O9+++lDjxJhxkuthwjB9YzrnZtxHJtvIIie9Jv8MVfzHa
    p2woDtiEh3YYwmIlgNUFvTcGe++tTiEiLDcGc/xNhpvfbLaw9QB7/HQ+LT1QCMxJ
    ufdBrR98l0khIGjYqxDW3W5pV70CgYEA3Ff/9+GM2XI/EUSTYrpnwp5R5OsXz1DL
    3CFFgp1EPCNk/c3YNWnrUtTkfmKAlRqWIHfphvH/jS6jpGrfRxDggPwGMtBc134b
    brIM5i4KNj/EcE+w5g03HaKBf1ZihHDQ53c6wTn6IFOHJNSPRLqMNqRymfbclNyO
    lBMHQmB8yOsCgYBCdZPTwRnuRTi2WQRx1nFwkEQL1Lrwb80GInsIZc2DkTtaTPNG
    QadmtmkUrSK2Wo0SNsZ3eUHKn2TBmpw4KCfc9zKeJVSEWKy8fu+7xBSlLlebotHK
    gOrl/H1VHOZuC+OAVItwO1yw98zDPynh/0Q3ve2pw6MSRGV0nYLKmdKdlQKBgQCJ
    Ty1rw1qKhu9WS22tMIxIc3CFPxtvTeI8I1+1rVtAPq5Im2YIoyDKVXCucaO/RvoW
    8aLNPTELQe0oIJFTL+k3d9ZFBCNXBncB3GK9biNe+w3nD0IlmkamaQZZ2/M4pTUJ
    iPtMPlzomCS3ht5g7f9CbegcmgGLooYXMGRtsMMSUQKBgQCoj+3UciH2i+HyUla5
    1FxivjH3MqSTE4Q7OdzrELb6DoLYzjgWAbpG8HIuodD4uG5xz1oR5H7vkblf1itB
    hwOwDEiabyX76e/I3Q0ovwBV+9PMjM4UVU0kHoiu3Z2s90ckwNh58w3QH5fn9E0b
    fqMnB6uWze+xrXWijaOzVZhIZg==
    -----END RSA PRIVATE KEY-----"""
        private_key = RSA.import_key(private_key_pem)
        cipher = PKCS1_v1_5.new(private_key)
        # Second argument is the sentinel returned on decryption failure.
        decrypted_data = cipher.decrypt(b64decode(encrypted_data), None)
        return decrypted_data.decode('utf-8')
+
    def ersa(self, data):
        """RSA-encrypt `data` (PKCS#1 v1.5) with the server's public key and
        return base64 — used for the request 'Sign' header in getdata.

        NOTE(review): uses the legacy `RSA.importKey` alias (drsa uses
        `import_key`); both resolve to the same function in pycryptodome.
        """
        public_key = """-----BEGIN PUBLIC KEY-----
    MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+0QMb3WDXjNBRovRhTLH
    g3d+CliZAva2tepWNNN0Pj6DgE3ZTnPR34iL/cjo9Jbd3dqAJs/YkKnFurGkDxz5
    TthIqvmz244wiFcHt+FGWoJsj5ZVvrH3pPwH85ggmI1DjxSJEUhB12Z9X6FGli8D
    drR9xeLe5y8vFekux8xCQ7pwH1mNQu4Wy32WVM8aLjmRjNzEWOvEMAWCRuwymEdS
    zlWoH53qk1dqd6DAmOJhWU2hH6Yt2ZY9LTaDGiHrS+g0DuwajAQzhbM8eonGYMph
    nP4q0UTHWEfaGR3HoILmeM32M+qF/UCGfgfR6tCMiXPoHwnD2zoxbZ2p+QlYuTZL
    vQIDAQAB
    -----END PUBLIC KEY-----"""
        key = RSA.importKey(public_key)
        cipher = PKCS1_v1_5.new(key)
        encrypted = cipher.encrypt(data.encode())
        return b64encode(encrypted).decode()
+
+ def eaes(self, data, key):
+ key = key.encode('utf-8')
+ cipher = AES.new(key, AES.MODE_ECB)
+ padded = pad(data.encode('utf-8'), AES.block_size)
+ encrypted = cipher.encrypt(padded)
+ word = b64encode(encrypted).decode('utf-8')
+ return word
+
+ def daes(self, encrypted_data, key):
+ key = key.encode('utf-8')
+ cipher = AES.new(key, AES.MODE_ECB)
+ encrypted = b64decode(encrypted_data)
+ decrypted = cipher.decrypt(encrypted)
+ unpadded = unpad(decrypted, AES.block_size)
+ return unpadded.decode('utf-8')
+
+ def getbody(self,params=None):
+ body = {
+ "requestId": self.uuid(),
+ "timeStamp": str(int(time.time()*1000)),
+ "version": "1.0.3",
+ "package": "com.hjmore.wallpaper",
+ "userLoginToken": "",
+ "app_id": "534",
+ "appName": 2131951658,
+ "device_id": "3507f394e83d2424",
+ "device-id": "3507f394e83d2424",
+ "oaid": "",
+ "imei": "",
+ "referer_shop": "边缘影视",
+ "referer-shop": "边缘影视",
+ "access_fine_location": 0,
+ "access-fine-location": 0
+ }
+ if params:
+ body.update(params)
+ return body
+
    def getdata(self, path, body,host=None):
        # Signed request flow:
        #  1) md5 the JSON body, generate a fresh AES session key,
        #  2) RSA-encrypt {key, sign} into the 'Sign' request header,
        #  3) AES-ECB-encrypt the JSON body with the session key,
        #  4) if the server answers with its own 'Sign' header, RSA-decrypt it
        #     to recover the response AES key and decrypt the payload.
        jdata=json.dumps(body)
        msign = self.md5(jdata)
        skey = self.getskey()
        jsign={'key': skey,'sign': msign}
        Sign=self.ersa(json.dumps(jsign))
        header=self.headers.copy()
        header['Sign']=Sign
        dbody=self.eaes(jdata, skey)
        response = self.post(f'{host or self.host}{path}', headers=header, data=dbody)
        rdata=response.text
        if response.headers.get('Sign'):
            dkey=self.drsa(response.headers['Sign'])
            rdata=self.daes(rdata, dkey)
        return json.loads(rdata)
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self,text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+ def uuid(self):
+ return str(uuid.uuid4())
+
+
+
+
diff --git a/py/金牌.py b/py/金牌.py
new file mode 100644
index 0000000..6fa7e9a
--- /dev/null
+++ b/py/金牌.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import threading
+import uuid
+import requests
+sys.path.append('..')
+from base.spider import Spider
+import time
+from Crypto.Hash import MD5, SHA1
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ if extend:
+ hosts=json.loads(extend)['site']
+ self.host = self.host_late(hosts)
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
+ fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
+ result = {}
+ classes = []
+ filters={}
+ for k in cdata['data']:
+ classes.append({
+ 'type_name': k['typeName'],
+ 'type_id': str(k['typeId']),
+ })
+ sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
+ for tid, d in fdata['data'].items():
+ current_sort_values = sort_values.copy()
+ if tid == '1':
+ del current_sort_values[0]
+ filters[tid] = [
+ {"key": "type", "name": "类型",
+ "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},
+
+ *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
+ "value": [{"n": i["itemText"], "v": i["itemText"]}
+ for i in d["plotList"]]}]),
+
+ {"key": "area", "name": "地区",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},
+
+ {"key": "year", "name": "年份",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},
+
+ {"key": "lang", "name": "语言",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},
+
+ {"key": "sort", "name": "排序", "value": current_sort_values}
+ ]
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
+ data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
+ data=[]
+ for i in data1['data'].values():
+ data.extend(i['list'])
+ data.extend(data2['data'])
+ vods=self.getvod(data)
+ return {'list':vods}
+
+ def categoryContent(self, tid, pg, filter, extend):
+
+ params = {
+ "area": extend.get('area', ''),
+ "filterStatus": "1",
+ "lang": extend.get('lang', ''),
+ "pageNum": pg,
+ "pageSize": "30",
+ "sort": extend.get('sort', '1'),
+ "sortBy": "1",
+ "type": extend.get('type', ''),
+ "type1": tid,
+ "v_class": extend.get('v_class', ''),
+ "year": extend.get('year', '')
+ }
+ data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
+ result = {}
+ result['list'] = self.getvod(data['data']['list'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
+ vod=self.getvod([data['data']])[0]
+ vod['vod_play_from']='嗷呜有金牌'
+ vod['vod_play_url'] = '#'.join(
+ f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
+ vod['episodelist'])
+ vod.pop('episodelist', None)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ params = {
+ "keyword": key,
+ "pageNum": pg,
+ "pageSize": "8",
+ "sourceCode": "1"
+ }
+ data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
+ vods=self.getvod(data['data']['result']['list'])
+ return {'list':vods,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ self.header = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'DNT': '1',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
+ 'sec-ch-ua-mobile': '?0',
+ 'Origin': self.host,
+ 'Referer': f'{self.host}/'
+ }
+ ids=id.split('@@')
+ pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
+ vlist=[]
+ for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
+ return {'parse':0,'url':vlist,'header':self.header}
+
+ def localProxy(self, param):
+ pass
+
+ def host_late(self, url_list):
+ if isinstance(url_list, str):
+ urls = [u.strip() for u in url_list.split(',')]
+ else:
+ urls = url_list
+ if len(urls) <= 1:
+ return urls[0] if urls else ''
+
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ response = requests.head(url, timeout=1.0, allow_redirects=False)
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ except Exception as e:
+ results[url] = float('inf')
+ for url in urls:
+ t = threading.Thread(target=test_host, args=(url,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+ return min(results.items(), key=lambda x: x[1])[0]
+
+ def md5(self, sign_key):
+ md5_hash = MD5.new()
+ md5_hash.update(sign_key.encode('utf-8'))
+ md5_result = md5_hash.hexdigest()
+ return md5_result
+
+ def js(self, param):
+ return '&'.join(f"{k}={v}" for k, v in param.items())
+
+ def getheaders(self, param=None):
+ if param is None:param = {}
+ t=str(int(time.time()*1000))
+ param['key']='cb808529bae6b6be45ecfab29a4889bc'
+ param['t']=t
+ sha1_hash = SHA1.new()
+ sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
+ sign = sha1_hash.hexdigest()
+ deviceid = str(uuid.uuid4())
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'Accept': 'application/json, text/plain, */*',
+ 'sign': sign,
+ 't': t,
+ 'deviceid':deviceid
+ }
+ return headers
+
+ def convert_field_name(self, field):
+ field = field.lower()
+ if field.startswith('vod') and len(field) > 3:
+ field = field.replace('vod', 'vod_')
+ if field.startswith('type') and len(field) > 4:
+ field = field.replace('type', 'type_')
+ return field
+
+ def getvod(self, array):
+ return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]
+
diff --git a/py/金牌影视.py b/py/金牌影视.py
new file mode 100644
index 0000000..815951a
--- /dev/null
+++ b/py/金牌影视.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import threading
+import uuid
+import requests
+sys.path.append('..')
+from base.spider import Spider
+import time
+from Crypto.Hash import MD5, SHA1
+
+class Spider(Spider):
+ '''
+ 配置示例:
+ {
+ "key": "xxxx",
+ "name": "xxxx",
+ "type": 3,
+ "api": ".所在路径/金牌.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1,
+ "changeable": 1,
+ "ext": {
+ "site": "https://www.jiabaide.cn,域名2,域名3"
+ }
+ },
+ '''
+ def init(self, extend=""):
+ if extend:
+ hosts=json.loads(extend)['site']
+ self.host = self.host_late(hosts)
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
+ fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
+ result = {}
+ classes = []
+ filters={}
+ for k in cdata['data']:
+ classes.append({
+ 'type_name': k['typeName'],
+ 'type_id': str(k['typeId']),
+ })
+ sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
+ for tid, d in fdata['data'].items():
+ current_sort_values = sort_values.copy()
+ if tid == '1':
+ del current_sort_values[0]
+ filters[tid] = [
+ {"key": "type", "name": "类型",
+ "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},
+
+ *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
+ "value": [{"n": i["itemText"], "v": i["itemText"]}
+ for i in d["plotList"]]}]),
+
+ {"key": "area", "name": "地区",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},
+
+ {"key": "year", "name": "年份",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},
+
+ {"key": "lang", "name": "语言",
+ "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},
+
+ {"key": "sort", "name": "排序", "value": current_sort_values}
+ ]
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
+ data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
+ data=[]
+ for i in data1['data'].values():
+ data.extend(i['list'])
+ data.extend(data2['data'])
+ vods=self.getvod(data)
+ return {'list':vods}
+
+ def categoryContent(self, tid, pg, filter, extend):
+
+ params = {
+ "area": extend.get('area', ''),
+ "filterStatus": "1",
+ "lang": extend.get('lang', ''),
+ "pageNum": pg,
+ "pageSize": "30",
+ "sort": extend.get('sort', '1'),
+ "sortBy": "1",
+ "type": extend.get('type', ''),
+ "type1": tid,
+ "v_class": extend.get('v_class', ''),
+ "year": extend.get('year', '')
+ }
+ data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
+ result = {}
+ result['list'] = self.getvod(data['data']['list'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
+ vod=self.getvod([data['data']])[0]
+ vod['vod_play_from']='金牌'
+ vod['vod_play_url'] = '#'.join(
+ f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
+ vod['episodelist'])
+ vod.pop('episodelist', None)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ params = {
+ "keyword": key,
+ "pageNum": pg,
+ "pageSize": "8",
+ "sourceCode": "1"
+ }
+ data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
+ vods=self.getvod(data['data']['result']['list'])
+ return {'list':vods,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ self.header = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'DNT': '1',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
+ 'sec-ch-ua-mobile': '?0',
+ 'Origin': self.host,
+ 'Referer': f'{self.host}/'
+ }
+ ids=id.split('@@')
+ pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
+ vlist=[]
+ for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
+ return {'parse':0,'url':vlist,'header':self.header}
+
+ def localProxy(self, param):
+ pass
+
+ def host_late(self, url_list):
+ if isinstance(url_list, str):
+ urls = [u.strip() for u in url_list.split(',')]
+ else:
+ urls = url_list
+ if len(urls) <= 1:
+ return urls[0] if urls else ''
+
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ response = requests.head(url, timeout=1.0, allow_redirects=False)
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ except Exception as e:
+ results[url] = float('inf')
+ for url in urls:
+ t = threading.Thread(target=test_host, args=(url,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+ return min(results.items(), key=lambda x: x[1])[0]
+
+ def md5(self, sign_key):
+ md5_hash = MD5.new()
+ md5_hash.update(sign_key.encode('utf-8'))
+ md5_result = md5_hash.hexdigest()
+ return md5_result
+
+ def js(self, param):
+ return '&'.join(f"{k}={v}" for k, v in param.items())
+
+ def getheaders(self, param=None):
+ if param is None:param = {}
+ t=str(int(time.time()*1000))
+ param['key']='cb808529bae6b6be45ecfab29a4889bc'
+ param['t']=t
+ sha1_hash = SHA1.new()
+ sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
+ sign = sha1_hash.hexdigest()
+ deviceid = str(uuid.uuid4())
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'Accept': 'application/json, text/plain, */*',
+ 'sign': sign,
+ 't': t,
+ 'deviceid':deviceid
+ }
+ return headers
+
+ def convert_field_name(self, field):
+ field = field.lower()
+ if field.startswith('vod') and len(field) > 3:
+ field = field.replace('vod', 'vod_')
+ if field.startswith('type') and len(field) > 4:
+ field = field.replace('type', 'type_')
+ return field
+
+ def getvod(self, array):
+ return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]
+
diff --git a/py/零度影视.py b/py/零度影视.py
new file mode 100644
index 0000000..0caa59e
--- /dev/null
+++ b/py/零度影视.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ did=self.getdid()
+ self.headers.update({'deviceId': did})
+ token=self.gettk()
+ self.headers.update({'token': token})
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='http://ldys.sq1005.top'
+
+ headers = {
+ 'User-Agent': 'okhttp/4.12.0',
+ 'client': 'app',
+ 'deviceType': 'Android'
+ }
+
+ def homeContent(self, filter):
+ data=self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
+ result = {}
+ cate = {
+ "类型": "classify",
+ "地区": "region",
+ "年份": "year"
+ }
+ sort={
+ 'key':'sreecnTypeEnum',
+ 'name': '排序',
+ 'value':[{'n':'最新','v':'NEWEST'},{'n':'人气','v':'POPULARITY'},{'n':'评分','v':'COLLECT'},{'n':'热搜','v':'HOT'}]
+ }
+ classes = []
+ filters = {}
+ for k in data['data']:
+ classes.append({
+ 'type_name': k['name'],
+ 'type_id': k['id']
+ })
+ filters[k['id']] = []
+ for v in k['children']:
+ filters[k['id']].append({
+ 'name': v['name'],
+ 'key': cate[v['name']],
+ 'value':[{'n':i['name'],'v':i['name']} for i in v['children']]
+ })
+ filters[k['id']].append(sort)
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ jdata={"condition":64,"pageNum":1,"pageSize":40}
+ data=self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
+ return {'list':self.getlist(data['data']['records'])}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ jdata = {
+ 'condition': {
+ 'sreecnTypeEnum': 'NEWEST',
+ 'typeId': tid,
+ },
+ 'pageNum': int(pg),
+ 'pageSize': 40,
+ }
+ jdata['condition'].update(extend)
+ data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
+ result = {}
+ result['list'] = self.getlist(data['data']['records'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        # vod_id was packed as 'id@@typeId' in getlist; unpack it for the API.
        ids = ids[0].split('@@')
        jdata = {"id": int(ids[0]), "typeId": ids[-1]}
        v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
        v = v['data']
        vod = {
            'type_name': v.get('classify'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_actor': v.get('star'),
            'vod_director': v.get('director'),
            'vod_content': v.get('introduce'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        # movieDetails returns the first player's episodes plus the list of
        # alternate player sources.
        c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        l = c['data']['moviePlayerList']
        # player id -> display name, used to label the play groups below.
        n = {str(i['id']): i['moviePlayerName'] for i in l}
        m = jdata.copy()
        m.update({'playerId': str(l[0]['id'])})
        # pd maps player id -> '#'-joined episode string (see getv).
        pd = self.getv(m, c['data']['episodeList'])
        # Fetch the remaining players' episode lists concurrently.
        if len(l)-1:
            with ThreadPoolExecutor(max_workers=len(l)-1) as executor:
                future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
                for future in future_to_player:
                    try:
                        o,p = future.result()
                        pd.update(self.getv(o,p))
                    except Exception as e:
                        # Best-effort: a failed source just drops out of pd.
                        print(f"请求失败: {e}")
        # Keep only players that actually produced episodes, preserving order.
        w, e = [],[]
        for i, x in pd.items():
            if x:
                w.append(n[i])
                e.append(x)
        vod['vod_play_from'] = '$$$'.join(w)
        vod['vod_play_url'] = '$$$'.join(e)
        return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ jdata={
+ "condition": {
+ "value": key
+ },
+ "pageNum": int(pg),
+ "pageSize": 40
+ }
+ data=self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
+ return {'list':self.getlist(data['data']['records']),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ jdata=json.loads(self.d64(id))
+ data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
+ try:
+ params={'playerUrl':data['data']['url'],'playerId':jdata['playerId']}
+ pd=self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
+ url,p=pd['data'],0
+ except Exception as e:
+ print(f"请求失败: {e}")
+ url,p=data['data']['url'],0
+ return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}
+
+ def localProxy(self, param):
+ pass
+
+ def liveContent(self, url):
+ pass
+
+ def gettk(self):
+ data=self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
+ return data['data']['token']
+
    def getdid(self):
        # Stable per-install device id: 16 random hex chars, cached under
        # 'ldid' so subsequent runs reuse the same identity.
        did=self.getCache('ldid')
        if not did:
            hex_chars = '0123456789abcdef'
            did =''.join(random.choice(hex_chars) for _ in range(16))
            self.setCache('ldid',did)
        return did
+
+ def getd(self,jdata,player):
+ x = jdata.copy()
+ x.update({'playerId': str(player['id'])})
+ response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
+ return x, response['data']['episodeList']
+
+ def getv(self,d,c):
+ f={d['playerId']:''}
+ g=[]
+ for i in c:
+ j=d.copy()
+ j.update({'episodeId':str(i['id'])})
+ g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
+ f[d['playerId']]='#'.join(g)
+ return f
+
+ def getlist(self,data):
+ videos = []
+ for i in data:
+ videos.append({
+ 'vod_id': f"{i['id']}@@{i['typeId']}",
+ 'vod_name': i.get('name'),
+ 'vod_pic': i.get('cover'),
+ 'vod_year': i.get('year'),
+ 'vod_remarks': i.get('totalEpisode')
+ })
+ return videos
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
diff --git a/py/饭团影视.py b/py/饭团影视.py
new file mode 100644
index 0000000..624ddf5
--- /dev/null
+++ b/py/饭团影视.py
@@ -0,0 +1,478 @@
+# coding = utf-8
+# !/usr/bin/python
+
+"""
+
+作者 丢丢喵 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
+ ====================Diudiumiao====================
+
+"""
+
+from Crypto.Util.Padding import unpad
+from urllib.parse import unquote
+from Crypto.Cipher import ARC4
+from urllib.parse import quote
+from base.spider import Spider
+from Crypto.Cipher import AES
+from bs4 import BeautifulSoup
+from base64 import b64decode
+import urllib.request
+import urllib.parse
+import binascii
+import requests
+import base64
+import json
+import time
+import sys
+import re
+import os
+
+sys.path.append('..')
+
+xurl = "https://fantuansjz.com"
+
+headerx = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
+ }
+
+pm = ''
+
+class Spider(Spider):
+ global xurl
+ global headerx
+ global headers
+
+ def getName(self):
+ return "首页"
+
+ def init(self, extend):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        # Extract the span(s) of `text` between start_str and end_str.
        # `pl` selects the mode:
        #   0 -> first delimited span, with backslashes stripped
        #   1 -> regex `start_index1` applied to the first span, hits joined by spaces
        #   2 -> regex applied to the first span, hits joined by '$$$'
        #   3 -> EVERY delimited span is scanned with `start_index1`, which is
        #        expected to yield (href, title) 2-tuples; formatted into
        #        "title$<num><url>" entries joined by '#', groups by '$$$'
        # Returns "" when the delimiters are missing.
        # NOTE(review): modes 1/2 fall through and implicitly return None when
        # the regex matches nothing; `end_index2` is accepted but never used.
        if pl == 3:
            plx = []
            # Collect every start/end-delimited span, removing each hit from
            # `text` so the next find() sees fresh input.
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        # Pull the first standalone digit run out of the title
                        # (episode number); 0 when there is none.
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        # Relative hrefs are prefixed with the site host.
                        if 'http' not in match[0]:
                            output += f"#{match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{match[1]}${number}{match[0]}"
                    output = output[1:]  # drop the leading '#'
                    purl = purl + output + "$$$"
                purl = purl[:-3]  # drop the trailing '$$$'
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""

            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")

            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg

            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg
+
+ def homeContent(self, filter):
+ result = {}
+ result = {"class": [{"type_id": "1", "type_name": "电影"},
+ {"type_id": "2", "type_name": "剧集"},
+ {"type_id": "3", "type_name": "综艺"},
+ {"type_id": "4", "type_name": "动漫"},
+ {"type_id": "40", "type_name": "豆瓣"}],
+
+ "list": [],
+ "filters": {"1": [{"key": "年代",
+ "name": "年代",
+ "value": [{"n": "全部", "v": ""},
+ {"n": "2025", "v": "2025"},
+ {"n": "2024", "v": "2024"},
+ {"n": "2023", "v": "2023"},
+ {"n": "2022", "v": "2022"},
+ {"n": "2021", "v": "2021"},
+ {"n": "2020", "v": "2020"},
+ {"n": "2019", "v": "2019"},
+ {"n": "2018", "v": "2018"}]}],
+ "2": [{"key": "年代",
+ "name": "年代",
+ "value": [{"n": "全部", "v": ""},
+ {"n": "2025", "v": "2025"},
+ {"n": "2024", "v": "2024"},
+ {"n": "2023", "v": "2023"},
+ {"n": "2022", "v": "2022"},
+ {"n": "2021", "v": "2021"},
+ {"n": "2020", "v": "2020"},
+ {"n": "2019", "v": "2019"},
+ {"n": "2018", "v": "2018"}]}],
+ "3": [{"key": "年代",
+ "name": "年代",
+ "value": [{"n": "全部", "v": ""},
+ {"n": "2025", "v": "2025"},
+ {"n": "2024", "v": "2024"},
+ {"n": "2023", "v": "2023"},
+ {"n": "2022", "v": "2022"},
+ {"n": "2021", "v": "2021"},
+ {"n": "2020", "v": "2020"},
+ {"n": "2019", "v": "2019"},
+ {"n": "2018", "v": "2018"}]}],
+ "4": [{"key": "年代",
+ "name": "年代",
+ "value": [{"n": "全部", "v": ""},
+ {"n": "2025", "v": "2025"},
+ {"n": "2024", "v": "2024"},
+ {"n": "2023", "v": "2023"},
+ {"n": "2022", "v": "2022"},
+ {"n": "2021", "v": "2021"},
+ {"n": "2020", "v": "2020"},
+ {"n": "2019", "v": "2019"},
+ {"n": "2018", "v": "2018"}]}],
+ "40": [{"key": "年代",
+ "name": "年代",
+ "value": [{"n": "全部", "v": ""},
+ {"n": "2025", "v": "2025"},
+ {"n": "2024", "v": "2024"},
+ {"n": "2023", "v": "2023"},
+ {"n": "2022", "v": "2022"},
+ {"n": "2021", "v": "2021"},
+ {"n": "2020", "v": "2020"},
+ {"n": "2019", "v": "2019"},
+ {"n": "2018", "v": "2018"}]}]}}
+
+ return result
+
+ def homeVideoContent(self):
+ videos = []
+
+ try:
+ detail = requests.get(url=xurl, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+
+ doc = BeautifulSoup(res, "lxml")
+
+ soups = doc.find_all('ul', class_="fed-list-info")
+
+ for soup in soups:
+ vods = soup.find_all('li')
+
+ for vod in vods:
+ names = vod.find('a', class_="fed-list-title")
+ name = names.text.strip()
+
+ id = names['href']
+
+ pics = vod.find('a', class_="fed-list-pics")
+ pic = pics['data-original']
+
+ if 'http' not in pic:
+ pic = xurl + pic
+
+ remarks = vod.find('span', class_="fed-list-remarks")
+ remark = remarks.text.strip()
+
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": '▶️' + remark
+ }
+ videos.append(video)
+
+ result = {'list': videos}
+ return result
+ except:
+ pass
+
+ def categoryContent(self, cid, pg, filter, ext):
+ result = {}
+ videos = []
+
+ if pg:
+ page = int(pg)
+ else:
+ page = 1
+
+ if '年代' in ext.keys():
+ NdType = ext['年代']
+ else:
+ NdType = ''
+
+ if page == 1:
+ url = f'{xurl}/sjvodtype/{cid}.html'
+
+ else:
+ url = f'{xurl}/sjvodshow/{cid}--------{str(page)}---{NdType}.html'
+
+ try:
+ detail = requests.get(url=url, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+ doc = BeautifulSoup(res, "lxml")
+
+ soups = doc.find_all('ul', class_="fed-list-info")
+
+ for soup in soups:
+ vods = soup.find_all('li')
+
+ for vod in vods:
+ names = vod.find('a', class_="fed-list-title")
+ name = names.text.strip()
+
+ id = names['href']
+
+ pics = vod.find('a', class_="fed-list-pics")
+ pic = pics['data-original']
+
+ if 'http' not in pic:
+ pic = xurl + pic
+
+ remarks = vod.find('span', class_="fed-list-remarks")
+ remark = remarks.text.strip()
+
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": '▶️' + remark
+ }
+ videos.append(video)
+
+ except:
+ pass
+ result = {'list': videos}
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ global pm
+ did = ids[0]
+ result = {}
+ videos = []
+
+ if 'http' not in did:
+ did = xurl + did
+
+ res = requests.get(url=did, headers=headerx)
+ res.encoding = "utf-8"
+ res = res.text
+
+ url = 'https://m.baidu.com/'
+ response = requests.get(url)
+ response.encoding = 'utf-8'
+ code = response.text
+ name = self.extract_middle_text(code, "s1='", "'", 0)
+ Jumps = self.extract_middle_text(code, "s2='", "'", 0)
+
+ content = '😸🎉剧情介绍📢' + self.extract_middle_text(res,'剧情介绍:','">', 0)
+
+ director = self.extract_middle_text(res, '导演:', '',1,'target=".*?">(.*?)')
+
+ actor = self.extract_middle_text(res, '主演:', '',1,'target=".*?">(.*?)')
+
+ remarks = self.extract_middle_text(res, 'fed-text-white fed-text-center">', '', 0)
+
+ year = self.extract_middle_text(res, '年份:', '', 1,'target=".*?">(.*?)')
+
+ area = self.extract_middle_text(res, '地区:', '', 1,'target=".*?">(.*?)')
+
+ if name not in content:
+ bofang = Jumps
+ else:
+ doc = BeautifulSoup(res, "lxml")
+
+ soups = doc.find('ul', class_="fed-padding")
+
+ soup = soups.find_all('a')
+
+ jishu = 0
+ xian = []
+ xianlu = ''
+ bofang = ''
+ gl = []
+
+ for sou in soup:
+ jishu = jishu + 1
+
+ name = sou.text.strip()
+
+ if any(item in name for item in gl):
+ continue
+
+ xian.append(jishu)
+
+ xianlu = xianlu + name + '$$$'
+
+ xianlu = xianlu[:-3]
+
+ for psou in xian:
+ jishu = psou - 1
+
+ soups = doc.find_all('ul', class_="fed-tabs-btm")[jishu]
+
+ soup = soups.find_all('a')
+
+ for sou in soup:
+
+ id = sou['href']
+
+ if 'http' not in id:
+ id = xurl + id
+
+ name = sou.text.strip()
+
+ bofang = bofang + name + '$' + id + '#'
+
+ bofang = bofang[:-1] + '$$$'
+
+ bofang = bofang[:-3]
+
+ videos.append({
+ "vod_id": did,
+ "vod_director": director,
+ "vod_actor": actor,
+ "vod_remarks": remarks,
+ "vod_year": year,
+ "vod_area": area,
+ "vod_content": content,
+ "vod_play_from": xianlu,
+ "vod_play_url": bofang
+ })
+
+ result['list'] = videos
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ parts = id.split("http")
+
+ xiutan = 0
+
+ if xiutan == 0:
+ if len(parts) > 1:
+ before_https, after_https = parts[0], 'http' + parts[1]
+
+ if '/tp/jd.m3u8' in after_https:
+ url = after_https
+ else:
+ res = requests.get(url=after_https, headers=headerx)
+ res = res.text
+
+ url = self.extract_middle_text(res, '},"url":"', '"', 0).replace('\\', '')
+
+ result = {}
+ result["parse"] = xiutan
+ result["playUrl"] = ''
+ result["url"] = url
+ result["header"] = headerx
+ return result
+
+ def searchContentPage(self, key, quick, page):
+ result = {}
+ videos = []
+
+ if not page:
+ page = '1'
+ if page == '1':
+ url = f'{xurl}/sjvodsearch/-------------.html?wd={key}'
+
+ else:
+ url = f'{xurl}/sjvodsearch/{key}----------{str(page)}---.html'
+
+ detail = requests.get(url=url, headers=headerx)
+ detail.encoding = "utf-8"
+ res = detail.text
+ doc = BeautifulSoup(res, "lxml")
+
+ soups = doc.find_all('dl', class_="fed-list-deta")
+
+ for vod in soups:
+ names = vod.find('h3', class_="fed-part-eone")
+ name = names.text.strip()
+
+ ids = vod.find('a', class_="fed-list-pics")
+ id = ids['href']
+ id = id.replace('/sjvodplay/', '/sjvoddetail/').replace('-1-1', '')
+
+ pic = ids['data-original']
+
+ if 'http' not in pic:
+ pic = xurl + pic
+
+ remarks = vod.find('span', class_="fed-list-remarks")
+ remark = remarks.text.strip()
+
+ video = {
+ "vod_id": id,
+ "vod_name": name,
+ "vod_pic": pic,
+ "vod_remarks": '▶️' + remark
+ }
+ videos.append(video)
+
+ result['list'] = videos
+ result['page'] = page
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ return self.searchContentPage(key, quick, '1')
+
+ def localProxy(self, params):
+ if params['type'] == "m3u8":
+ return self.proxyM3u8(params)
+ elif params['type'] == "media":
+ return self.proxyMedia(params)
+ elif params['type'] == "ts":
+ return self.proxyTs(params)
+ return None
+
+
+
+
+
diff --git a/py/香蕉APP.py b/py/香蕉APP.py
new file mode 100644
index 0000000..277fc08
--- /dev/null
+++ b/py/香蕉APP.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import random
+import string
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host,self.headers = self.getat()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ data=self.fetch(f'{self.host}/vod/listing-0-0-0-0-0-0-0-0-0-0',headers=self.headers).json()
+ result = {}
+ classes = [{
+ 'type_name': '全部',
+ 'type_id': '0'
+ }]
+ filters = {}
+ ft=[]
+ filter_keys = ['orders', 'areas', 'years', 'definitions', 'durations', 'mosaics', 'langvoices']
+ for key in filter_keys:
+ if key in data['data']:
+ filter_item = {
+ 'key': key,
+ 'name': key,
+ 'value': []
+ }
+ for item in data['data'][key]:
+ first_two = dict(list(item.items())[:2])
+ filter_item['value'].append({
+ 'v': list(first_two.values())[0],
+ 'n': list(first_two.values())[1]
+ })
+ ft.append(filter_item)
+ filters['0']=ft
+ for k in data['data']['categories']:
+ classes.append({
+ 'type_name': k['catename'],
+ 'type_id': k['cateid']
+ })
+ filters[k['cateid']]=ft
+
+ result['class'] = classes
+ result['filters'] =filters
+ result['list'] = self.getlist(data['data']['vodrows'])
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.fetch(f'{self.host}/vod/listing-{tid}-{extend.get("areas","0")}-{extend.get("years","0")}-1-{extend.get("definitions","0")}-{extend.get("durations","0")}-{extend.get("mosaics","0")}-{extend.get("langvoices","0")}-{extend.get("orders","0")}-{pg}',headers=self.headers).json()
+ result = {}
+ result['list'] = self.getlist(data['data']['vodrows'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data=self.fetch(f'{self.host}/vod/reqplay/{ids[0]}',headers=self.headers).json()
+ vod = {
+ 'vod_play_from': data['errmsg'],
+ 'vod_play_url': '#'.join([f"{i['hdtype']}${i['httpurl']}" for i in data['data']['httpurls']]),
+ }
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.fetch(f'{self.host}/search?page={pg}&wd={key}',headers=self.headers).json()
+ return {'list':self.getlist(data['data']['vodrows']),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'parse': 0, 'url': id, 'header': {'User-Agent':'ExoPlayer'}}
+
+ def localProxy(self, param):
+ pass
+
+ def getlist(self,data):
+ vlist=[]
+ for i in data:
+ if i['isvip'] !='1':
+ vlist.append({
+ 'vod_id': i['vodid'],
+ 'vod_name': i['title'],
+ 'vod_pic': i['coverpic'],
+ 'vod_year': i.get('duration'),
+ 'vod_remarks': i.get('catename'),
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ return vlist
+
+ def getat(self):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ 'Accept': 'application/json, text/plain, */*',
+ 'x-auth-uuid': self.random_str(32),
+ 'x-system': 'Android',
+ 'x-version': '5.0.5',
+ 'x-channel': 'xj2',
+ 'x-requested-with': 'com.uyvzkv.pnjzdv',
+ 'sec-fetch-site': 'cross-site',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-dest': 'empty',
+ 'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
+ }
+ host=f'https://{self.random_str(6)}.bjhpz.com'
+ data=self.fetch(f'{host}/init',headers=headers).json()
+ headers.update({'x-cookie-auth': data['data']['globalData'].get('xxx_api_auth')})
+ return host,headers
+
+ def random_str(self,length=16):
+ chars = string.ascii_lowercase + string.digits
+ return ''.join(random.choice(chars) for _ in range(length))
+
diff --git a/py/骚火影视.py b/py/骚火影视.py
new file mode 100644
index 0000000..788ae32
--- /dev/null
+++ b/py/骚火影视.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from urllib.parse import urlparse
+import base64
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host=self.gethost()
+ self.headers.update({'referer': f'{self.host}/'})
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = {
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
+ 'sec-ch-ua-platform': '"Android"',
+ 'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ }
+
+ def homeContent(self, filter):
+ data=self.getpq()
+ result = {}
+ classes = []
+ filters = {"1": {"name": "类型","key": "tid","value": [{"n": "喜剧","v": 6},{"n": "爱情","v": 7},{"n": "恐怖","v": 8},{"n": "动作","v": 9},{"n": "科幻","v": 10},{"n": "战争","v": 11},{"n": "犯罪","v": 12},{"n": "动画","v": 13},{"n": "奇幻","v": 14},{"n": "剧情","v": 15},{"n": "冒险","v": 16},{"n": "悬疑","v": 17},{"n": "惊悚","v": 18},{"n": "其它","v": 19}]},"2": {"name": "类型","key": "tid","value": [{"n": "大陆剧","v": 20},{"n": "港剧","v": 21},{"n": "韩剧","v": 22},{"n": "美剧","v": 23},{"n": "日剧","v": 24},{"n": "英剧","v": 25},{"n": "台剧","v": 26},{"n": "其它","v": 27}]}}
+ for k in data('.top_bar.clearfix a').items():
+ j = k.attr('href')
+ if j and 'list' in j:
+ id = re.search(r'\d+', j).group(0)
+ classes.append({
+ 'type_name': k.text(),
+ 'type_id': id
+ })
+ result['class'] = classes
+ result['filters'] = filters
+ result['list'] = self.getlist(data('.grid_box ul li'))
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.getpq(f"/list/{extend.get('tid',tid)}-{pg}.html")
+ result = {}
+ result['list'] = self.getlist(data('.grid_box ul li'))
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data=self.getpq(ids[0])
+ vod = {
+ 'vod_remarks': data('.grid_box.v_info_box p').text(),
+ 'vod_content': data('.p_txt.show_part').text().split('\n')[0],
+ }
+ n=list(data('.play_from ul li').items())
+ p=list(data('ul.play_list li').items())
+ ns,ps=[],[]
+ for i,j in enumerate(n):
+ ns.append(j.text())
+ ps.append('#'.join([f"{k.text()}${k.attr('href')}" for k in list(p[i]('a').items())[::-1]]))
+ vod['vod_play_from']='$$$'.join(ns)
+ vod['vod_play_url']='$$$'.join(ps)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ pass
+
+ def playerContent(self, flag, id, vipFlags):
+ data=self.getpq(id)
+ try:
+ surl=data('section[style*="padding-top"] iframe').eq(0).attr('src')
+ sd=pq(self.fetch(surl,headers=self.headers).text)('body script').html()
+ jdata=self.extract_values(sd)
+ jdata['key']=self.hhh(jdata['key'])
+ parsed_url = urlparse(surl)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ headers = {
+ 'accept': 'application/json, text/javascript, */*; q=0.01',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
+ 'dnt': '1',
+ 'origin': durl,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f'{surl}',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
+ 'sec-ch-ua-mobile': '?1',
+ 'sec-ch-ua-platform': '"Android"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'sec-fetch-storage-access': 'active',
+ 'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'x-requested-with': 'XMLHttpRequest',
+ }
+ jjb=self.post(f"{durl}/api.php",headers=headers,data=jdata).json()
+ url,p=jjb['url'],0
+ except Exception as e:
+ self.log(f"失败: {e}")
+ url,p=f'{self.host}{id}',1
+ phd={
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'sec-ch-ua-platform': '"Android"',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
+ 'sec-fetch-dest': 'video',
+ 'referer': f'{self.host}/',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ }
+ return {'parse': p, 'url': url, 'header': phd}
+
+ def localProxy(self, param):
+ pass
+
+ def liveContent(self, url):
+ pass
+
+ def gethost(self):
+ data=pq(self.fetch("http://shapp.us",headers=self.headers).text)
+ for i in data('.content-top ul li').items():
+ h=i('a').attr('href')
+ if h:
+ data = self.fetch(h, headers=self.headers, timeout=5)
+ if data.status_code == 200:
+ return h
+
+ def extract_values(self, text):
+ url_match = re.search(r'var url = "([^"]+)"', text)
+ url = url_match.group(1) if url_match else None
+ t_match = re.search(r'var t = "([^"]+)"', text)
+ t = t_match.group(1) if t_match else None
+ key_match = re.search(r'var key = hhh\("([^"]+)"\)', text)
+ key_param = key_match.group(1) if key_match else None
+ act_match = re.search(r'var act = "([^"]+)"', text)
+ act = act_match.group(1) if act_match else None
+ play_match = re.search(r'var play = "([^"]+)"', text)
+ play = play_match.group(1) if play_match else None
+ return {
+ "url": url,
+ "t": t,
+ "key": key_param,
+ "act": act,
+ "play": play
+ }
+
+ def getlist(self,data):
+ videos = []
+ for i in data.items():
+ videos.append({
+ 'vod_id': i('a').attr('href'),
+ 'vod_name': i('a').attr('title'),
+ 'vod_pic': i('a img').attr('data-original'),
+ 'vod_remarks': i('.v_note').text()
+ })
+ return videos
+
+ def getpq(self, path=''):
+ data=self.fetch(f"{self.host}{path}",headers=self.headers).text
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(data.encode('utf-8'))
+
+ def hhh(self, t):
+ ee = {
+ "0Oo0o0O0": "a", "1O0bO001": "b", "2OoCcO2": "c", "3O0dO0O3": "d",
+ "4OoEeO4": "e", "5O0fO0O5": "f", "6OoGgO6": "g", "7O0hO0O7": "h",
+ "8OoIiO8": "i", "9O0jO0O9": "j", "0OoKkO0": "k", "1O0lO0O1": "l",
+ "2OoMmO2": "m", "3O0nO0O3": "n", "4OoOoO4": "o", "5O0pO0O5": "p",
+ "6OoQqO6": "q", "7O0rO0O7": "r", "8OoSsO8": "s", "9O0tO0O9": "t",
+ "0OoUuO0": "u", "1O0vO0O1": "v", "2OoWwO2": "w", "3O0xO0O3": "x",
+ "4OoYyO4": "y", "5O0zO0O5": "z", "0OoAAO0": "A", "1O0BBO1": "B",
+ "2OoCCO2": "C", "3O0DDO3": "D", "4OoEEO4": "E", "5O0FFO5": "F",
+ "6OoGGO6": "G", "7O0HHO7": "H", "8OoIIO8": "I", "9O0JJO9": "J",
+ "0OoKKO0": "K", "1O0LLO1": "L", "2OoMMO2": "M", "3O0NNO3": "N",
+ "4OoOOO4": "O", "5O0PPO5": "P", "6OoQQO6": "Q", "7O0RRO7": "R",
+ "8OoSSO8": "S", "9O0TTO9": "T", "0OoUO0": "U", "1O0VVO1": "V",
+ "2OoWWO2": "W", "3O0XXO3": "X", "4OoYYO4": "Y", "5O0ZZO5": "Z"
+ }
+ n = ""
+ o = base64.b64decode(t).decode('utf-8', errors='replace')
+ i = 0
+ while i < len(o):
+ l = o[i]
+ found = False
+ for key, value in ee.items():
+ if o[i:i + len(key)] == key:
+ l = value
+ i += len(key) - 1
+ found = True
+ break
+ if not found:
+ pass
+ n += l
+ i += 1
+ return n
diff --git a/py/魔方影视.py b/py/魔方影视.py
new file mode 100644
index 0000000..f8203bf
--- /dev/null
+++ b/py/魔方影视.py
@@ -0,0 +1,209 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ data = self.getdata("/api.php/getappapi.index/initV119")
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["type_list"]
+ homedata = data["banner_list"][8:]
+ for item in json_data:
+ if item["type_name"] == "全部":
+ continue
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item["type_extend"])
+ homedata.extend(item["recommend_list"])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+ classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ result["list"] = homedata[1:]
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
+ "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
+ "class": extend.get('class', '全部')}
+ result = {}
+ data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
+ result["list"] = data["recommend_list"]
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = f"vod_id={ids[0]}"
+ data = self.getdata("/api.php/getappapi.index/vodDetail", body)
+ vod = data["vod"]
+ play = []
+ names = []
+ for itt in data["vod_play_list"]:
+ a = []
+ names.append(itt["player_info"]["show"])
+ for it in itt['urls']:
+ it['user_agent'] = itt["player_info"].get("user_agent")
+ it["parse"] = itt["player_info"].get("parse")
+ a.append(f"{it['name']}${self.e64(json.dumps(it))}")
+ play.append("#".join(a))
+ vod["vod_play_from"] = "$$$".join(names)
+ vod["vod_play_url"] = "$$$".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ data = self.getdata("/api.php/getappapi.index/searchList", body)
+ result = {"list": data["search_list"], "page": pg}
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ ids = json.loads(self.d64(id))
+ h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
+ try:
+ if re.search(r'url=', ids['parse_api_url']):
+ data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
+ url = data.get('url') or data['data'].get('url')
+ else:
+ body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
+ b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
+ url = json.loads(b)['url']
+ if 'error' in url: raise ValueError(f"解析失败: {url}")
+ p = 0
+ except Exception as e:
+ print('错误信息:', e)
+ url, p = ids['url'], 1
+
+ if re.search(r'\.jpg|\.png|\.jpeg', url):
+ url = self.Mproxy(url)
+ result = {}
+ result["parse"] = p
+ result["url"] = url
+ result["header"] = h
+ return result
+
+ def localProxy(self, param):
+ return self.Mlocal(param)
+
+ def gethost(self):
+ headers = {
+ 'User-Agent': 'okhttp/3.14.9'
+ }
+ response = self.fetch('https://snysw.xyz/mfys.txt',headers=headers).text
+ return response.strip()
+
+ def aes(self, text, b=None):
+ key = b"1234567887654321"
+ cipher = AES.new(key, AES.MODE_CBC, key)
+ if b:
+ ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
+ ct = b64encode(ct_bytes).decode("utf-8")
+ return ct
+ else:
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+ return pt.decode("utf-8")
+
+ def header(self):
+ t = str(int(time.time()))
+ header = {"Referer": self.host,
+ "User-Agent": "okhttp/3.14.9", "app-version-code": "140", "app-ui-mode": "light",
+ "app-api-verify-time": t, "app-user-device-id": self.md5(t),
+ "app-api-verify-sign": self.aes(t, True),
+ "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
+ return header
+
+ def getdata(self, path, data=None):
+ vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
+ data1 = self.aes(vdata)
+ return json.loads(data1)
+
+ def Mproxy(self, url):
+ return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"
+
+ def Mlocal(self, param, header=None):
+ url = self.d64(param["url"])
+ ydata = self.fetch(url, headers=header, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = self.fetch(url, headers=header).content.decode('utf-8')
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if '#EXT' not in string and 'http' not in string:
+ last_slash_index = string.rfind('/')
+ lpath = string[:last_slash_index + 1]
+ lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
diff --git a/tcp.sh b/tcp.sh
new file mode 100644
index 0000000..c5c46cb
--- /dev/null
+++ b/tcp.sh
@@ -0,0 +1,10 @@
#!/bin/bash

# Re-exec as root when invoked by an unprivileged user.
# The `exit 1` is only reached if exec itself fails (e.g. sudo missing).
if [ "$(id -u)" -ne 0 ]; then
  exec sudo "$0" "$@"
  exit 1
fi

# Fetch the upstream installer into $HOME and run it, forwarding the first
# CLI argument.  BUG FIX: "$1" is now quoted so arguments containing spaces
# or glob characters are passed through intact.
clun_download() {
  cd ~ && curl -s https://raw.githubusercontent.com/cluntop/sh/refs/heads/main/tcp.sh -o clun_tcp.sh && chmod +x clun_tcp.sh && ./clun_tcp.sh "$1"
} && clun_download "$1"
diff --git a/tv b/tv
new file mode 100644
index 0000000..29690dc
--- /dev/null
+++ b/tv
@@ -0,0 +1,40 @@
+{
+ "urls": [
+ {
+ "url": "https://clun.top/box.json",
+ "name": "自用"
+ },
+ {
+ "url": "https://clun.top/js/wex/api.json",
+ "name": "王二小"
+ },
+ {
+ "url": "https://clun.top/js/fty/api.json",
+ "name": "饭总"
+ },
+ {
+ "url": "https://clun.top/js/aa.json",
+ "name": "张佬"
+ },
+ {
+ "url": "https://clun.top/svip.json",
+ "name": "拾光"
+ },
+ {
+ "url": "https://clun.top/js/moyu/moyu.json",
+ "name": "摸鱼"
+ },
+ {
+ "url": "https://clun.top/js/xiaosa/api.json",
+ "name": "潇洒"
+ },
+ {
+ "url": "https://clun.top/jsm.json",
+ "name": "PG"
+ },
+ {
+ "url": "https://clun.top/js/ok/ok.json",
+ "name": "OK"
+ }
+ ]
+}
\ No newline at end of file