feat(adult): add 911爆料网, UVod, and 花都 resource site support

Add configuration and parser scripts for three adult-content sites:
- 911爆料网 (911.py): category browsing, search, detail pages, and play-link parsing
- UVod (UVod.py): encrypted API requests, video category and play-source parsing
- 花都: corresponding resource entry added to adult.json

Also update api.json, adding two APP data-source configs: "余白" and "星空".
This commit is contained in:
Wang.Luo 2025-10-18 01:34:04 +08:00
parent 44ae06c5ee
commit 500d200e2e
5 changed files with 677 additions and 2 deletions
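
The new spiders read their runtime options from the ext/extend JSON passed to init(). A minimal local-test sketch for 911.py (the site/proxy/plp keys come from the script below; the loading code and example values are illustrative, not part of this commit):

```
# Assumes the repo's base.spider module is importable from the working
# directory, exactly as 911.py itself requires.
import json
import importlib.util

spec = importlib.util.spec_from_file_location("spider_911", "py/adult/911.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

spider = mod.Spider()
spider.init(json.dumps({
    "site": "https://911blw.com",  # primary domain; built-in backups are probed as well
    "proxy": {},                   # optional requests-style proxies dict
    "plp": ""                      # optional prefix prepended to play URLs
}))
print(len(spider.homeContent(False).get("list", [])))
```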

adult.json

@ -1260,7 +1260,7 @@
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
},
{
"key": "花都",
"name": "花都",
@ -1360,7 +1360,25 @@
"key": "javxx",
"name": "javxx",
"type": 3,
"api": "./py/adult/javxx.py",
"api": "./py/adult/javbb.py",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "UVod",
"name": "UVod",
"type": 3,
"api": "./py/adult/UVod.py",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "911爆料网",
"name": "911爆料网",
"type": 3,
"api": "./py/adult/911.py",
"searchable": 1,
"quickSearch": 1,
"filterable": 1

api.json

@ -94,6 +94,23 @@
"name": "金牌影院(请断网再安装)"
}
},
{
"key": "余白",
"name": "余白丨APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://app.yb4k.top",
"site": "",
"dataKey": "pk5CemuwgQ4gh8dl",
"dataIv": "pk5CemuwgQ4gh8dl",
"deviceId": "",
"version": "",
"token": "71ef95295e56f3dfce2d008b1b07dc83f20f2bd8c5444202b6957587c9a5e57e",
"ua": ""
}
},
{
"key": "咖啡",
"name": "咖啡丨APP",
@ -250,6 +267,21 @@
"ua": ""
}
},
{
"key": "星空",
"name": "星空APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "http://xkcms.xkgzs.xyz",
"site": "",
"dataKey": "AJcdjkAjkdJDkvcd",
"dataIv": "AJcdjkAjkdJDkvcd",
"deviceId": "",
"version": ""
}
},
{
"key": "于浅",
"name": "于浅APP",

py/adult/911.py (new file, 435 lines)

@ -0,0 +1,435 @@
# -*- coding: utf-8 -*-
import json
import random
import re
import sys
import threading
import time
import requests
from base64 import b64decode, b64encode
from urllib.parse import urlparse, urljoin
from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad
from bs4 import BeautifulSoup
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend="{}"):
config = json.loads(extend)
self.domin = config.get('site', "https://911blw.com")
self.proxies = config.get('proxy', {}) or {}
self.plp = config.get('plp', '')
self.backup_urls = ["https://hlj.fun", "https://911bl16.com"]
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
# Pick the fastest-responding host
self.host = self.host_late([self.domin] + self.backup_urls)
self.headers.update({'Origin': self.host, 'Referer': f"{self.host}/"})
# Cache the host announced on the about page
self.getcnh()
def getName(self):
return "911爆料网"
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
def homeContent(self, filter):
result = {}
classes = []
# Category list, based on 911爆料网's actual site sections
categories = [
{"type_id": "/category/jrgb/", "type_name": "最新爆料"},
{"type_id": "/category/rmgb/", "type_name": "精选大瓜"},
{"type_id": "/category/blqw/", "type_name": "猎奇吃瓜"},
{"type_id": "/category/rlph/", "type_name": "TOP5大瓜"},
{"type_id": "/category/ssdbl/", "type_name": "社会热点"},
{"type_id": "/category/hjsq/", "type_name": "海角社区"},
{"type_id": "/category/mrds/", "type_name": "每日大赛"},
{"type_id": "/category/xyss/", "type_name": "校园吃瓜"},
{"type_id": "/category/mxhl/", "type_name": "明星吃瓜"},
{"type_id": "/category/whbl/", "type_name": "网红爆料"},
{"type_id": "/category/bgzq/", "type_name": "反差爆料"},
{"type_id": "/category/fljq/", "type_name": "网黄福利"},
{"type_id": "/category/crfys/", "type_name": "午夜剧场"},
{"type_id": "/category/thjx/", "type_name": "探花经典"},
{"type_id": "/category/dmhv/", "type_name": "禁漫天堂"},
{"type_id": "/category/slec/", "type_name": "吃瓜精选"},
{"type_id": "/category/zksr/", "type_name": "重口调教"},
{"type_id": "/category/crlz/", "type_name": "精选连载"}
]
result['class'] = categories
# Homepage recommended content
html = self.fetch_page(f"{self.host}/")
if html:
soup = BeautifulSoup(html, 'html.parser')
articles = soup.select('article, .post-item, .article-item')
result['list'] = self.getlist(articles)
else:
result['list'] = []
return result
def homeVideoContent(self):
# Recommended videos for the home feed
html = self.fetch_page(f"{self.host}/category/jrgb/1/")
videos = self.extract_content(html, f"{self.host}/category/jrgb/1/")
return {'list': videos}
def categoryContent(self, tid, pg, filter, extend):
if '@folder' in tid:
# Folder-type entries
id = tid.replace('@folder', '')
videos = self.getfod(id)
else:
# Regular category listing
url = f"{self.host}{tid}{pg}/" if pg != "1" else f"{self.host}{tid}"
html = self.fetch_page(url)
if html:
soup = BeautifulSoup(html, 'html.parser')
articles = soup.select('article, .post-item, .article-item, ul.row li')
videos = self.getlist(articles, tid)
else:
videos = []
result = {}
result['list'] = videos
result['page'] = pg
result['pagecount'] = 1 if '@folder' in tid else 99999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
url = ids[0] if ids[0].startswith("http") else f"{self.host}{ids[0]}"
html = self.fetch_page(url)
if not html:
return {'list': []}
soup = BeautifulSoup(html, 'html.parser')
vod = {'vod_play_from': '911爆料网'}
try:
# Extract tag links
clist = []
tags = soup.select('.tags .keywords a, .tagcloud a, a[rel="tag"]')
for tag in tags:
title = tag.get_text(strip=True)
href = tag.get('href', '')
if href and title:
clist.append('[a=cr:' + json.dumps({'id': href, 'name': title}) + '/]' + title + '[/a]')
vod['vod_content'] = '点击展开↓↓↓\n'+' '.join(clist) if clist else soup.select_one('.post-content, .entry-content').get_text(strip=True)[:200] + '...'
except:
title_elem = soup.select_one('h1, .post-title, .entry-title')
vod['vod_content'] = title_elem.get_text(strip=True) if title_elem else "无简介"
try:
# Extract the play list (same dplayer approach as the 51吸瓜 spider)
plist = []
# Method 1: dplayer data-config blocks
dplayers = soup.select('.dplayer, [data-config]')
for c, player in enumerate(dplayers, start=1):
config_str = player.get('data-config', '{}')
try:
config = json.loads(config_str)
if 'video' in config and 'url' in config['video']:
plist.append(f"视频{c}${config['video']['url']}")
except:
pass
# Method 2: <video>/<source> tags
if not plist:
video_tags = soup.select('video source, video[src]')
for c, video in enumerate(video_tags, start=1):
src = video.get('src') or ''
if src:
plist.append(f"视频{c}${src}")
# Method 3: embedded player iframes
if not plist:
iframes = soup.select('iframe[src]')
for c, iframe in enumerate(iframes, start=1):
src = iframe.get('src', '')
if src and ('player' in src or 'video' in src):
plist.append(f"视频{c}${src}")
# Method 4: pull links out of inline scripts
if not plist:
scripts = soup.find_all('script')
for script in scripts:
if script.string:
# Look for m3u8/mp4 and similar video URLs
video_matches = re.findall(r'(https?://[^\s"\']*\.(?:m3u8|mp4|flv|ts|mkv)[^\s"\']*)', script.string)
for c, match in enumerate(video_matches, start=1):
plist.append(f"视频{c}${match}")
vod['vod_play_url'] = '#'.join(plist) if plist else f"请检查页面,可能没有视频${url}"
except Exception as e:
print(f"详情页解析错误: {e}")
vod['vod_play_url'] = f"解析错误${url}"
return {'list': [vod]}
def searchContent(self, key, quick, pg="1"):
url = f"{self.host}/search/{key}/{pg}/"
html = self.fetch_page(url)
if html:
soup = BeautifulSoup(html, 'html.parser')
articles = soup.select('article, .post-item, .article-item, ul.row li')
videos = self.getlist(articles)
else:
videos = []
return {'list': videos, 'page': pg, 'pagecount': 9999, 'limit': 90, 'total': 999999}
def playerContent(self, flag, id, vipFlags):
# Direct-play formats (m3u8/mp4/...) need no external parser
p = 0 if re.search(r'\.(m3u8|mp4|flv|ts|mkv|mov|avi|webm)', id) else 1
return {'parse': p, 'url': f"{self.plp}{id}", 'header': self.headers}
def localProxy(self, param):
try:
url = self.d64(param['url'])
match = re.search(r"loadBannerDirect\('([^']*)'", url)
if match:
url = match.group(1)
res = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=10)
# Decide whether AES decryption is needed, based on the file extension
if url.endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')):
# Plain image: return the bytes as-is
return [200, res.headers.get('Content-Type'), res.content]
else:
# Encrypted payload: AES-decrypt before returning
return [200, res.headers.get('Content-Type'), self.aesimg(res.content)]
except Exception as e:
print(f"图片代理错误: {str(e)}")
return [500, 'text/html', '']
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self, encoded_text):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""
def aesimg(self, word):
key = b'f5d965df75336270'
iv = b'97b60394abc2fbe1'
cipher = AES.new(key, AES.MODE_CBC, iv)
decrypted = unpad(cipher.decrypt(word), AES.block_size)
return decrypted
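# Fetch a page with a 1s pause; if the body looks truncated (<1000 chars), retry once through the backup domains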
def fetch_page(self, url, use_backup=False):
original_url = url
if use_backup:
for backup in self.backup_urls:
test_url = url.replace(self.domin, backup)
try:
time.sleep(1)
res = requests.get(test_url, headers=self.headers, proxies=self.proxies, timeout=10)
res.raise_for_status()
res.encoding = "utf-8"
text = res.text
if len(text) > 1000:
print(f"[DEBUG] 使用备用 {backup}: {test_url}")
return text
except:
continue
try:
time.sleep(1)
res = requests.get(original_url, headers=self.headers, proxies=self.proxies, timeout=10)
res.raise_for_status()
res.encoding = "utf-8"
text = res.text
if len(text) < 1000:
print(f"[DEBUG] 内容过短,尝试备用域名")
return self.fetch_page(original_url, use_backup=True)
return text
except Exception as e:
print(f"[ERROR] 请求失败 {original_url}: {e}")
return None
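# Read the latest domain announced on the about page and cache it under 'host_911blw'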
def getcnh(self):
try:
html = self.fetch_page(f"{self.host}/about.html")
if html:
soup = BeautifulSoup(html, 'html.parser')
link = soup.select_one('a[href]')
if link:
url = link.get('href')
parsed_url = urlparse(url)
host = parsed_url.scheme + "://" + parsed_url.netloc
self.setCache('host_911blw', host)
except Exception as e:
print(f"获取主机信息错误: {str(e)}")
def host_late(self, url_list):
if not url_list:
return self.domin
results = {}
threads = []
def test_host(url):
try:
start_time = time.time()
response = requests.head(url, headers=self.headers, proxies=self.proxies, timeout=1.0, allow_redirects=False)
delay = (time.time() - start_time) * 1000
results[url] = delay
except Exception as e:
results[url] = float('inf')
for url in url_list:
t = threading.Thread(target=test_host, args=(url,))
threads.append(t)
t.start()
for t in threads:
t.join()
return min(results.items(), key=lambda x: x[1])[0]
def getfod(self, id):
url = f"{self.host}{id}"
html = self.fetch_page(url)
if not html:
return []
soup = BeautifulSoup(html, 'html.parser')
videos = []
# Locate the folder body
content = soup.select_one('.post-content, .entry-content')
if content:
# Strip unwanted elements
for elem in content.select('.txt-apps, .line, blockquote, .tags, .content-tabs'):
elem.decompose()
# Pair each heading with the link in the following paragraph
headings = content.select('h2, h3, h4')
paragraphs = content.select('p')
for i, heading in enumerate(headings):
title = heading.get_text(strip=True)
if i < len(paragraphs):
link = paragraphs[i].select_one('a')
if link:
videos.append({
'vod_id': link.get('href', ''),
'vod_name': link.get_text(strip=True),
'vod_pic': f"{self.getProxyUrl()}&url={self.e64(link.get('data-img', ''))}",
'vod_remarks': title
})
return videos
def getlist(self, articles, tid=''):
videos = []
is_folder = '/mrdg' in tid
for article in articles:
try:
# Title
title_elem = article.select_one('h2, h3, .headline, .title, a[title]')
name = title_elem.get_text(strip=True) if title_elem else ""
# Link
link_elem = article.select_one('a')
href = link_elem.get('href', '') if link_elem else ""
# Date / remarks
date_elem = article.select_one('time, .date, .published')
remarks = date_elem.get_text(strip=True) if date_elem else ""
# Poster image (same trick as the 吸瓜 spider)
pic = None
script_elem = article.select_one('script')
if script_elem and script_elem.string:
base64_match = re.search(r'base64,[\'"]?([A-Za-z0-9+/=]+)[\'"]?', script_elem.string)
if base64_match:
encoded_url = base64_match.group(1)
pic = f"{self.getProxyUrl()}&url={self.e64(encoded_url)}"
if not pic:
img_elem = article.select_one('img[data-xkrkllgl]')
if img_elem and img_elem.get('data-xkrkllgl'):
encoded_url = img_elem.get('data-xkrkllgl')
pic = f"{self.getProxyUrl()}&url={self.e64(encoded_url)}"
if not pic:
img_elem = article.select_one('img')
if img_elem:
for attr in ["data-lazy-src", "data-original", "data-src", "src"]:
pic = img_elem.get(attr)
if pic:
pic = urljoin(self.host, pic)
break
if name and href:
videos.append({
'vod_id': f"{href}{'@folder' if is_folder else ''}",
'vod_name': name.replace('\n', ' '),
'vod_pic': pic,
'vod_remarks': remarks,
'vod_tag': 'folder' if is_folder else '',
'style': {"type": "rect", "ratio": 1.33}
})
except Exception as e:
print(f"列表项解析错误: {e}")
continue
return videos
if __name__ == "__main__":
spider = Spider()
spider.init('{"site": "https://911blw.com"}')
# Test homepage
result = spider.homeContent({})
print(f"首页分类: {len(result['class'])}")
print(f"首页内容: {len(result['list'])}")
# Test a category
result = spider.categoryContent("/category/jrgb/", "1", False, {})
print(f"分类内容: {len(result['list'])}")
# Test search
result = spider.searchContent("测试", False, "1")
print(f"搜索结果: {len(result['list'])}")

py/adult/UVod.py (new file, 190 lines)

@ -0,0 +1,190 @@
# -*- coding: utf-8 -*-
import sys,json,time,base64,random,string,hashlib
from urllib.parse import urlencode,quote
from base.spider import Spider
from Crypto.Cipher import AES,PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Util.Padding import pad,unpad
class Spider(Spider):
def __init__(self):
super().__init__()
self.base_url = 'https://api-h5.uvod.tv'
self.web_url = 'https://www.uvod.tv'
self.token = ''
self._iv = b"abcdefghijklmnop"
self._client_private = """-----BEGIN PRIVATE KEY-----
MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAJ4FBai1Y6my4+fc
8AD5tyYzxgN8Q7M/PuFv+8i1Xje8ElXYVwzvYd1y/cNxwgW4RX0tDy9ya562V33x
6SyNr29DU6XytOeOlOkxt3gd5169K4iFaJ0l0wA4koMTcCAYVxC9B4+zzS5djYmF
MuRGfYgKYNH99vfY7BZjdAY68ty5AgMBAAECgYB1rbvHJj5wVF7Rf4Hk2BMDCi9+
zP4F8SW88Y6KrDbcPt1QvOonIea56jb9ZCxf4hkt3W6foRBwg86oZo2FtoZcpCJ+
rFqUM2/wyV4CuzlL0+rNNSq7bga7d7UVld4hQYOCffSMifyF5rCFNH1py/4Dvswm
pi5qljf+dPLSlxXl2QJBAMzPJ/QPAwcf5K5nngQtbZCD3nqDFpRixXH4aUAIZcDz
S1RNsHrT61mEwZ/thQC2BUJTQNpGOfgh5Ecd1MnURwsCQQDFhAFfmvK7svkygoKX
t55ARNZy9nmme0StMOfdb4Q2UdJjfw8+zQNtKFOM7VhB7ijHcfFuGsE7UeXBe20n
g/XLAkEAv9SoT2hgJaQxxUk4MCF8pgddstJlq8Z3uTA7JMa4x+kZfXTm/6TOo6I8
2VbXZLsYYe8op0lvsoHMFvBSBljV0QJBAKhxyoYRa98dZB5qZRskciaXTlge0WJk
kA4vvh3/o757izRlQMgrKTfng1GVfIZFqKtnBiIDWTXQw2N9cnqXtH8CQAx+CD5t
l1iT0cMdjvlMg2two3SnpOjpo7gALgumIDHAmsVWhocLtcrnJI032VQSUkNnLq9z
EIfmHDz0TPTNHBQ=
-----END PRIVATE KEY-----
"""
self._client_public = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCeBQWotWOpsuPn3PAA+bcmM8YD
fEOzPz7hb/vItV43vBJV2FcM72Hdcv3DccIFuEV9LQ8vcmuetld98eksja9vQ1Ol
8rTnjpTpMbd4HedevSuIhWidJdMAOJKDE3AgGFcQvQePs80uXY2JhTLkRn2ICmDR
/fb32OwWY3QGOvLcuQIDAQAB
-----END PUBLIC KEY-----
"""
self._server_public = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCeBQWotWOpsuPn3PAA+bcmM8YD
fEOzPz7hb/vItV43vBJV2FcM72Hdcv3DccIFuEV9LQ8vcmuetld98eksja9vQ1Ol
8rTnjpTpMbd4HedevSuIhWidJdMAOJKDE3AgGFcQvQePs80uXY2JhTLkRn2ICmDR
/fb32OwWY3QGOvLcuQIDAQAB
-----END PUBLIC KEY-----
"""
def getName(self): return "UVOD"
def init(self, extend=""):
try: cfg = json.loads(extend) if isinstance(extend, str) and extend.strip().startswith('{') else extend if isinstance(extend, dict) else {}
except Exception: cfg = {}
self.base_url = cfg.get('base_url', self.base_url); self.token = cfg.get('token', self.token)
return self.homeContent(False)
def isVideoFormat(self, url): return any(x in url.lower() for x in ['.m3u8', '.mp4']) if url else False
def manualVideoCheck(self): return False
def destroy(self): pass
def _random_key(self, n=32):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(n))
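# Hybrid envelope encryption: the JSON payload is AES-CBC encrypted with a fresh 32-char key; the key itself is RSA (PKCS#1 v1.5) encrypted with the server public key, and the two base64 parts are joined with '.'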
def _encrypt(self, plain_text: str) -> str:
aes_key = self._random_key(32).encode('utf-8')
cipher = AES.new(aes_key, AES.MODE_CBC, iv=self._iv)
ct_b64 = base64.b64encode(cipher.encrypt(pad(plain_text.encode('utf-8'), AES.block_size))).decode('utf-8')
rsa_pub = RSA.import_key(self._server_public); rsa_cipher = PKCS1_v1_5.new(rsa_pub)
rsa_b64 = base64.b64encode(rsa_cipher.encrypt(aes_key)).decode('utf-8')
return f"{ct_b64}.{rsa_b64}"
def _decrypt(self, enc_text: str) -> str:
try:
parts = enc_text.split('.'); ct_b64, rsa_b64 = parts
rsa_priv = RSA.import_key(self._client_private)
aes_key = PKCS1_v1_5.new(rsa_priv).decrypt(base64.b64decode(rsa_b64), None)
cipher = AES.new(aes_key, AES.MODE_CBC, iv=self._iv)
pt = unpad(cipher.decrypt(base64.b64decode(ct_b64)), AES.block_size)
return pt.decode('utf-8', 'ignore')
except Exception: return enc_text
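# Request signing: X-SIGNATURE is an MD5 over a path-specific parameter string plus the millisecond X-TIMESTAMP (the token is included for unlisted paths)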
def _build_headers(self, path: str, payload: dict):
ts = str(int(time.time() * 1000)); token = self.token or ''
if path == '/video/latest':
parent_id = payload.get('parent_category_id', 101); text = f"-parent_category_id={parent_id}-{ts}"
elif path == '/video/list':
keyword = payload.get('keyword')
if keyword: keyword = quote(str(keyword), safe='').lower(); text = f"-keyword={keyword}&need_fragment=1&page=1&pagesize=42&sort_type=asc-{ts}"
else: page = payload.get('page', 1); pagesize = payload.get('pagesize', 42); parent_id = payload.get('parent_category_id', ''); text = f"-page={page}&pagesize={pagesize}&parent_category_id={parent_id}&sort_type=asc-{ts}"
elif path == '/video/info': text = f"-id={payload.get('id', '')}-{ts}"
elif path == '/video/source': quality = payload.get('quality', ''); fragment_id = payload.get('video_fragment_id', ''); video_id = payload.get('video_id', ''); text = f"-quality={quality}&video_fragment_id={fragment_id}&video_id={video_id}-{ts}"
else: filtered = {k: v for k, v in (payload or {}).items() if v not in (0, '0', '', False, None)}; query = urlencode(sorted(filtered.items()), doseq=True).lower(); text = f"{token}-{query}-{ts}"
sig = hashlib.md5(text.encode('utf-8')).hexdigest()
return {'Content-Type': 'application/json', 'X-TOKEN': token, 'X-TIMESTAMP': ts, 'X-SIGNATURE': sig, 'Origin': self.web_url, 'Referer': self.web_url + '/', 'Accept': '*/*', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36'}
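# Encrypt the payload, POST it, then decrypt/parse the response; only the 'data' field is returned when error == 0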
def _post_api(self, path: str, payload: dict):
url = self.base_url.rstrip('/') + path
try:
body = self._encrypt(json.dumps(payload, ensure_ascii=False)); headers = self._build_headers(path, payload)
rsp = self.post(url, data=body, headers=headers, timeout=15)
if rsp.status_code != 200 or not rsp.text: return None
txt = rsp.text.strip(); obj = None
try: dec = self._decrypt(txt); obj = json.loads(dec)
except:
try: obj = json.loads(txt)
except: pass
if isinstance(obj, dict) and obj.get('error') == 0: return obj.get('data')
return None
except Exception: return None
def homeContent(self, filter):
data = self._post_api('/video/category', {})
lst = (data.get('list') or data.get('category') or []) if isinstance(data, dict) else (data or [])
classes = []
for it in lst:
cid = it.get('id') or it.get('category_id') or it.get('value'); name = it.get('name') or it.get('label') or it.get('title')
if cid and name: classes.append({'type_name': str(name), 'type_id': str(cid)})
if not classes: classes = [{'type_name': '电影', 'type_id': '100'}, {'type_name': '电视剧', 'type_id': '101'}, {'type_name': '综艺', 'type_id': '102'}, {'type_name': '动漫', 'type_id': '103'}, {'type_name': '体育', 'type_id': '104'}, {'type_name': '纪录片', 'type_id': '105'}, {'type_name': '粤台专区', 'type_id': '106'}]
return {'class': classes}
def homeVideoContent(self):
data = self._post_api('/video/latest', {'parent_category_id': 101})
if isinstance(data, dict): lst = data.get('video_latest_list') or data.get('list') or data.get('rows') or data.get('items') or []
elif isinstance(data, list): lst = data
else: lst = []
videos = []
for k in lst:
vid = k.get('id') or k.get('video_id') or k.get('videoId')
if vid: videos.append({'vod_id': str(vid), 'vod_name': k.get('title') or k.get('name') or '', 'vod_pic': k.get('poster') or k.get('cover') or k.get('pic') or '', 'vod_remarks': k.get('score') or k.get('remarks') or ''})
return {'list': videos}
def categoryContent(self, tid, pg, filter, extend):
page = int(pg) if str(pg).isdigit() else 1
payload = {'parent_category_id': str(tid), 'category_id': None, 'language': None, 'year': None, 'region': None, 'state': None, 'keyword': '', 'paid': None, 'page': page, 'pagesize': 42, 'sort_field': '', 'sort_type': 'asc'}
if isinstance(extend, dict):
for k in ['category_id', 'year', 'region', 'state', 'keyword']:
if extend.get(k): payload[k] = extend[k]
data = self._post_api('/video/list', payload)
if isinstance(data, dict): lst = data.get('video_list') or data.get('list') or data.get('rows') or data.get('items') or []; total = data.get('total', 999999)
elif isinstance(data, list): lst = data; total = 999999
else: lst, total = [], 0
videos = []
for k in lst:
vid = k.get('id') or k.get('video_id') or k.get('videoId')
if vid: videos.append({'vod_id': str(vid), 'vod_name': k.get('title') or k.get('name') or '', 'vod_pic': k.get('poster') or k.get('cover') or k.get('pic') or '', 'vod_remarks': k.get('score') or ''})
return {'list': videos, 'page': page, 'pagecount': 9999, 'limit': 24, 'total': total}
def detailContent(self, ids):
vid = ids[0]
data = self._post_api('/video/info', {'id': vid}) or {}
video_info = data.get('video', {}) if isinstance(data, dict) else {}
fragments = data.get('video_fragment_list', []) if isinstance(data, dict) else []
play_urls = []
if fragments:
for fragment in fragments:
name = fragment.get('symbol', '播放'); fragment_id = fragment.get('id', ''); qualities = fragment.get('qualities', [])
if fragment_id and qualities:
max_quality = max(qualities) if qualities else 4
play_urls.append(f"{name}${vid}|{fragment_id}|[{max_quality}]")
if not play_urls: play_urls.append(f"播放${vid}")
vod = {'vod_id': str(vid), 'vod_name': video_info.get('title') or video_info.get('name') or '', 'vod_pic': video_info.get('poster') or video_info.get('cover') or video_info.get('pic') or '', 'vod_year': video_info.get('year') or '', 'vod_remarks': video_info.get('duration') or '', 'vod_content': video_info.get('description') or video_info.get('desc') or '', 'vod_play_from': '优汁🍑源', 'vod_play_url': '#'.join(play_urls) + '$$$'}
return {'list': [vod]}
def searchContent(self, key, quick, pg="1"):
page = int(pg) if str(pg).isdigit() else 1
payload = {'parent_category_id': None, 'category_id': None, 'language': None, 'year': None, 'region': None, 'state': None, 'keyword': key, 'paid': None, 'page': page, 'pagesize': 42, 'sort_field': '', 'sort_type': 'asc', 'need_fragment': 1}
data = self._post_api('/video/list', payload)
if isinstance(data, dict): lst = data.get('video_list') or data.get('list') or data.get('rows') or data.get('items') or []
elif isinstance(data, list): lst = data
else: lst = []
videos = []
for k in lst:
vid = k.get('id') or k.get('video_id') or k.get('videoId')
if vid: videos.append({'vod_id': str(vid), 'vod_name': k.get('title') or k.get('name') or '', 'vod_pic': k.get('poster') or k.get('cover') or k.get('pic') or '', 'vod_remarks': k.get('score') or ''})
return {'list': videos}
def _extract_first_media(self, obj):
if not obj: return None
if isinstance(obj, str): s = obj.strip(); return s if self.isVideoFormat(s) else None
if isinstance(obj, (dict, list)):
for v in (obj.values() if isinstance(obj, dict) else obj):
r = self._extract_first_media(v)
if r: return r
return None
def playerContent(self, flag, id, vipFlags):
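# id comes from detailContent in the form "video_id|fragment_id|[max_quality]"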
parts = id.split('|'); video_id = parts[0]
if len(parts) >= 3:
fragment_id = parts[1]; qualities_str = parts[2].strip('[]').replace(' ', ''); qualities = [q.strip() for q in qualities_str.split(',') if q.strip()]; quality = qualities[0] if qualities else '4'
payload = {'video_id': video_id, 'video_fragment_id': int(fragment_id) if str(fragment_id).isdigit() else fragment_id, 'quality': int(quality) if str(quality).isdigit() else quality, 'seek': None}
else: payload = {'video_id': video_id, 'video_fragment_id': 1, 'quality': 4, 'seek': None}
data = self._post_api('/video/source', payload) or {}
url = (data.get('video', {}).get('url', '') or data.get('url') or data.get('playUrl') or data.get('play_url') or self._extract_first_media(data) or '')
if not url: return {'parse': 1, 'url': id}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36', 'Referer': self.web_url + '/', 'Origin': self.web_url}
return {'parse': 0, 'url': url, 'header': headers}
def localProxy(self, param): return None
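
Unlike 911.py, UVod.py ships without a __main__ self-test. A minimal smoke test (illustrative only; assumes base.spider is importable from the working directory, as the script itself requires) might look like:

```
# Not part of the commit: a quick local exercise of the UVod spider's entry points.
import importlib.util

spec = importlib.util.spec_from_file_location("spider_uvod", "py/adult/UVod.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

spider = mod.Spider()
spider.init('{}')   # built-in base_url, empty token
print("categories:", len(spider.homeContent(False).get('class', [])))
print("category items:", len(spider.categoryContent('101', '1', False, {}).get('list', [])))
print("search items:", len(spider.searchContent('test', False, '1').get('list', [])))
```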

Binary file not shown.