Replace the JAR, update the API configuration, convert several danmaku sites into movie/TV sites, and clean up API addresses and site information

This commit is contained in:
Wang.Luo 2025-04-13 14:40:19 +08:00
parent 55ea05f9b6
commit 184425433f
45 changed files with 23963 additions and 1937 deletions

Binary file not shown.

Before: 4.5 KiB | After: 3.2 KiB

61
JS/88看球.js Normal file
View File

@ -0,0 +1,61 @@
var rule = {
title:'88看球',
// host:'http://www.88kanqiu.cc',
host:'http://www.88kanqiu.bar/',
url: "/match/fyclass/live",
searchUrl: "",
searchable: 0,
quickSearch: 0,
class_parse: ".nav-pills li;a&&Text;a&&href;/match/(\\d+)/live",
headers: {
"User-Agent": "PC_UA",
},
timeout: 5000,
play_parse: true,
pagecount:{"1":1,"2":1,"4":1,"22":1,"8":1,"9":1,"10":1,"14":1,"15":1,"12":1,"13":1,"16":1,"28":1,"7":1,"11":1,"33":1,"27":1,"23":1,"26":1,"3":1,"21":1,"18":1},
lazy: `js:
if(/embed=/.test(input)) {
let url = input.match(/embed=(.*?)&/)[1];
url = base64Decode(url);
input = {
jx:0,
url: url.split('#')[0],
parse: 0
}
} else if (/\?url=/.test(input)){
input = {
jx:0,
url: input.split('?url=')[1].split('#')[0],
parse: 0
}
} else {
input
}
`,
limit: 6,
double: false,
推荐: "*",
一级: ".list-group .group-game-item;.d-none&&Text;img&&src;.btn&&Text;a&&href",
二级: {
title: ".game-info-container&&Text;.customer-navbar-nav li&&Text",
img: "img&&src",
desc: ";;;div.team-name:eq(0)&&Text;div.team-name:eq(1)&&Text",
content: "div.game-time&&Text",
tabs: "js:TABS=['实时直播']",
lists: `js:
LISTS = [];
let html = request(input.replace('play', 'play-url'));
let pdata = JSON.parse(html).data;
pdata = pdata.slice(6);
pdata = pdata.slice(0, -2);
pdata = base64Decode(pdata);
// log(pdata);
let jo = JSON.parse(pdata).links;
let d = jo.map(function (it) {
return it.name + '$' + urlencode(it.url)
});
LISTS.push(d)
`,
},
搜索: "",
};
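
The lazy block above pulls the real stream address out of the player link: if the link carries an embed= parameter, its value is base64-decoded and the '#' fragment is stripped; otherwise whatever follows ?url= is used. Below is a minimal Python sketch of the same extraction (illustrative only, not part of the commit; the function name extract_play_url is made up, and it assumes the embed value is plain base64):

import base64
import re

def extract_play_url(page_url: str) -> str:
    # Mirror the lazy branches: embed=<base64>&... first, then ?url=..., else pass through.
    m = re.search(r"embed=(.*?)&", page_url)
    if m:
        decoded = base64.b64decode(m.group(1)).decode("utf-8")
        return decoded.split("#")[0]
    if "?url=" in page_url:
        return page_url.split("?url=")[1].split("#")[0]
    return page_url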

View File

@ -2132,14 +2132,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@ -2147,7 +2147,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),
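
This hunk (and the identical hunks in the files below) switches the danmaku source from the old hbdm.php service to the danmu.zxz.ee XML endpoint; in both the old and new form, the id is the play URL with its query string removed. A tiny Python sketch of the new URL construction (illustrative only; danmaku_url is a made-up helper name):

def danmaku_url(play_url: str) -> str:
    # New endpoint: XML danmaku keyed by the play URL without its query string.
    return "https://danmu.zxz.ee/?type=xml&id=" + play_url.split("?")[0]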

View File

@ -1354,14 +1354,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@ -1369,7 +1369,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),

View File

@ -1,101 +0,0 @@
var rule = {
title:'磁力熊[磁]',
host:'https://www.cilixiong.com',
homeUrl:'/top250/index.html',
// url: '/fyclass/index_(fypage-1).html',
url: '/fyclassfyfilter-(fypage-1).html',
filter_url:'-{{fl.class or "0"}}-{{fl.area or "0"}}',
filter:{
"1":[{"key":"class","name":"类型","value":[{"n":"全部","v":"0"},{"n":"剧情","v":"1"},{"n":"喜剧","v":"2"},{"n":"惊悚","v":"3"},{"n":"动作","v":"4"},{"n":"爱情","v":"5"},{"n":"犯罪","v":"6"},{"n":"恐怖","v":"7"},{"n":"冒险","v":"8"},{"n":"悬疑","v":"9"},{"n":"科幻","v":"10"},{"n":"家庭","v":"11"},{"n":"奇幻","v":"12"},{"n":"动画","v":"13"},{"n":"战争","v":"14"},{"n":"历史","v":"15"},{"n":"传记","v":"16"},{"n":"音乐","v":"17"},{"n":"歌舞","v":"18"},{"n":"运动","v":"19"},{"n":"西部","v":"20"},{"n":"灾难","v":"21"},{"n":"古装","v":"22"},{"n":"情色","v":"23"},{"n":"同性","v":"24"},{"n":"儿童","v":"25"},{"n":"纪录片","v":"26"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":"0"},{"n":"大陆","v":"1"},{"n":"香港","v":"2"},{"n":"台湾","v":"3"},{"n":"美国","v":"4"},{"n":"日本","v":"5"},{"n":"韩国","v":"6"},{"n":"英国","v":"7"},{"n":"法国","v":"8"},{"n":"德国","v":"9"},{"n":"印度","v":"10"},{"n":"泰国","v":"11"},{"n":"丹麦","v":"12"},{"n":"瑞典","v":"13"},{"n":"巴西","v":"14"},{"n":"加拿大","v":"15"},{"n":"俄罗斯","v":"16"},{"n":"意大利","v":"17"},{"n":"比利时","v":"18"},{"n":"爱尔兰","v":"19"},{"n":"西班牙","v":"20"},{"n":"澳大利亚","v":"21"},{"n":"波兰","v":"22"},{"n":"土耳其","v":"23"},{"n":"越南","v":"24"}]}],
"2":[{"key":"class","name":"类型","value":[{"n":"全部","v":"0"},{"n":"剧情","v":"1"},{"n":"喜剧","v":"2"},{"n":"惊悚","v":"3"},{"n":"动作","v":"4"},{"n":"爱情","v":"5"},{"n":"犯罪","v":"6"},{"n":"恐怖","v":"7"},{"n":"冒险","v":"8"},{"n":"悬疑","v":"9"},{"n":"科幻","v":"10"},{"n":"家庭","v":"11"},{"n":"奇幻","v":"12"},{"n":"动画","v":"13"},{"n":"战争","v":"14"},{"n":"历史","v":"15"},{"n":"传记","v":"16"},{"n":"音乐","v":"17"},{"n":"歌舞","v":"18"},{"n":"运动","v":"19"},{"n":"西部","v":"20"},{"n":"灾难","v":"21"},{"n":"古装","v":"22"},{"n":"情色","v":"23"},{"n":"同性","v":"24"},{"n":"儿童","v":"25"},{"n":"纪录片","v":"26"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":"0"},{"n":"大陆","v":"1"},{"n":"香港","v":"2"},{"n":"台湾","v":"3"},{"n":"美国","v":"4"},{"n":"日本","v":"5"},{"n":"韩国","v":"6"},{"n":"英国","v":"7"},{"n":"法国","v":"8"},{"n":"德国","v":"9"},{"n":"印度","v":"10"},{"n":"泰国","v":"11"},{"n":"丹麦","v":"12"},{"n":"瑞典","v":"13"},{"n":"巴西","v":"14"},{"n":"加拿大","v":"15"},{"n":"俄罗斯","v":"16"},{"n":"意大利","v":"17"},{"n":"比利时","v":"18"},{"n":"爱尔兰","v":"19"},{"n":"西班牙","v":"20"},{"n":"澳大利亚","v":"21"},{"n":"波兰","v":"22"},{"n":"土耳其","v":"23"},{"n":"越南","v":"24"}]}]
},
searchUrl: '/e/search/index.php#classid=1,2&show=title&tempid=1&keyboard=**;post',
searchable:2,
quickSearch:0,
filterable:1,
headers:{
'User-Agent': 'MOBILE_UA'
},
timeout:5000,
class_name:'电影&剧集&豆瓣电影Top250&IMDB Top250&高分悬疑片&高分喜剧片&高分传记片&高分爱情片&高分犯罪片&高分恐怖片&高分冒险片&高分武侠片&高分奇幻片&高分历史片&高分战争片&高分歌舞片&高分灾难片&高分情色片&高分西部片&高分音乐片&高分科幻片&高分动作片&高分动画片&高分纪录片&冷门佳片',
class_url:'1&2&/top250/&/s/imdbtop250/&/s/suspense/&/s/comedy/&/s/biopic/&/s/romance/&/s/crime/&/s/horror/&/s/adventure/&/s/martial/&/s/fantasy/&/s/history/&/s/war/&/s/musical/&/s/disaster/&/s/erotic/&/s/west/&/s/music/&/s/sci-fi/&s/action/&/s/animation/&/s/documentary/&/s/unpopular/',
play_parse:true,
lazy:'',
limit:6,
推荐: `js:
pdfh=jsp.pdfh;pdfa=jsp.pdfa;pd=jsp.pd;
var d = [];
var html = request(input);
var list = pdfa(html, 'body&&.col');
list.forEach(it => {
d.push({
title: pdfh(it, 'h2&&Text'),
desc: pdfh(it, '.me-auto&&Text') + '分 / ' + pdfh(it, '.small&&Text'),
// pic_url: pd(it, '.card-img&&style'), // 只有 影视TV&爱佬版 有图片
pic_url: /!'/.test(pd(it, '.card-img&&style'))?pd(it, '.card-img&&style'):pd(it, '.card-img&&style').replaceAll("'",""), // 兼容 影视TV&爱佬版 以外的其它壳子
url: pd(it, 'a&&href')
});
})
setResult(d);
`,
一级: `js:
pdfh=jsp.pdfh;pdfa=jsp.pdfa;pd=jsp.pd;
var d = [];
if (MY_CATE !== '1' && MY_CATE !== '2') {
let turl = (MY_PAGE === 1)? 'index' : 'index_'+ MY_PAGE;
input = HOST + MY_CATE + turl + '.html';
}
var html = request(input);
var list = pdfa(html, 'body&&.col');
list.forEach(it => {
d.push({
title: pdfh(it, 'h2&&Text'),
desc: pdfh(it, '.me-auto&&Text') + '分 / ' + pdfh(it, '.small&&Text'),
// pic_url: pdfh(it, '.card-img&&style'), // 只有 影视TV&爱佬版 有图片
pic_url: /!'/.test(pd(it, '.card-img&&style'))?pd(it, '.card-img&&style'):pd(it, '.card-img&&style').replaceAll("'",""), // 兼容 影视TV&爱佬版 以外的其它壳子
url: pd(it, 'a&&href')
});
})
setResult(d);
`,
二级:{
title:'h1&&Text;p.mb-2:eq(4)&&Text',
desc:'p.mb-2:eq(1)&&Text;;;p.mb-2:eq(7)&&Text;p.mb-2:eq(5)&&Text',
img:'.rounded-2&&src',
content:'.mv_card_box&&Text',
// tabs:'js:TABS = ["道长磁力"]',
// lists:'.mv_down:eq(#id)&&.border-bottom',
// list_text:'a&&Text',
// list_url:'a&&href',
tabs:'js:TABS = ["道长磁力","道长在线预览"]',
lists:`js:
log(TABS);
pdfh=jsp.pdfh;pdfa=jsp.pdfa;pd=jsp.pd;
LISTS = [];
var dd=[];
TABS.forEach(function(tab) {
if (/道长磁力/.test(tab)) {
var d = pdfa(html, '.mv_down&&.border-bottom');
d = d.map(function(it) {
var title = pdfh(it, 'a&&Text');
log('title >>>>>>>>>>>>>>>>>>>>>>>>>>' + title);
var burl = pd(it, 'a&&href');
log('burl >>>>>>>>>>>>>>>>>>>>>>>>>>' + burl);
return title + '$' + burl
});
LISTS.push(d)
} else if (/道长在线预览/.test(tab)) {
var d = pd(html, 'iframe&&src');
if (d) {
d=['第一集在线播放预览$' + d]
} else {
d=['没有预览不要点$http://www.sharenice.net/douyin/23852']
}
LISTS.push(d)
}
});
`,
},
搜索:'.col;h2&&Text;.card-img&&style;.me-auto&&Text;a&&href',
}
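
The lists block above emits each playable entry as a 'name$url' string (a magnet link under the 道长磁力 tab, or an iframe preview under 道长在线预览), which is the episode-list convention these rules use throughout. A small Python sketch of composing and splitting such entries (illustrative only; build_entries and split_entry are made-up helper names):

def build_entries(items):
    # items: iterable of (display name, play url) pairs -> drpy-style "name$url" entries.
    return [f"{name}${url}" for name, url in items]

def split_entry(entry: str):
    # Split on the first '$' only.
    name, _, url = entry.partition("$")
    return name, url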

View File

@ -1,224 +0,0 @@
globalThis.h_ost = 'http://mitu.jiajiayoutian.top/';
var key = CryptoJS.enc.Base64.parse("ZDAzMmMxMjg3NmJjNjg0OA==");
var iv = CryptoJS.enc.Base64.parse("ZDAzMmMxMjg3NmJjNjg0OA==");
globalThis.AES_Decrypt = function(word) {
try {
var decrypt = CryptoJS.AES.decrypt(word, key, {
iv: iv,
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7,
});
const decryptedText = decrypt.toString(CryptoJS.enc.Utf8);
if (!decryptedText) {
throw new Error("解密后的内容为空");
}
return decryptedText;
} catch (e) {
console.error("解密失败:", e);
return null;
}
};
globalThis.AES_Encrypt = function(word) {
var encrypted = CryptoJS.AES.encrypt(word, key, {
iv: iv,
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7
});
return encrypted.toString();
};
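
The two helpers above wrap AES-CBC with PKCS7 padding; the base64 constant ZDAzMmMxMjg3NmJjNjg0OA== decodes to the 16-byte value d032c12876bc6848, which is reused as both key and IV. A minimal Python equivalent using pycryptodome (illustrative only; aes_encrypt and aes_decrypt are made-up names):

from base64 import b64decode, b64encode
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

KEY = b64decode("ZDAzMmMxMjg3NmJjNjg0OA==")  # 16-byte key "d032c12876bc6848"
IV = KEY                                     # the rule reuses the same value as IV

def aes_encrypt(plain: str) -> str:
    cipher = AES.new(KEY, AES.MODE_CBC, IV)
    return b64encode(cipher.encrypt(pad(plain.encode("utf-8"), AES.block_size))).decode()

def aes_decrypt(cipher_b64: str) -> str:
    cipher = AES.new(KEY, AES.MODE_CBC, IV)
    return unpad(cipher.decrypt(b64decode(cipher_b64)), AES.block_size).decode("utf-8")

A quick round trip, aes_decrypt(aes_encrypt(s)) == s, is enough to confirm the parameters match the rule's helpers.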
globalThis.vod1 = function(t, pg) {
let html1 = request(h_ost + 'api.php/getappapi.index/typeFilterVodList', {
body: {
area: '全部',
year: '全部',
type_id: t,
page: pg,
sort: '最新',
lang: '全部',
class: '全部'
},
headers: {
'User-Agent': 'okhttp/3.14.9',
'Content-Type': 'application/x-www-form-urlencoded'
},
'method': 'POST'
}, true);
let html = JSON.parse(html1);
return (AES_Decrypt(html.data));
}
globalThis.vodids = function(ids) {
let html1 = fetch(h_ost + 'api.php/getappapi.index/vodDetail', {
method: 'POST',
headers: {
'User-Agent': 'okhttp/3.14.9',
'Content-Type': 'application/x-www-form-urlencoded'
},
body: {
vod_id: ids,
}
});
let html = JSON.parse(html1);
const rdata = JSON.parse(AES_Decrypt(html.data));
console.log(rdata);
const data = {
vod_id: ids,
vod_name: rdata.vod.vod_name,
vod_remarks: rdata.vod.vod_remarks,
vod_actor: rdata.vod.vod_actor,
vod_director: rdata.vod.vod_director,
vod_content: rdata.vod.vod_content,
vod_play_from: '',
vod_play_url: ''
};
rdata.vod_play_list.forEach((value) => {
data.vod_play_from += value.player_info.show + '$$$';
value.urls.forEach((v) => {
data.vod_play_url += v.name + '$' + value.player_info.parse + '~' + v.url + '~' + rdata.vod.vod_name + '~' + v.name + '#';
});
data.vod_play_url += '$$$';
});
return data;
}
//搜索
globalThis.ssvod = function(wd) {
var html1 = fetch(h_ost + 'api.php/getappapi.index/searchList', {
method: 'POST',
headers: {
'User-Agent': 'okhttp/3.14.9',
'Content-Type': 'application/x-www-form-urlencoded'
},
body: {
keywords: wd,
typepage_id: 1,
}
});
let html = JSON.parse(html1);
return AES_Decrypt(html.data);
}
//解析
globalThis.jxx = function(id, url, name, juji) {
try {
if (id.includes('xmflv')) {
return {
parse: 1,
url: id + url,
jx: 0,
danmaku: 'http://103.45.162.207:25252/hbdm.php?key=7894561232&id=' + '&jm=' + name + '&js=' + juji + '&key=741852963'
};
}
//log(id);
if (url.includes('m3u8')) {
return {
parse: 0,
url: url,
jx: 1,
danmaku: 'http://103.45.162.207:25252/hbdm.php?key=7894561232&id=' + '&jm=' + name + '&js=' + juji + '&key=741852963'
};
}
if (id.includes('http')) {
let purl = JSON.parse(request(id + url)).url;
return {
parse: 0,
url: purl,
jx: 0,
danmaku: 'http://103.45.162.207:25252/hbdm.php?key=7894561232&id=' + '&jm=' + name + '&js=' + juji + '&key=741852963'
};
}
let html1 = request(h_ost + 'api.php/getappapi.index/vodParse', {
method: 'POST',
headers: {
'User-Agent': 'okhttp/3.14.9',
'Content-Type': 'application/x-www-form-urlencoded'
},
body: {
parse_api: id,
url: AES_Encrypt(url),
}
});
let html = AES_Decrypt(JSON.parse(html1).data);
console.log(html1);
let decry = html.replace(/\n/g, '').replace(/\\/g, '');
let matches = decry.match(/"url":"([^"]+)"/);
if (!matches || matches[1] === null) {
matches = decry.match(/"url": "([^"]+)"/);
}
return {
parse: 0,
url: matches[1],
jx: 0,
danmaku: 'http://103.45.162.207:25252/hbdm.php?key=7894561232&id=' + '&jm=' + name + '&js=' + juji + '&key=741852963'
};
} catch {
return {
parse: 0,
url: '解析失败',
jx: 0
};
}
}
var rule = {
title: '米兔[资]',
host: '',
detailUrl: 'fyid',
searchUrl: '**',
url: 'fyclass',
searchable: 2,
quickSearch: 1,
filterable: 0,
class_name: '电影&电视剧&综艺&动漫',
class_url: '1&2&3&4',
play_parse: true,
lazy: $js.toString(() => {
const parts = input.split('~');
input = jxx(parts[0], parts[1], parts[2], parts[3]);
}),
推荐: $js.toString(() => {
let data = vod1(0, 0);
let bata = JSON.parse(data).recommend_list;
bata.forEach(it => {
d.push({
url: it.vod_id,
title: it.vod_name,
img: it.vod_pic,
desc: it.vod_remarks
});
});
setResult(d);
}),
一级: $js.toString(() => {
let data = vod1(input, MY_PAGE);
let bata = JSON.parse(data).recommend_list;
bata.forEach(it => {
d.push({
url: it.vod_id,
title: it.vod_name,
img: it.vod_pic,
desc: it.vod_remarks
});
});
setResult(d);
}),
二级: $js.toString(() => {
console.log("调试信息2" + input);
let data = vodids(input);
//console.log(data);
VOD = data;
}),
搜索: $js.toString(() => {
let data = ssvod(input);
let bata = JSON.parse(data).search_list;
bata.forEach(it => {
d.push({
url: it.vod_id,
title: it.vod_name,
img: it.vod_pic,
desc: it.vod_remarks
});
});
// console.log(data);
setResult(d);
}),
}

View File

@ -1,743 +0,0 @@
// Address publishing page: https://subaibai.vip/
// Search requires numeric verification
var rule = {
title: '素白白',
// host:'https://www.subaibaiys.com',
host: 'https://subaibai.vip',
hostJs: 'print(HOST);let html=request(HOST,{headers:{"User-Agent":PC_UA}});let src = jsp.pdfh(html,".go:eq(0)&&a&&href");print(src);HOST=src', //网页域名根动态抓取js代码。通过HOST=赋值
// url:'/fyclass/page/fypage',
url: '/fyclassfyfilter',
filterable: 1, //是否启用分类筛选,
filter_url: '{{fl.area}}{{fl.year}}{{fl.class}}{{fl.cateId}}/page/fypage',
filter: {
"movie_bt": [{
"key": "cateId",
"name": "分类",
"value": [{
"n": "全部",
"v": ""
}, {
"v": "/movie_bt_series/dongmanju",
"n": "动漫剧"
}, {
"v": "/movie_bt_series/dongmandy",
"n": "动漫电影"
}, {
"v": "/movie_bt_series/yindudy",
"n": "印度电影"
}, {
"v": "/movie_bt_series/guochanju",
"n": "国产剧"
}, {
"v": "/movie_bt_series/guochandy",
"n": "国产电影"
}, {
"v": "/movie_bt_series/riju",
"n": "日剧"
}, {
"v": "/movie_bt_series/rihandy",
"n": "日韩电影"
}, {
"v": "/movie_bt_series/oumeiju",
"n": "欧美剧"
}, {
"v": "/movie_bt_series/oumeidy",
"n": "欧美电影"
}, {
"v": "/movie_bt_series/taiju",
"n": "泰剧"
}, {
"v": "/movie_bt_series/thaidy",
"n": "泰国电影"
}, {
"v": "/movie_bt_series/gangju",
"n": "港台剧"
}, {
"v": "/movie_bt_series/gangtaidy",
"n": "港台电影"
}, {
"v": "/movie_bt_series/documentary",
"n": "纪录片"
}, {
"v": "/movie_bt_series/zongyi",
"n": "综艺"
}, {
"v": "/movie_bt_series/hanju",
"n": "韩剧"
}, {
"v": "/movie_bt_series/xianggangdy",
"n": "香港经典电影"
}]
}, {
"key": "class",
"name": "类型",
"value": [{
"n": "全部",
"v": ""
}, {
"v": "/movie_bt_tags/pop-popular",
"n": "POP流行"
}, {
"v": "/movie_bt_tags/biography",
"n": "传记"
}, {
"v": "/movie_bt_tags/child",
"n": "儿童"
}, {
"v": "/movie_bt_tags/adventure",
"n": "冒险"
}, {
"v": "/movie_bt_tags/plot",
"n": "剧情"
}, {
"v": "/movie_bt_tags/action",
"n": "动作"
}, {
"v": "/movie_bt_tags/anime",
"n": "动漫"
}, {
"v": "/movie_bt_tags/animation",
"n": "动画"
}, {
"v": "/movie_bt_tags/history",
"n": "历史"
}, {
"v": "/movie_bt_tags/costume",
"n": "古装"
}, {
"v": "/movie_bt_tags/antiquity",
"n": "古风"
}, {
"v": "/movie_bt_tags/homosexual",
"n": "同性"
}, {
"v": "/movie_bt_tags/comedy",
"n": "喜剧"
}, {
"v": "/movie_bt_tags/fantasy",
"n": "奇幻"
}, {
"v": "/movie_bt_tags/family",
"n": "家庭"
}, {
"v": "/movie_bt_tags/terror",
"n": "恐怖"
}, {
"v": "/movie_bt_tags/suspense",
"n": "悬疑"
}, {
"v": "/movie_bt_tags/erotic",
"n": "情色"
}, {
"v": "/movie_bt_tags/thriller",
"n": "惊悚"
}, {
"v": "/movie_bt_tags/drama",
"n": "戏曲"
}, {
"v": "/movie_bt_tags/war",
"n": "战争"
}, {
"v": "/movie_bt_tags/latin",
"n": "拉丁"
}, {
"v": "/movie_bt_tags/funny",
"n": "搞笑"
}, {
"v": "/movie_bt_tags/campus",
"n": "校园"
}, {
"v": "/movie_bt_tags/song-and-dance",
"n": "歌舞"
}, {
"v": "/movie_bt_tags/martial-arts",
"n": "武侠"
}, {
"v": "/movie_bt_tags/disaster",
"n": "灾难"
}, {
"v": "/movie_bt_tags/love",
"n": "爱情"
}, {
"v": "/movie_bt_tags/crime",
"n": "犯罪"
}, {
"v": "/movie_bt_tags/fancy",
"n": "玄幻"
}, {
"v": "/movie_bt_tags/reality-show",
"n": "真人秀"
}, {
"v": "/movie_bt_tags/short-film",
"n": "短片"
}, {
"v": "/movie_bt_tags/kehuan",
"n": "科幻"
}, {
"v": "/movie_bt_tags/documentary",
"n": "纪录片"
}, {
"v": "/movie_bt_tags/talkshow",
"n": "脱口秀"
}, {
"v": "/movie_bt_tags/stageart",
"n": "舞台艺术"
}, {
"v": "/movie_bt_tags/west",
"n": "西部"
}, {
"v": "/movie_bt_tags/sport",
"n": "运动"
}, {
"v": "/movie_bt_tags/youth",
"n": "青春"
}, {
"v": "/movie_bt_tags/music",
"n": "音乐"
}, {
"v": "/movie_bt_tags/ghost",
"n": "鬼怪"
}, {
"v": "/movie_bt_tags/black-film",
"n": "黑色电影"
}]
}, {
"key": "area",
"name": "地区",
"value": [{
"n": "全部",
"v": ""
}, {
"v": "/movie_bt_cat/bhutan",
"n": "不丹"
}, {
"v": "/movie_bt_cat/china",
"n": "中国"
}, {
"v": "/movie_bt_cat/china-taiwan",
"n": "中国台湾"
}, {
"v": "/movie_bt_cat/china-mainland",
"n": "中国大陆"
}, {
"v": "/movie_bt_cat/china-hongkong",
"n": "中国香港"
}, {
"v": "/movie_bt_cat/denmark",
"n": "丹麦"
}, {
"v": "/movie_bt_cat/ukraine",
"n": "乌克兰"
}, {
"v": "/movie_bt_cat/uruguay",
"n": "乌拉圭"
}, {
"v": "/movie_bt_cat/israel",
"n": "以色列"
}, {
"v": "/movie_bt_cat/iraq",
"n": "伊拉克"
}, {
"v": "/movie_bt_cat/iran",
"n": "伊朗"
}, {
"v": "/movie_bt_cat/russia",
"n": "俄罗斯"
}, {
"v": "/movie_bt_cat/bulgaria",
"n": "保加利亚"
}, {
"v": "/movie_bt_cat/croatia",
"n": "克罗地亚"
}, {
"v": "/movie_bt_cat/iceland",
"n": "冰岛"
}, {
"v": "/movie_bt_cat/canada",
"n": "加拿大"
}, {
"v": "/movie_bt_cat/hungary",
"n": "匈牙利"
}, {
"v": "/movie_bt_cat/south-africa",
"n": "南非"
}, {
"v": "/movie_bt_cat/botswana",
"n": "博茨瓦纳"
}, {
"v": "/movie_bt_cat/qatar",
"n": "卡塔尔"
}, {
"v": "/movie_bt_cat/luxembourg",
"n": "卢森堡"
}, {
"v": "/movie_bt_cat/india",
"n": "印度"
}, {
"v": "/movie_bt_cat/indonesia",
"n": "印度尼西亚"
}, {
"v": "/movie_bt_cat/kazakhstan",
"n": "哈萨克斯坦"
}, {
"v": "/movie_bt_cat/colombia",
"n": "哥伦比亚"
}, {
"v": "/movie_bt_cat/turkey",
"n": "土耳其"
}, {
"v": "/movie_bt_cat/serbia",
"n": "塞尔维亚"
}, {
"v": "/movie_bt_cat/cyprus",
"n": "塞浦路斯"
}, {
"v": "/movie_bt_cat/mexico",
"n": "墨西哥"
}, {
"v": "/movie_bt_cat/dominica",
"n": "多米尼加"
}, {
"v": "/movie_bt_cat/austria",
"n": "奥地利"
}, {
"v": "/movie_bt_cat/venezuela",
"n": "委内瑞拉"
}, {
"v": "/movie_bt_cat/nigeria",
"n": "尼日利亚"
}, {
"v": "/movie_bt_cat/巴基斯坦",
"n": "巴基斯坦"
}, {
"v": "/movie_bt_cat/paraguay",
"n": "巴拉圭"
}, {
"v": "/movie_bt_cat/brazil",
"n": "巴西"
}, {
"v": "/movie_bt_cat/greece",
"n": "希腊"
}, {
"v": "/movie_bt_cat/germany",
"n": "德国"
}, {
"v": "/movie_bt_cat/italy",
"n": "意大利"
}, {
"v": "/movie_bt_cat/latvia",
"n": "拉脱维亚"
}, {
"v": "/movie_bt_cat/norway",
"n": "挪威"
}, {
"v": "/movie_bt_cat/chech",
"n": "捷克"
}, {
"v": "/movie_bt_cat/摩洛哥",
"n": "摩洛哥"
}, {
"v": "/movie_bt_cat/斯洛伐克",
"n": "斯洛伐克"
}, {
"v": "/movie_bt_cat/slovenia",
"n": "斯洛文尼亚"
}, {
"v": "/movie_bt_cat/singapore",
"n": "新加坡"
}, {
"v": "/movie_bt_cat/zealand",
"n": "新西兰"
}, {
"v": "/movie_bt_cat/japan",
"n": "日本"
}, {
"v": "/movie_bt_cat/chile",
"n": "智利"
}, {
"v": "/movie_bt_cat/north-korea",
"n": "朝鲜"
}, {
"v": "/movie_bt_cat/cambodia",
"n": "柬埔寨"
}, {
"v": "/movie_bt_cat/georgia",
"n": "格鲁吉亚"
}, {
"v": "/movie_bt_cat/belgium",
"n": "比利时"
}, {
"v": "/movie_bt_cat/saudi-arabia",
"n": "沙特阿拉伯"
}, {
"v": "/movie_bt_cat/france",
"n": "法国"
}, {
"v": "/movie_bt_cat/poland",
"n": "波兰"
}, {
"v": "/movie_bt_cat/puertorco",
"n": "波多黎各"
}, {
"v": "/movie_bt_cat/bohei",
"n": "波黑"
}, {
"v": "/movie_bt_cat/thailand",
"n": "泰国"
}, {
"v": "/movie_bt_cat/australia",
"n": "澳大利亚"
}, {
"v": "/movie_bt_cat/ireland",
"n": "爱尔兰"
}, {
"v": "/movie_bt_cat/estonia",
"n": "爱沙尼亚"
}, {
"v": "/movie_bt_cat/sweden",
"n": "瑞典"
}, {
"v": "/movie_bt_cat/switzerland",
"n": "瑞士"
}, {
"v": "/movie_bt_cat/belarus",
"n": "白俄罗斯"
}, {
"v": "/movie_bt_cat/peru",
"n": "秘鲁"
}, {
"v": "/movie_bt_cat/tunisia",
"n": "突尼斯"
}, {
"v": "/movie_bt_cat/lithuania",
"n": "立陶宛"
}, {
"v": "/movie_bt_cat/romania",
"n": "罗马尼亚"
}, {
"v": "/movie_bt_cat/america",
"n": "美国"
}, {
"v": "/movie_bt_cat/finland",
"n": "芬兰"
}, {
"v": "/movie_bt_cat/sovietunion",
"n": "苏联"
}, {
"v": "/movie_bt_cat/england",
"n": "英国"
}, {
"v": "/movie_bt_cat/netherlands",
"n": "荷兰"
}, {
"v": "/movie_bt_cat/philippines",
"n": "菲律宾"
}, {
"v": "/movie_bt_cat/葡萄牙",
"n": "葡萄牙"
}, {
"v": "/movie_bt_cat/west-germany",
"n": "西德"
}, {
"v": "/movie_bt_cat/spain",
"n": "西班牙"
}, {
"v": "/movie_bt_cat/vietnam",
"n": "越南"
}, {
"v": "/movie_bt_cat/argentina",
"n": "阿根廷"
}, {
"v": "/movie_bt_cat/korea",
"n": "韩国"
}, {
"v": "/movie_bt_cat/malaysia",
"n": "马来西亚"
}, {
"v": "/movie_bt_cat/马耳他",
"n": "马耳他"
}]
}, {
"key": "year",
"name": "年份",
"value": [{
"n": "全部",
"v": ""
}, {
"v": "/year/2024",
"n": "2024"
}, {
"v": "/year/2023",
"n": "2023"
}, {
"v": "/year/2022",
"n": "2022"
}, {
"v": "/year/2021",
"n": "2021"
}, {
"v": "/year/2020",
"n": "2020"
}, {
"v": "/year/2019",
"n": "2019"
}, {
"v": "/year/2018",
"n": "2018"
}, {
"v": "/year/2017",
"n": "2017"
}, {
"v": "/year/2016",
"n": "2016"
}, {
"v": "/year/2015",
"n": "2015"
}, {
"v": "/year/2014",
"n": "2014"
}, {
"v": "/year/2013",
"n": "2013"
}, {
"v": "/year/2012",
"n": "2012"
}, {
"v": "/year/2011",
"n": "2011"
}, {
"v": "/year/2010",
"n": "2010"
}, {
"v": "/year/2009",
"n": "2009"
}, {
"v": "/year/2008",
"n": "2008"
}, {
"v": "/year/2007",
"n": "2007"
}, {
"v": "/year/2006",
"n": "2006"
}, {
"v": "/year/2005",
"n": "2005"
}, {
"v": "/year/2004",
"n": "2004"
}, {
"v": "/year/2003",
"n": "2003"
}, {
"v": "/year/2002",
"n": "2002"
}, {
"v": "/year/2001",
"n": "2001"
}, {
"v": "/year/2000",
"n": "2000"
}, {
"v": "/year/1999",
"n": "1999"
}, {
"v": "/year/1998",
"n": "1998"
}, {
"v": "/year/1997",
"n": "1997"
}, {
"v": "/year/1996",
"n": "1996"
}, {
"v": "/year/1995",
"n": "1995"
}, {
"v": "/year/1994",
"n": "1994"
}, {
"v": "/year/1993",
"n": "1993"
}, {
"v": "/year/1992",
"n": "1992"
}, {
"v": "/year/1991",
"n": "1991"
}, {
"v": "/year/1990",
"n": "1990"
}, {
"v": "/year/1989",
"n": "1989"
}, {
"v": "/year/1988",
"n": "1988"
}, {
"v": "/year/1987",
"n": "1987"
}, {
"v": "/year/1986",
"n": "1986"
}, {
"v": "/year/1985",
"n": "1985"
}, {
"v": "/year/1984",
"n": "1984"
}, {
"v": "/year/1983",
"n": "1983"
}, {
"v": "/year/1982",
"n": "1982"
}, {
"v": "/year/1981",
"n": "1981"
}, {
"v": "/year/1980",
"n": "1980"
}, {
"v": "/year/1979",
"n": "1979"
}, {
"v": "/year/1978",
"n": "1978"
}, {
"v": "/year/1977",
"n": "1977"
}, {
"v": "/year/1976",
"n": "1976"
}, {
"v": "/year/1975",
"n": "1975"
}, {
"v": "/year/1974",
"n": "1974"
}, {
"v": "/year/1973",
"n": "1973"
}, {
"v": "/year/1972",
"n": "1972"
}, {
"v": "/year/1971",
"n": "1971"
}, {
"v": "/year/1970",
"n": "1970"
}, {
"v": "/year/1969",
"n": "1969"
}, {
"v": "/year/1968",
"n": "1968"
}, {
"v": "/year/1967",
"n": "1967"
}, {
"v": "/year/1966",
"n": "1966"
}, {
"v": "/year/1965",
"n": "1965"
}, {
"v": "/year/1964",
"n": "1964"
}, {
"v": "/year/1963",
"n": "1963"
}, {
"v": "/year/1962",
"n": "1962"
}, {
"v": "/year/1960",
"n": "1960"
}, {
"v": "/year/1959",
"n": "1959"
}, {
"v": "/year/1954",
"n": "1954"
}, {
"v": "/year/1952",
"n": "1952"
}, {
"v": "/year/1950",
"n": "1950"
}, {
"v": "/year/1949",
"n": "1949"
}, {
"v": "/year/1948",
"n": "1948"
}, {
"v": "/year/1940",
"n": "1940"
}, {
"v": "/year/1939",
"n": "1939"
}, {
"v": "/year/1925",
"n": "1925"
}]
}]
},
// searchUrl:'/search?q=**',
searchUrl: '/search?q=**',
searchable: 2, //是否启用全局搜索,
quickSearch: 0, //是否启用快速搜索,
headers: {
'User-Agent': 'UC_UA',
},
class_parse: '.navlist&&li;a&&Text;a&&href;.*/([^/]+)',
play_parse: true,
// lazy:'',
lazy: `js:
pdfh = jsp.pdfh;
var html = request(input);
var ohtml = pdfh(html, '.videoplay&&Html');
var url = pdfh(ohtml, "body&&iframe&&src");
if (/Cloud/.test(url)) {
var ifrwy = request(url);
let code = ifrwy.match(/var url = '(.*?)'/)[1].split('').reverse().join('');
let temp = '';
for (let i = 0x0; i < code.length; i = i + 0x2) {
temp += String.fromCharCode(parseInt(code[i] + code[i + 0x1], 0x10))
}
input = {
jx: 0,
url: temp.substring(0x0, (temp.length - 0x7) / 0x2) + temp.substring((temp.length - 0x7) / 0x2 + 0x7),
parse: 0
}
} else if (/decrypted/.test(ohtml)) {
var phtml = pdfh(ohtml, "body&&script:not([src])&&Html");
eval(getCryptoJS());
var scrpt = phtml.match(/var.*?\\)\\);/g)[0];
var data = [];
eval(scrpt.replace(/md5/g, 'CryptoJS').replace('eval', 'data = '));
input = {
jx: 0,
url: data.match(/url:.*?[\\'\\"](.*?)[\\'\\"]/)[1],
parse: 0
}
} else {
input
}
`,
limit: 6,
推荐: '.leibox&&li;*;*;*;*',
// double:true, // 推荐内容是否双层定位
一级: '.mrb&&li;img&&alt;img&&data-original;.jidi&&Text;a&&href',
二级: {
"title": "h1&&Text;.moviedteail_list&&li:eq(0)&&Text",
"img": ".dyimg&&img&&src",
"desc": ".moviedteail_list&&li:eq(-1)&&Text;;;.moviedteail_list&&li:eq(7)&&Text;.moviedteail_list&&li:eq(5)&&Text",
"content": ".yp_context&&p&&Text",
"tabs": ".mi_paly_box .ypxingq_t--span",
"lists": ".paly_list_btn:eq(#id) a"
},
搜索: '.search_list&&li;img&&alt;img&&data-original;.nostag&&Text;a&&href',
}
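
In the lazy block above, the Cloud branch recovers the stream URL from an obfuscated string: the value captured from var url = '...' is reversed, decoded as hex pairs, and then seven filler characters are cut out of the middle. A minimal Python sketch of that decode (illustrative only; decode_cloud_url and code_js are made-up names):

def decode_cloud_url(code_js: str) -> str:
    # 1) reverse the captured string
    code = code_js[::-1]
    # 2) decode it as hex character pairs
    temp = "".join(chr(int(code[i:i + 2], 16)) for i in range(0, len(code), 2))
    # 3) drop the 7 filler characters inserted in the middle
    half = (len(temp) - 7) // 2
    return temp[:half] + temp[half + 7:]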

View File

@ -670,14 +670,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@ -685,7 +685,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),

View File

@ -679,14 +679,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@ -694,7 +694,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),

View File

@ -1,37 +0,0 @@
var rule = {
title: '追剧迷',
模板: 'vfed',
host: 'https://www.zhuijumi.cc',
// url:'/videotype/fyclass-fypage.html',
url: '/mp4type/fyclass-fypage.html',
searchUrl: '/mp4search/-.html?wd=**',
class_parse: '.fed-part-tips li;a&&Text;a&&href;.*/(.*?).html',
cate_exclude: '更多|申请',
lazy: `js:
var html = JSON.parse(request(input).match(/r player_.*?=(.*?)</)[1]);
var url = html.url;
if (html.encrypt == '1') {
url = unescape(url)
} else if (html.encrypt == '2') {
url = unescape(base64Decode(url))
}
if (/\\.m3u8|\\.mp4/.test(url)) {
input = {
jx: 0,
url: url,
parse: 0
}
} else {
input
}
`,
二级: {
"title": "h1&&Text;.fed-col-xs6--span:eq(0)&&Text",
"img": ".fed-list-info&&a&&data-original",
"desc": ".fed-col-xs12.fed-part-eone:eq(3)&&Text;;;.fed-col-xs12.fed-part-eone--span:eq(0)&&Text;.fed-col-xs12.fed-part-eone--span:eq(1)&&Text",
"content": ".fed-conv-text:eq(0)&&Text",
"tabs": "ul.fed-padding&&li",
"lists": ".fed-tabs-btm:eq(#id) li"
},
搜索: '.fed-list-deta;h1&&Text;.fed-lazy&&data-original;.fed-list-remarks&&Text;a&&href;.fed-col-xs12.fed-part-eone:eq(2)&&Text',
}
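
The lazy block above reads the player_ JSON embedded in the page and, depending on its encrypt flag, either URL-unescapes the url field (encrypt == '1') or base64-decodes it first (encrypt == '2'). A rough Python sketch of that branch (illustrative only; decode_player_url is a made-up name, and urllib's unquote is only an approximation of the JS unescape used here):

import base64
from urllib.parse import unquote

def decode_player_url(url: str, encrypt: str) -> str:
    if encrypt == "1":
        return unquote(url)
    if encrypt == "2":
        return unquote(base64.b64decode(url).decode("utf-8"))
    return url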

86
JSON/aliShare.json Normal file
View File

@ -0,0 +1,86 @@
[
{
"name": "我的网盘",
"folders": [
{
"shareId": "",
"folder": "root"
}
]
},
{
"name": "影视一",
"folders": [
{
"shareId": "dW5pJdgF8c9",
"folder": "root"
},
{
"shareId": "Y5wMKfVDD6K",
"folder": "root"
},
{
"shareId": "LEaepiYfxcw",
"folder": "root"
},
{
"shareId": "v1bBBEcNf9p",
"folder": "root"
}
]
},
{
"name": "影视二",
"folders": [
{
"shareId": "uWa9gbM3RJ7",
"folder": "655c7b6c66368f43652a45288146c6e7cb269aa8"
}
]
},
{
"name": "影视三",
"folders": [
{
"shareId": "4ydLxf7VgH7",
"folder": "root"
}
]
},
{
"name": "影视四",
"folders": [
{
"shareId": "5bsnAp5fbCW",
"folder": "root"
}
]
},
{
"name": "影视五",
"folders": [
{
"shareId": "dieULBdYP3D",
"folder": "root"
}
]
},
{
"name": "影视六",
"folders": [
{
"shareId": "sg8CdGUwmUr",
"folder": "root"
}
]
},
{
"name": "影视七",
"folders": [
{
"shareId": "wHPKUENKFsS",
"folder": "root"
}
]
}
]
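
JSON/aliShare.json groups Aliyun share sources by display name, each with one or more shareId/folder pairs ("root" meaning the top of the share; an empty shareId stands for the user's own drive). A small Python sketch of walking this config (illustrative only; it assumes the script runs from the repository root, and how the player app actually consumes the file is not shown in this commit):

import json

with open("JSON/aliShare.json", encoding="utf-8") as f:
    shares = json.load(f)

for group in shares:
    for folder in group["folders"]:
        # Empty shareId = personal drive; otherwise a public share id plus a folder id.
        print(group["name"], folder["shareId"] or "(my drive)", folder["folder"])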

199
JSON/aliyunpansearch.json Normal file
View File

@ -0,0 +1,199 @@
{
"classes": [
{
"type_name": "电影",
"type_id": "dy"
},
{
"type_name": "电视",
"type_id": "ds"
},
{
"type_name": "短剧",
"type_id": "dj"
},
{
"type_name": "综艺",
"type_id": "zy"
},
{
"type_name": "动漫",
"type_id": "dm"
},
{
"type_name": "音乐",
"type_id": "yy"
}
],
"filters": {
"dy": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
],
"ds": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
],
"dj": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
],
"zy": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
],
"dm": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
],
"yy": [
{
"key": "root",
"name": "来源",
"value": [
{
"n": "全部",
"v": "0"
},
{
"n": "夸克",
"v": "2"
},
{
"n": "UC",
"v": "5"
},
{
"n": "阿里",
"v": "1"
},
{
"n": "百度",
"v": "4"
}
]
}
]
},
"siteUrl": "https://ysapi.yingso.fun/v7/ali/all"
}

1461
JSON/bj.json Normal file

File diff suppressed because it is too large

1470
JSON/dawo.json Normal file

File diff suppressed because it is too large

1461
JSON/ex.json Normal file

File diff suppressed because it is too large

1452
JSON/hb.json Normal file

File diff suppressed because it is too large

20
JSON/hm.json Normal file
View File

@ -0,0 +1,20 @@
{
"Classes": [
{
"type_name": "电影",
"type_id": "1"
},
{
"type_name": "剧集",
"type_id": "2"
},
{
"type_name": "动漫",
"type_id": "3"
},
{
"type_name": "综艺",
"type_id": "5"
}
]
}

1667
JSON/lb.json Normal file

File diff suppressed because it is too large

33
JSON/lj.json Normal file
View File

@ -0,0 +1,33 @@
{
"SiteUrl": "https://www.leijing.xyz",
"Classes": [
{
"type_name": "电影",
"type_id": "42204681950354"
},
{
"type_name": "剧集",
"type_id": "42204684250355"
},
{
"type_name": "影视原盘",
"type_id": "42212287587456"
},
{
"type_name": "综艺",
"type_id": "42210356650363"
},
{
"type_name": "动漫",
"type_id": "42204792950357"
},
{
"type_name": "纪录片",
"type_id": "42204697150356"
},
{
"type_name": "演唱会",
"type_id": "42317879720298"
}
]
}

1460
JSON/mogg.json Normal file

File diff suppressed because it is too large

1458
JSON/og.json Normal file

File diff suppressed because it is too large

24
JSON/pan1.json Normal file
View File

@ -0,0 +1,24 @@
{
"Classes": [
{
"type_name": "电影",
"type_id": "2"
},
{
"type_name": "剧集",
"type_id": "48"
},
{
"type_name": "4K原盘",
"type_id": "56"
},
{
"type_name": "综艺",
"type_id": "52"
},
{
"type_name": "动漫",
"type_id": "37"
}
]
}

47
JSON/quarkShare.json Normal file
View File

@ -0,0 +1,47 @@
[
{
"name": "我的网盘",
"folders": [
{
"shareId": "",
"folder": "0"
}
]
},
{
"name": "幼儿教育",
"folders": [
{
"shareId": "a08f66152533",
"folder": "0"
}
]
},
{
"name": "4K影视",
"folders": [
{
"shareId": "7568042397a9",
"folder": "0a9f0d04a8704f35b18763948ece0593"
}
]
},
{
"name": "短剧合集1",
"folders": [
{
"shareId": "885fd4ba2d92",
"folder": "81ca012717cb45228f237e26d8da20c8"
}
]
},
{
"name": "短剧合集2",
"folders": [
{
"shareId": "a1cda418984f",
"folder": "7e09c18d7f8045f983eca086be8ddb8f"
}
]
}
]

1457
JSON/sd.json Normal file

File diff suppressed because it is too large

11
JSON/ucShare.json Normal file
View File

@ -0,0 +1,11 @@
[
{
"name": "我的网盘",
"folders": [
{
"shareId": "",
"folder": "0"
}
]
}
]

View File

@ -1,16 +0,0 @@
{
"drives": [
{
"name": "七米藍",
"server": "https://al.chirmyram.com/dav",
"user": "alist",
"pass": "alist"
},
{
"name": "影視庫",
"server": "https://esir.eu.org/dav",
"user": "alist",
"pass": "alist"
}
]
}

2112
JSON/wogg.json Normal file

File diff suppressed because it is too large

1461
JSON/xf.json Normal file

File diff suppressed because it is too large

1457
JSON/xm.json Normal file

File diff suppressed because it is too large

1674
JSON/yyds.json Normal file

File diff suppressed because it is too large

1703
JSON/zz.json Normal file

File diff suppressed because it is too large

301
PY/优酷视频.py Normal file
View File

@ -0,0 +1,301 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import quote
from Crypto.Hash import MD5
import requests
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
self.session = requests.Session()
self.session.headers.update(self.headers)
self.session.cookies.update(self.cookie)
self.get_ctoken()
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host='https://www.youku.com'
shost='https://search.youku.com'
h5host='https://acs.youku.com'
ihost='https://v.youku.com'
headers = {
'User-Agent': 'Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)',
'Referer': f'{host}/'
}
cookie={
"__ysuid": "17416134165380iB",
"__aysid": "1741613416541WbD",
"xlly_s": "1",
"isI18n": "false",
"cna": "bNdVIKmmsHgCAXW9W6yrQ1/s",
"__ayft": "1741672162330",
"__arpvid": "1741672162331FBKgrn-1741672162342",
"__ayscnt": "1",
"__aypstp": "1",
"__ayspstp": "3",
"tfstk": "gZbiib4JpG-6DqW-B98_2rwPuFrd1fTXQt3vHEp4YpJIBA3OgrWcwOi90RTOo9XVQ5tAM5NcK_CP6Ep97K2ce1XDc59v3KXAgGFLyzC11ET2n8U8yoyib67M3xL25e8gS8pbyzC1_ET4e8URWTsSnHv2uh8VTeJBgEuN3d-ELQAWuKWV36PHGpJ2uEWVTxvicLX1ewyUXYSekxMf-CxMEqpnoqVvshvP_pABOwvXjL5wKqeulm52np_zpkfCDGW9Ot4uKFIRwZtP7vP9_gfAr3KEpDWXSIfWRay-DHIc_Z-hAzkD1i5Ooi5LZ0O5YO_1mUc476YMI3R6xzucUnRlNe_zemKdm172xMwr2L7CTgIkbvndhFAVh3_YFV9Ng__52U4SQKIdZZjc4diE4EUxlFrfKmiXbBOHeP72v7sAahuTtWm78hRB1yV3tmg9bBOEhWVnq5KwOBL5."
}
def homeContent(self, filter):
result = {}
categories = ["电视剧", "电影", "综艺", "动漫", "少儿", "纪录片", "文化", "亲子", "教育", "搞笑", "生活",
"体育", "音乐", "游戏"]
classes = [{'type_name': category, 'type_id': category} for category in categories]
filters = {}
self.typeid = {}
with ThreadPoolExecutor(max_workers=len(categories)) as executor:
tasks = {
executor.submit(self.cf, {'type': category}, True): category
for category in categories
}
for future in as_completed(tasks):
try:
category = tasks[future]
session, ft = future.result()
filters[category] = ft
self.typeid[category] = session
except Exception as e:
print(f"处理分类 {tasks[future]} 时出错: {str(e)}")
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
try:
vlist = []
params={"ms_codes":"2019061000","params":"{\"debug\":0,\"gray\":0,\"pageNo\":1,\"utdid\":\"ZYmGMAAAACkDAMU8hbiMmYdd\",\"userId\":\"\",\"bizKey\":\"YOUKU_WEB\",\"appPackageKey\":\"com.youku.YouKu\",\"showNodeList\":0,\"reqSubNode\":0,\"nodeKey\":\"WEBHOME\",\"bizContext\":\"{\\\"spmA\\\":\\\"a2hja\\\"}\"}","system_info":"{\"device\":\"pcweb\",\"os\":\"pcweb\",\"ver\":\"1.0.0.0\",\"userAgent\":\"Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)\",\"guid\":\"1590141704165YXe\",\"appPackageKey\":\"com.youku.pcweb\",\"young\":0,\"brand\":\"\",\"network\":\"\",\"ouid\":\"\",\"idfa\":\"\",\"scale\":\"\",\"operator\":\"\",\"resolution\":\"\",\"pid\":\"\",\"childGender\":0,\"zx\":0}"}
data=self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.home.query/1.0/',params)
okey=list(data['data'].keys())[0]
for i in data['data'][okey]['data']['nodes'][0]['nodes'][-1]['nodes'][0]['nodes']:
if i.get('nodes') and i['nodes'][0].get('data'):
i=i['nodes'][0]['data']
if i.get('assignId'):
vlist.append({
'vod_id': i['assignId'],
'vod_name': i.get('title'),
'vod_pic': i.get('vImg') or i.get('img'),
'vod_year': i.get('mark',{}).get('data',{}).get('text'),
'vod_remarks': i.get('summary')
})
return {'list': vlist}
except Exception as e:
print(f"处理主页视频数据时出错: {str(e)}")
return {'list': []}
def categoryContent(self, tid, pg, filter, extend):
result = {}
vlist = []
result['page'] = pg
result['limit'] = 90
result['total'] = 999999
pagecount = 9999
params = {'type': tid}
id = self.typeid[tid]
params.update(extend)
if pg == '1':
id=self.cf(params)
data=self.session.get(f'{self.host}/category/data?session={id}&params={quote(json.dumps(params))}&pageNo={pg}').json()
try:
data=data['data']['filterData']
for i in data['listData']:
if i.get('videoLink') and 's=' in i['videoLink']:
vlist.append({
'vod_id': i.get('videoLink').split('s=')[-1],
'vod_name': i.get('title'),
'vod_pic': i.get('img'),
'vod_year': i.get('rightTagText'),
'vod_remarks': i.get('summary')
})
self.typeid[tid]=quote(json.dumps(data['session']))
except:
pagecount=pg
result['list'] = vlist
result['pagecount'] = pagecount
return result
def detailContent(self, ids):
try:
data=self.session.get(f'{self.ihost}/v_getvideo_info/?showId={ids[0]}').json()
v=data['data']
vod = {
'type_name': v.get('showVideotype'),
'vod_year': v.get('lastUpdate'),
'vod_remarks': v.get('rc_title'),
'vod_actor': v.get('_personNameStr'),
'vod_content': v.get('showdesc'),
'vod_play_from': '优酷',
'vod_play_url': ''
}
params={"biz":"new_detail_web2","videoId":v.get('vid'),"scene":"web_page","componentVersion":"3","ip":data.get('ip'),"debug":0,"utdid":"ZYmGMAAAACkDAMU8hbiMmYdd","userId":0,"platform":"pc","nextSession":"","gray":0,"source":"pcNoPrev","showId":ids[0]}
sdata,index=self.getinfo(params)
pdata=sdata['nodes']
if index > len(pdata):
batch_size = len(pdata)
total_batches = ((index + batch_size - 1) // batch_size) - 1
ssj = json.loads(sdata['data']['session'])
with ThreadPoolExecutor(max_workers=total_batches) as executor:
futures = []
for batch in range(total_batches):
start = batch_size + 1 + (batch * batch_size)
end = start + batch_size - 1
next_session = ssj.copy()
next_session.update({
"itemStartStage": start,
"itemEndStage": min(end, index)
})
current_params = params.copy()
current_params['nextSession'] = json.dumps(next_session)
futures.append((start, executor.submit(self.getvinfo, current_params)))
futures.sort(key=lambda x: x[0])
for _, future in futures:
try:
result = future.result()
pdata.extend(result['nodes'])
except Exception as e:
print(f"Error fetching data: {str(e)}")
vod['vod_play_url'] = '#'.join([f"{i['data'].get('title')}${i['data']['action'].get('value')}" for i in pdata])
return {'list': [vod]}
except Exception as e:
print(e)
return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'呜呜呜${self.host}'}]}
def searchContent(self, key, quick, pg="1"):
data=self.session.get(f'{self.shost}/api/search?pg={pg}&keyword={key}').json()
vlist = []
for i in data['pageComponentList']:
if i.get('commonData') and (i['commonData'].get('showId') or i['commonData'].get('realShowId')):
i=i['commonData']
vlist.append({
'vod_id': i.get('showId') or i.get('realShowId'),
'vod_name': i['titleDTO'].get('displayName'),
'vod_pic': i['posterDTO'].get('vThumbUrl'),
'vod_year': i.get('feature'),
'vod_remarks': i.get('updateNotice')
})
return {'list': vlist, 'page': pg}
def playerContent(self, flag, id, vipFlags):
return {'jx':1,'parse': 1, 'url': f"{self.ihost}/video?vid={id}", 'header': ''}
def localProxy(self, param):
pass
def cf(self,params,b=False):
response = self.session.get(f'{self.host}/category/data?params={quote(json.dumps(params))}&optionRefresh=1&pageNo=1').json()
data=response['data']['filterData']
session=quote(json.dumps(data['session']))
if b:
return session,self.get_filter_data(data['filter']['filterData'][1:])
return session
def process_key(self, key):
if '_' not in key:
return key
parts = key.split('_')
result = parts[0]
for part in parts[1:]:
if part:
result += part[0].upper() + part[1:]
return result
def get_filter_data(self, data):
result = []
try:
for item in data:
if not item.get('subFilter'):
continue
first_sub = item['subFilter'][0]
if not first_sub.get('filterType'):
continue
filter_item = {
'key': self.process_key(first_sub['filterType']),
'name': first_sub['title'],
'value': []
}
for sub in item['subFilter']:
if 'value' in sub:
filter_item['value'].append({
'n': sub['title'],
'v': sub['value']
})
if filter_item['value']:
result.append(filter_item)
except Exception as e:
print(f"处理筛选数据时出错: {str(e)}")
return result
def get_ctoken(self):
data=self.session.get(f'{self.h5host}/h5/mtop.ykrec.recommendservice.recommend/1.0/?jsv=2.6.1&appKey=24679788')
def md5(self,t,text):
h = MD5.new()
token=self.session.cookies.get('_m_h5_tk').split('_')[0]
data=f"{token}&{t}&24679788&{text}"
h.update(data.encode('utf-8'))
return h.hexdigest()
def getdata(self, url, params, recursion_count=0, max_recursion=3):
data = json.dumps(params)
t = int(time.time() * 1000)
jsdata = {
'appKey': '24679788',
't': t,
'sign': self.md5(t, data),
'data': data
}
response = self.session.get(url, params=jsdata)
if '令牌过期' in response.text:
if recursion_count >= max_recursion:
raise Exception("达到最大递归次数,无法继续请求")
self.get_ctoken()
return self.getdata(url, params, recursion_count + 1, max_recursion)
else:
return response.json()
def getvinfo(self,params):
body = {
"ms_codes": "2019030100",
"params": json.dumps(params),
"system_info": "{\"os\":\"iku\",\"device\":\"iku\",\"ver\":\"9.2.9\",\"appPackageKey\":\"com.youku.iku\",\"appPackageId\":\"pcweb\"}"
}
data = self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.gateway.new.execute/1.0/', body)
okey = list(data['data'].keys())[0]
i = data['data'][okey]['data']
return i
def getinfo(self,params):
i = self.getvinfo(params)
jdata=i['nodes'][0]['nodes'][3]
info=i['data']['extra']['episodeTotal']
if i['data']['extra']['showCategory'] in ['电影','游戏']:
jdata = i['nodes'][0]['nodes'][4]
return jdata,info
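
getdata() above implements the usual mtop signing handshake: the token from the _m_h5_tk cookie, a millisecond timestamp, the appKey and the JSON payload are joined with '&' and MD5-hashed to form the sign parameter. A standalone Python sketch of that signature (illustrative only; mtop_sign is a made-up name and hashlib stands in for the Crypto.Hash.MD5 import used above):

import hashlib
import json
import time

def mtop_sign(token: str, app_key: str, params: dict) -> dict:
    # sign = md5("<token>&<timestamp ms>&<appKey>&<json payload>")
    data = json.dumps(params)
    t = int(time.time() * 1000)
    sign = hashlib.md5(f"{token}&{t}&{app_key}&{data}".encode("utf-8")).hexdigest()
    return {"appKey": app_key, "t": t, "sign": sign, "data": data}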

View File

@ -112,20 +112,20 @@ class Spider(Spider):
def playerContent(self, flag, id, vipFlags):
ids = json.loads(self.d64(id))
h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
url = ids['url']
p=1
try:
if re.search(r'\?url=', ids['parse_api_url']):
if re.search(r'url=', ids['parse_api_url']):
data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
url = data.get('url') or data['data'].get('url')
elif not re.search(r'\.m3u8|\.mp4', ids.get('url')):
body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes('encrypt', ids['url']))}&token={ids.get('token')}"
else:
body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
url = json.loads(b)['url']
if 'error' in url: raise ValueError(f"解析失败: {url}")
p = 0
except Exception as e:
print('错误信息:', e)
pass
url, p = ids['url'], 1
if re.search(r'\.jpg|\.png|\.jpeg', url):
url = self.Mproxy(url)
result = {}

468
PY/哔哩视频.py Normal file

File diff suppressed because one or more lines are too long

248
PY/爱奇艺.py Normal file
View File

@ -0,0 +1,248 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urlencode
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
self.did = self.random_str(32)
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
rhost = 'https://www.iqiyi.com'
hhost='https://mesh.if.iqiyi.com'
dhost='https://miniapp.iqiyi.com'
headers = {
'Origin': rhost,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
'Referer': f'{rhost}/',
}
def homeContent(self, filter):
result = {}
cateManual = {
"全部": "1009",
"电影": "1",
"剧集": "2",
"综艺": "6",
"动漫": "4",
"儿童": "15",
"微剧": "35",
"纪录片": "3"
}
classes = []
filters = {}
for k in cateManual:
classes.append({
'type_name': k,
'type_id': cateManual[k]
})
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
results = executor.map(self.getf, classes)
for id, ft in results:
if len(ft):filters[id] = ft
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
vlist = []
for i in data['items'][1:]:
for j in i['video'][0]['data']:
id = j.get('firstId')
pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
if id and pic:
pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
id = f'{id}@{self.e64(pu)}'
vlist.append({
'vod_id': id,
'vod_name': j.get('display_name'),
'vod_pic': pic,
'vod_year': j.get('sns_score'),
'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
})
return {'list':vlist}
def categoryContent(self, tid, pg, filter, extend):
if pg == "1":
self.sid = ''
new_data = {'mode':'24'}
for key, value in extend.items():
if value:
key_value_pairs = self.d64(value).split(',')
for pair in key_value_pairs:
k, v = pair.split('=')
if k in new_data:
new_data[k] += "," + v
else:
new_data[k] = v
path=f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
self.sid = data['session']
videos = []
for i in data['data']:
id = i.get('firstId') or i.get('tv_id')
if not id:
id=i.get('play_url').split(';')[0].split('=')[-1]
if id and not i.get('h'):
id=f'{id}@{self.e64(i.get("page_url"))}'
videos.append({
'vod_id': id,
'vod_name': i.get('display_name'),
'vod_pic': i.get('album_image_url_hover'),
'vod_year': i.get('sns_score'),
'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
})
result = {}
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
ids = ids[0].split('@')
ids[-1] = self.d64(ids[-1])
data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
v=data['data']['playInfo']
vod = {
'vod_name': v.get('albumName'),
'type_name': v.get('tags'),
'vod_year': v.get('albumYear'),
'vod_remarks': v.get('updateStrategy'),
'vod_actor': v.get('mainActors'),
'vod_director': v.get('directors'),
'vod_content': v.get('albumDesc'),
'vod_play_from': '爱奇艺',
'vod_play_url': ''
}
if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
pg=data['data']['videoList'].get('totalPages')
if pg and pg > 1:
id = v['albumId']
pages = list(range(2, pg + 1))
page_results = {}
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_page = {
executor.submit(self.fetch_page_data, page, id): page
for page in pages
}
for future in as_completed(future_to_page):
page = future_to_page[future]
try:
result = future.result()
page_results[page] = result
except Exception as e:
print(f"Error fetching page {page}: {e}")
for page in sorted(page_results.keys()):
purl.extend(page_results[page])
vod['vod_play_url'] = '#'.join(purl)
else:
vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
v=vdata['data']['playInfo']
vod = {
'vod_name': v.get('shortTitle'),
'type_name': v.get('channelName'),
'vod_year': v.get('year'),
'vod_remarks': v.get('focus'),
'vod_actor': v.get('mainActors'),
'vod_director': v.get('directors'),
'vod_content': v.get('desc'),
'vod_play_from': '爱奇艺',
'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
}
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
videos = []
vdata=data['data']['templates']
for i in data['data']['templates']:
if i.get('intentAlbumInfos'):
vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata
for i in vdata:
if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
b=i['albumInfo']
id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
videos.append({
'vod_id': id,
'vod_name': b.get('title'),
'vod_pic': b.get('img'),
'vod_year': (b.get('year',{}) or {}).get('value'),
'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
})
return {'list':videos,'page':pg}
def playerContent(self, flag, id, vipFlags):
return {'jx':1,'parse': 1, 'url': id, 'header': ''}
def localProxy(self, param):
pass
def fetch_page_data(self, page, id):
try:
url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
data = self.fetch(url, headers=self.headers).json()
return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
except:
return []
def getf(self,body):
data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
ft = []
# for i in data[:-1]:
for i in data:
try:
value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
value.get('tag_param')]
ft.append({"key": i['group'], "name": i['group'], "value": value_array})
except:
print(i)
return (body['type_id'], ft)
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self,encoded_text: str):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""
def random_str(self,length=16):
hex_chars = '0123456789abcdef'
return ''.join(random.choice(hex_chars) for _ in range(length))
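
In categoryContent() above, every selected filter value is a base64-encoded "k=v,k=v" list; the pairs are folded into one dict (repeated keys become comma-joined values) before being urlencoded into the videolib request. A compact Python sketch of that merge (illustrative only; merge_filters is a made-up name):

from base64 import b64decode
from urllib.parse import urlencode

def merge_filters(extend: dict) -> str:
    merged = {"mode": "24"}
    for value in extend.values():
        if not value:
            continue
        for pair in b64decode(value).decode("utf-8").split(","):
            k, v = pair.split("=")
            merged[k] = merged[k] + "," + v if k in merged else v
    return urlencode(merged)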

768
PY/网络直播.py Normal file
View File

@ -0,0 +1,768 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import re
import sys
import time
from base64 import b64decode, b64encode
from urllib.parse import parse_qs
import requests
from pyquery import PyQuery as pq
sys.path.append('..')
from base.spider import Spider
from concurrent.futures import ThreadPoolExecutor
class Spider(Spider):
def init(self, extend=""):
tid = 'douyin'
headers = self.gethr(0, tid)
response = requests.head(self.hosts[tid], headers=headers)
ttwid = response.cookies.get('ttwid')
headers.update({
'authority': self.hosts[tid].split('//')[-1],
'cookie': f'ttwid={ttwid}' if ttwid else ''
})
self.dyheaders = headers
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
headers = [
{
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
},
{
"User-Agent": "Dart/3.4 (dart:io)"
}
]
excepturl = 'https://www.baidu.com'
hosts = {
"huya": ["https://www.huya.com","https://mp.huya.com"],
"douyin": "https://live.douyin.com",
"douyu": "https://www.douyu.com",
"wangyi": "https://cc.163.com",
"bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
}
referers = {
"huya": "https://live.cdn.huya.com",
"douyin": "https://live.douyin.com",
"douyu": "https://m.douyu.com",
"bili": "https://live.bilibili.com"
}
playheaders = {
"wangyi": {
"User-Agent": "ExoPlayer",
"Connection": "Keep-Alive",
"Icy-MetaData": "1"
},
"bili": {
'Accept': '*/*',
'Icy-MetaData': '1',
'referer': referers['bili'],
'user-agent': headers[0]['User-Agent']
},
'douyin': {
'User-Agent': 'libmpv',
'Icy-MetaData': '1'
},
'huya': {
'User-Agent': 'ExoPlayer',
'Connection': 'Keep-Alive',
'Icy-MetaData': '1'
},
'douyu': {
'User-Agent': 'libmpv',
'Icy-MetaData': '1'
}
}
def process_bili(self):
try:
self.blfdata = self.fetch(
f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
headers=self.gethr(0, 'bili')
).json()
return ('bili', [{'key': 'cate', 'name': '分类',
'value': [{'n': i['name'], 'v': str(i['id'])}
for i in self.blfdata['data']]}])
except Exception as e:
print(f"bili处理错误: {e}")
return 'bili', None
def process_douyin(self):
try:
data = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
for i in data.items():
if 'categoryData' in i.text():
content = i.text()
start = content.find('{')
end = content.rfind('}') + 1
if start != -1 and end != -1:
json_str = content[start:end]
json_str = json_str.replace('\\"', '"')
try:
self.dyifdata = json.loads(json_str)
return ('douyin', [{'key': 'cate', 'name': '分类',
'value': [{'n': i['partition']['title'],
'v': f"{i['partition']['id_str']}@@{i['partition']['title']}"}
for i in self.dyifdata['categoryData']]}])
except json.JSONDecodeError as e:
print(f"douyin解析错误: {e}")
return 'douyin', None
except Exception as e:
print(f"douyin请求或处理错误: {e}")
return 'douyin', None
def process_douyu(self):
try:
self.dyufdata = self.fetch(
f'{self.referers["douyu"]}/api/cate/list',
headers=self.headers[1]
).json()
return ('douyu', [{'key': 'cate', 'name': '分类',
'value': [{'n': i['cate1Name'], 'v': str(i['cate1Id'])}
for i in self.dyufdata['data']['cate1Info']]}])
except Exception as e:
print(f"douyu错误: {e}")
return 'douyu', None
def homeContent(self, filter):
result = {}
cateManual = {
"虎牙": "huya",
"哔哩": "bili",
"抖音": "douyin",
"斗鱼": "douyu",
"网易": "wangyi"
}
classes = []
filters = {
'huya': [{'key': 'cate', 'name': '分类',
'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
{'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
}
with ThreadPoolExecutor(max_workers=3) as executor:
futures = {
executor.submit(self.process_bili): 'bili',
executor.submit(self.process_douyin): 'douyin',
executor.submit(self.process_douyu): 'douyu'
}
for future in futures:
platform, filter_data = future.result()
if filter_data:
filters[platform] = filter_data
for k in cateManual:
classes.append({
'type_name': k,
'type_id': cateManual[k]
})
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
pass
def categoryContent(self, tid, pg, filter, extend):
vdata = []
result = {}
pagecount = 9999
result['page'] = pg
result['limit'] = 90
result['total'] = 999999
if tid == 'wangyi':
vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
elif 'bili' in tid:
vdata, pagecount = self.biliContent(tid, pg, filter, extend, vdata)
elif 'huya' in tid:
vdata, pagecount = self.huyaContent(tid, pg, filter, extend, vdata)
elif 'douyin' in tid:
vdata, pagecount = self.douyinContent(tid, pg, filter, extend, vdata)
elif 'douyu' in tid:
vdata, pagecount = self.douyuContent(tid, pg, filter, extend, vdata)
result['list'] = vdata
result['pagecount'] = pagecount
return result
def wyccContent(self, tid, pg, filter, extend, vdata):
params = {
'format': 'json',
'start': (int(pg) - 1) * 20,
'size': '20',
}
response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
for i in response['lives']:
if i.get('cuteid'):
bvdata = self.buildvod(
vod_id=f"{tid}@@{i['cuteid']}",
vod_name=i.get('title'),
vod_pic=i.get('cover'),
vod_remarks=i.get('nickname'),
style={"type": "rect", "ratio": 1.33}
)
vdata.append(bvdata)
return vdata, 9999
def biliContent(self, tid, pg, filter, extend, vdata):
if extend.get('cate') and pg == '1' and 'click' not in tid:
for i in self.blfdata['data']:
if str(i['id']) == extend['cate']:
for j in i['list']:
v = self.buildvod(
vod_id=f"click_{tid}@@{i['id']}@@{j['id']}",
vod_name=j.get('name'),
vod_pic=j.get('pic'),
vod_tag=1,
style={"type": "oval", "ratio": 1}
)
vdata.append(v)
return vdata, 1
else:
path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
if 'click' in tid:
ids = tid.split('_')[1].split('@@')
tid = ids[0]
path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
data = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
for i in data['data']['list']:
if i.get('roomid'):
data = self.buildvod(
f"{tid}@@{i['roomid']}",
i.get('title'),
i.get('cover'),
i.get('watched_show', {}).get('text_large'),
0,
i.get('uname'),
style={"type": "rect", "ratio": 1.33}
)
vdata.append(data)
return vdata, 9999
def huyaContent(self, tid, pg, filter, extend, vdata):
if extend.get('cate') and pg == '1' and 'click' not in tid:
id = extend.get('cate')
data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={id}',
headers=self.headers[1]).json()
for i in data['data']:
v = self.buildvod(
vod_id=f"click_{tid}@@{int(i['gid'])}",
vod_name=i.get('gameFullName'),
vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(i["gid"])}-MS.jpg',
vod_tag=1,
style={"type": "oval", "ratio": 1}
)
vdata.append(v)
return vdata, 1
else:
gid = ''
if 'click' in tid:
ids = tid.split('_')[1].split('@@')
tid = ids[0]
gid = f'&gameId={ids[1]}'
data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
headers=self.headers[1]).json()
for i in data['data']['datas']:
if i.get('profileRoom'):
v = self.buildvod(
f"{tid}@@{i['profileRoom']}",
i.get('introduction'),
i.get('screenshot'),
                        str(int(i.get('totalCount', '1')) / 10000) + '万',
0,
i.get('nick'),
style={"type": "rect", "ratio": 1.33}
)
vdata.append(v)
return vdata, 9999
def douyinContent(self, tid, pg, filter, extend, vdata):
if extend.get('cate') and pg == '1' and 'click' not in tid:
ids = extend.get('cate').split('@@')
for i in self.dyifdata['categoryData']:
c = i['partition']
if c['id_str'] == ids[0] and c['title'] == ids[1]:
vlist = i['sub_partition'].copy()
vlist.insert(0, {'partition': c})
for j in vlist:
j = j['partition']
v = self.buildvod(
vod_id=f"click_{tid}@@{j['id_str']}@@{j['type']}",
vod_name=j.get('title'),
vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
vod_tag=1,
style={"type": "oval", "ratio": 1}
)
vdata.append(v)
return vdata, 1
else:
path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
if 'click' in tid:
ids = tid.split('_')[1].split('@@')
tid = ids[0]
path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
for i in data['data']['data']:
v = self.buildvod(
vod_id=f"{tid}@@{i['web_rid']}",
vod_name=i['room'].get('title'),
vod_pic=i['room']['cover'].get('url_list')[0],
vod_year=i.get('user_count_str'),
vod_remarks=i['room']['owner'].get('nickname'),
style={"type": "rect", "ratio": 1.33}
)
vdata.append(v)
return vdata, 9999
def douyuContent(self, tid, pg, filter, extend, vdata):
if extend.get('cate') and pg == '1' and 'click' not in tid:
for i in self.dyufdata['data']['cate2Info']:
if str(i['cate1Id']) == extend['cate']:
v = self.buildvod(
vod_id=f"click_{tid}@@{i['cate2Id']}",
vod_name=i.get('cate2Name'),
vod_pic=i.get('icon'),
vod_remarks=i.get('count'),
vod_tag=1,
style={"type": "oval", "ratio": 1}
)
vdata.append(v)
return vdata, 1
else:
path = f'/japi/weblist/apinc/allpage/6/{pg}'
if 'click' in tid:
ids = tid.split('_')[1].split('@@')
tid = ids[0]
path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
url = f'{self.hosts[tid]}{path}'
data = self.fetch(url, headers=self.headers[1]).json()
for i in data['data']['rl']:
v = self.buildvod(
vod_id=f"{tid}@@{i['rid']}",
vod_name=i.get('rn'),
vod_pic=i.get('rs16'),
                    vod_year=str(int(i.get('ol', 1)) / 10000) + '万',
vod_remarks=i.get('nn'),
style={"type": "rect", "ratio": 1.33}
)
vdata.append(v)
return vdata, 9999
def detailContent(self, ids):
ids = ids[0].split('@@')
if ids[0] == 'wangyi':
vod = self.wyccDetail(ids)
elif ids[0] == 'bili':
vod = self.biliDetail(ids)
elif ids[0] == 'huya':
vod = self.huyaDetail(ids)
elif ids[0] == 'douyin':
vod = self.douyinDetail(ids)
elif ids[0] == 'douyu':
vod = self.douyuDetail(ids)
return {'list': [vod]}
def wyccDetail(self, ids):
try:
vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()
def get_quality_name(vbr):
if vbr <= 600:
return "标清"
elif vbr <= 1000:
return "高清"
elif vbr <= 2000:
return "超清"
else:
return "蓝光"
data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
name = data['live'].get('title', ids[0])
vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
vod_content=data.get('description_suffix'))
resolution_data = data['live']['quickplay']['resolution']
all_streams = {}
sorted_qualities = sorted(resolution_data.items(),
key=lambda x: x[1]['vbr'],
reverse=True)
            for quality, res in sorted_qualities:
                quality_name = get_quality_name(res['vbr'])
                for cdn_name, url in res['cdn'].items():
                    # keep only valid http urls, grouped per CDN as [quality, url, ...]
                    if isinstance(url, str) and url.startswith('http'):
                        all_streams.setdefault(cdn_name, []).extend([quality_name, url])
plists = []
names = []
for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
names.append(f'线路{i}')
pstr = f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}"
plists.append(pstr)
vod['vod_play_from'] = "$$$".join(names)
vod['vod_play_url'] = "$$$".join(plists)
return vod
except Exception as e:
return self.handle_exception(e)
def biliDetail(self, ids):
try:
vdata = self.fetch(
f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
headers=self.gethr(0, ids[0])).json()
v = vdata['data']['room_info']
vod = self.buildvod(
vod_name=v.get('title'),
type_name=v.get('parent_area_name') + '/' + v.get('area_name'),
vod_remarks=v.get('tags'),
vod_play_from=v.get('title'),
)
data = self.fetch(
f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
headers=self.gethr(0, ids[0])).json()
vdnams = data['data']['playurl_info']['playurl']['g_qn_desc']
all_accept_qns = []
streams = data['data']['playurl_info']['playurl']['stream']
for stream in streams:
for format_item in stream['format']:
for codec in format_item['codec']:
if 'accept_qn' in codec:
all_accept_qns.append(codec['accept_qn'])
max_accept_qn = max(all_accept_qns, key=len) if all_accept_qns else []
quality_map = {
item['qn']: item['desc']
for item in vdnams
}
quality_names = [f"{quality_map.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in max_accept_qn]
vod['vod_play_url'] = "#".join(quality_names)
return vod
except Exception as e:
return self.handle_exception(e)
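    # huyaDetail: the room API returns one base URL per CDN plus a rate table; a concrete
    # bitrate URL is derived by rewriting 'ratio=2000' (HLS) or 'imgplus.flv' (FLV) with the
    # selected iBitRate. Each CDN becomes one play entry of the form
    # "cdnType$huya@@base64(json([quality, url, ...]))", decoded again in playerContent.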
def huyaDetail(self, ids):
try:
vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
headers=self.headers[0]).json()
v = vdata['data']['liveData']
vod = self.buildvod(
vod_name=v.get('introduction'),
type_name=v.get('gameFullName'),
vod_director=v.get('nick'),
vod_remarks=v.get('contentIntro'),
)
data = dict(reversed(list(vdata['data']['stream'].items())))
names = []
plist = []
for stream_type, stream_data in data.items():
if isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data:
names.append(f"线路{len(names) + 1}")
qualities = sorted(
stream_data['rateArray'],
key=lambda x: (x['iBitRate'], x['sDisplayName']),
reverse=True
)
cdn_urls = []
for cdn in stream_data['multiLine']:
quality_urls = []
for quality in qualities:
quality_name = quality['sDisplayName']
bit_rate = quality['iBitRate']
base_url = cdn['url']
if bit_rate > 0:
if '.m3u8' in base_url:
new_url = base_url.replace(
'ratio=2000',
f'ratio={bit_rate}'
)
else:
new_url = base_url.replace(
'imgplus.flv',
f'imgplus_{bit_rate}.flv'
)
else:
new_url = base_url
quality_urls.extend([quality_name, new_url])
encoded_urls = self.e64(json.dumps(quality_urls))
cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{encoded_urls}")
if cdn_urls:
plist.append('#'.join(cdn_urls))
vod['vod_play_from'] = "$$$".join(names)
vod['vod_play_url'] = "$$$".join(plist)
return vod
except Exception as e:
return self.handle_exception(e)
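    # douyinDetail: stream_data inside live_core_sdk_data is a JSON string keyed by sdk_key
    # (one entry per quality). 'main'/'backup' become the two play lines, and for each line
    # the flv / hls / lls urls are grouped into flat [quality, url, ...] lists that are
    # base64-packed as "format$douyin@@..." for playerContent.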
def douyinDetail(self, ids):
url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
data = self.fetch(url, headers=self.dyheaders).json()
try:
vdata = data['data']['data'][0]
vod = self.buildvod(
vod_name=vdata['title'],
vod_remarks=vdata['user_count_str'],
)
resolution_data = vdata['stream_url']['live_core_sdk_data']['pull_data']['options']['qualities']
stream_json = vdata['stream_url']['live_core_sdk_data']['pull_data']['stream_data']
stream_json = json.loads(stream_json)
available_types = []
            if any('main' in v for v in stream_json['data'].values()):
                available_types.append('main')
            if any('backup' in v for v in stream_json['data'].values()):
                available_types.append('backup')
plist = []
for line_type in available_types:
format_arrays = {'flv': [], 'hls': [], 'lls': []}
qualities = sorted(resolution_data, key=lambda x: x['level'], reverse=True)
for quality in qualities:
sdk_key = quality['sdk_key']
if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
stream_info = stream_json['data'][sdk_key][line_type]
if stream_info.get('flv'):
format_arrays['flv'].extend([quality['name'], stream_info['flv']])
if stream_info.get('hls'):
format_arrays['hls'].extend([quality['name'], stream_info['hls']])
if stream_info.get('lls'):
format_arrays['lls'].extend([quality['name'], stream_info['lls']])
format_urls = []
for format_name, url_array in format_arrays.items():
if url_array:
encoded_urls = self.e64(json.dumps(url_array))
format_urls.append(f"{format_name}${ids[0]}@@{encoded_urls}")
if format_urls:
plist.append('#'.join(format_urls))
names = ['线路1', '线路2'][:len(plist)]
vod['vod_play_from'] = "$$$".join(names)
vod['vod_play_url'] = "$$$".join(plist)
return vod
except Exception as e:
return self.handle_exception(e)
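    # douyuDetail: the obfuscated sign script from /swf_api/homeH5Enc is trimmed by douyu_text()
    # and sent to an external signing service (alive.nsapps.cn); the returned sign is then used
    # against /lapi/live/getH5Play to enumerate CDNs and rates. The sign, cdn and rate table are
    # base64-packed into the play url and resolved later in douyuplay()/douyufp().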
def douyuDetail(self, ids):
headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
try:
data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
vname = data['room']['room_name']
vod = self.buildvod(
vod_name=vname,
vod_remarks=data['room'].get('second_lvl_name'),
vod_director=data['room'].get('nickname'),
)
vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
json_body = vdata['data']
json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
body=self.params_to_json(body)
nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
plist = []
names = []
for i,x in enumerate(nubdata['data']['cdnsWithName']):
names.append(f'线路{i+1}')
d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
plist.append(
f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
vod['vod_play_from'] = "$$$".join(names)
vod['vod_play_url'] = "$$$".join(plist)
return vod
except Exception as e:
return self.handle_exception(e)
def douyu_text(self, text):
function_positions = [m.start() for m in re.finditer('function', text)]
total_functions = len(function_positions)
if total_functions % 2 == 0:
target_index = total_functions // 2 + 1
else:
target_index = (total_functions - 1) // 2 + 1
if total_functions >= target_index:
cut_position = function_positions[target_index - 1]
ctext = text[4:cut_position]
return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', ctext)
return text
def searchContent(self, key, quick, pg="1"):
pass
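    # playerContent: every play id follows "platform@@payload". For wangyi/douyin/huya the
    # payload is base64(json([quality, url, ...])) and can be returned directly (parse=0);
    # bili and douyu still need further requests, handled in biliplay()/douyuplay().
    # Example id (hypothetical): "huya@@eyJ...=="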
def playerContent(self, flag, id, vipFlags):
try:
ids = id.split('@@')
p = 1
            if ids[0] in ['wangyi', 'douyin', 'huya']:
                p, url = 0, json.loads(self.d64(ids[1]))
            elif ids[0] == 'bili':
                p, url = self.biliplay(ids)
            elif ids[0] == 'douyu':
                p, url = self.douyuplay(ids)
return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
except Exception as e:
return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
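    # biliplay: getRoomPlayInfo is queried again with the chosen qn; the final address is
    # host + base_url + extra for every url_info, returned as a flat ["线路N", url, ...] list.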
def biliplay(self, ids):
try:
data = self.fetch(
f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
headers=self.gethr(0, ids[0])).json()
urls = []
line_index = 1
for stream in data['data']['playurl_info']['playurl']['stream']:
for format_item in stream['format']:
for codec in format_item['codec']:
for url_info in codec['url_info']:
full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
urls.extend([f"线路{line_index}", full_url])
line_index += 1
return 0, urls
except Exception as e:
return 1, self.excepturl
def douyuplay(self, ids):
try:
sdata = json.loads(self.d64(ids[1]))
headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
ldata = json.loads(self.d64(ids[2]))
result_obj = {}
with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
futures = [
executor.submit(
self.douyufp,
sdata,
quality,
headers,
self.hosts[ids[0]],
result_obj
) for quality in ldata
]
for future in futures:
future.result()
result = []
for bit in sorted(result_obj.keys(), reverse=True):
result.extend(result_obj[bit])
if result:
return 0, result
return 1, self.excepturl
except Exception as e:
return 1, self.excepturl
def douyufp(self, sdata, quality, headers, host, result_obj):
try:
body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
body=self.params_to_json(body)
data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
data=body, headers=headers).json()
if data.get('data'):
play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
bit = quality.get('bit', 0)
if bit not in result_obj:
result_obj[bit] = []
result_obj[bit].extend([quality['name'], play_url])
except Exception as e:
print(f"Error fetching {quality['name']}: {str(e)}")
def localProxy(self, param):
pass
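    # e64/d64 are plain base64 helpers used to pack play payloads; d64(e64(s)) == s.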
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self, encoded_text):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""
def josn_to_params(self, params, skip_empty=False):
query = []
for k, v in params.items():
if skip_empty and not v:
continue
query.append(f"{k}={v}")
return "&".join(query)
def params_to_json(self, query_string):
parsed_data = parse_qs(query_string)
result = {key: value[0] for key, value in parsed_data.items()}
return result
def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
type_name='', vod_area='', vod_actor='', vod_director='',
vod_content='', vod_play_from='', vod_play_url=''):
vod = {
'vod_id': vod_id,
'vod_name': vod_name,
'vod_pic': vod_pic,
'vod_year': vod_year,
'vod_tag': 'folder' if vod_tag else '',
'vod_remarks': vod_remarks,
'style': style,
'type_name': type_name,
'vod_area': vod_area,
'vod_actor': vod_actor,
'vod_director': vod_director,
'vod_content': vod_content,
'vod_play_from': vod_play_from,
'vod_play_url': vod_play_url
}
vod = {key: value for key, value in vod.items() if value}
return vod
def getpq(self, url, headers=None, cookies=None):
data = self.fetch(url, headers=headers, cookies=cookies).text
try:
return pq(data)
except Exception as e:
print(f"解析页面错误: {str(e)}")
return pq(data.encode('utf-8'))
def gethr(self, index, rf='', zr=''):
        headers = self.headers[index].copy()  # copy so the shared headers dict is not mutated across calls
if zr:
headers['referer'] = zr
else:
headers['referer'] = f"{self.referers[rf]}/"
return headers
def handle_exception(self, e):
print(f"报错: {str(e)}")
return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'翻车啦${self.excepturl}'}

323
PY/腾讯视频.py Normal file
View File

@ -0,0 +1,323 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import uuid
import copy
sys.path.append('..')
from base.spider import Spider
from concurrent.futures import ThreadPoolExecutor, as_completed
class Spider(Spider):
def init(self, extend=""):
self.dbody = {
"page_params": {
"channel_id": "",
"filter_params": "sort=75",
"page_type": "channel_operation",
"page_id": "channel_list_second_page"
}
}
self.body = self.dbody
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host = 'https://v.qq.com'
apihost = 'https://pbaccess.video.qq.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
'origin': host,
'referer': f'{host}/'
}
def homeContent(self, filter):
cdata = {
"电视剧": "100113",
"电影": "100173",
"综艺": "100109",
"纪录片": "100105",
"动漫": "100119",
"少儿": "100150",
"短剧": "110755"
}
result = {}
classes = []
filters = {}
for k in cdata:
classes.append({
'type_name': k,
'type_id': cdata[k]
})
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
for future in futures:
cid, data = future.result()
if not data.get('data', {}).get('module_list_datas'):
continue
filter_dict = {}
try:
items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
for item in items:
if not item.get('item_params', {}).get('index_item_key'):
continue
params = item['item_params']
filter_key = params['index_item_key']
if filter_key not in filter_dict:
filter_dict[filter_key] = {
'key': filter_key,
'name': params['index_name'],
'value': []
}
filter_dict[filter_key]['value'].append({
'n': params['option_name'],
'v': params['option_value']
})
except (IndexError, KeyError):
continue
filters[cid] = list(filter_dict.values())
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
vlist = []
for it in data['data']['CardList'][0]['children_list']['list']['cards']:
if it.get('params'):
p = it['params']
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
id = it.get('id') or p.get('cid')
name = p.get('mz_title') or p.get('title')
if name and 'http' not in id:
vlist.append({
'vod_id': id,
'vod_name': name,
'vod_pic': p.get('image_url'),
'vod_year': tag.get('tag_2', {}).get('text'),
'vod_remarks': tag.get('tag_4', {}).get('text')
})
return {'list': vlist}
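    # categoryContent paging: the first request resets the body; when has_next_page is true the
    # returned next_page_context is stored in self.body so the next call continues the cursor,
    # which is why pagecount is reported as 9999 until the last page.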
def categoryContent(self, tid, pg, filter, extend):
result = {}
params = {
"sort": extend.get('sort', '75'),
"attr": extend.get('attr', '-1'),
"itype": extend.get('itype', '-1'),
"ipay": extend.get('ipay', '-1'),
"iarea": extend.get('iarea', '-1'),
"iyear": extend.get('iyear', '-1'),
"theater": extend.get('theater', '-1'),
"award": extend.get('award', '-1'),
"recommend": extend.get('recommend', '-1')
}
if pg == '1':
            self.body = copy.deepcopy(self.dbody)  # deep copy so the nested page_params template is not mutated
self.body['page_params']['channel_id'] = tid
self.body['page_params']['filter_params'] = self.josn_to_params(params)
data = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
json=self.body, headers=self.headers).json()
ndata = data['data']
if ndata['has_next_page']:
result['pagecount'] = 9999
self.body['page_context'] = ndata['next_page_context']
else:
result['pagecount'] = int(pg)
vlist = []
for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
id = its.get('item_params', {}).get('cid')
if id:
p = its['item_params']
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
name = p.get('mz_title') or p.get('title')
pic = p.get('new_pic_hz') or p.get('new_pic_vt')
vlist.append({
'vod_id': id,
'vod_name': name,
'vod_pic': pic,
'vod_year': tag.get('tag_2', {}).get('text'),
'vod_remarks': tag.get('tag_4', {}).get('text')
})
result['list'] = vlist
result['page'] = pg
result['limit'] = 90
result['total'] = 999999
return result
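    # detailContent: the introduction page and the episode list are requested in parallel;
    # process_tabs() then walks the remaining episode tabs (each tab carries its own
    # page_context) with a thread pool so long series are collected in one pass.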
def detailContent(self, ids):
vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
with ThreadPoolExecutor(max_workers=2) as executor:
future_detail = executor.submit(self.get_vdata, vbody)
future_episodes = executor.submit(self.get_vdata, body)
vdata = future_detail.result()
data = future_episodes.result()
pdata = self.process_tabs(data, body, ids)
if not pdata:
return self.handle_exception(None, "No pdata available")
try:
star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
actors = [star['item_params']['name'] for star in star_list]
names = ['腾讯视频', '预告片']
plist, ylist = self.process_pdata(pdata, ids)
if not plist:
del names[0]
if not ylist:
del names[1]
vod = self.build_vod(vdata, actors, plist, ylist, names)
return {'list': [vod]}
except Exception as e:
return self.handle_exception(e, "Error processing detail")
def searchContent(self, key, quick, pg="1"):
headers = self.headers.copy()
headers.update({'Content-Type': 'application/json'})
body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
json=body, headers=headers).json()
vlist = []
vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
v=data['data']['normalList']['itemList']
d=data['data']['areaBoxList'][0]['itemList']
q=v+d
        if v and v[0].get('doc') and v[0]['doc'].get('id') == 'MainNeed': q = d + v
for k in q:
if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
img_tag = k.get('videoInfo', {}).get('imgTag')
if img_tag is not None and isinstance(img_tag, str):
try:
tag = json.loads(img_tag)
except json.JSONDecodeError as e:
tag = {}
else:
tag = {}
pic = k.get('videoInfo', {}).get('imgUrl')
vlist.append({
'vod_id': k['doc']['id'],
'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
'vod_pic': pic,
'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
'vod_remarks': tag.get('tag_4', {}).get('text', '')
})
return {'list': vlist, 'page': pg}
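    # playerContent returns the normal cover page url with jx/parse set to 1, i.e. playback is
    # delegated to an external parser rather than resolved here.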
def playerContent(self, flag, id, vipFlags):
ids = id.split('@')
url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
return {'jx':1,'parse': 1, 'url': url, 'header': ''}
def localProxy(self, param):
pass
def get_filter_data(self, cid):
        # deep copy: several threads call this concurrently and each mutates page_params
        hbody = copy.deepcopy(self.dbody)
hbody['page_params']['channel_id'] = cid
data = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
json=hbody, headers=self.headers).json()
return cid, data
def get_vdata(self, body):
try:
vdata = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
json=body, headers=self.headers
).json()
return vdata
except Exception as e:
print(f"Error in get_vdata: {str(e)}")
return {'data': {'module_list_datas': []}}
def process_pdata(self, pdata, ids):
plist = []
ylist = []
for k in pdata:
if k.get('item_id'):
pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
if '预告' in k['item_params']['union_title']:
ylist.append(pid)
else:
plist.append(pid)
return plist, ylist
def build_vod(self, vdata, actors, plist, ylist, names):
d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
urls = []
if plist:
urls.append('#'.join(plist))
if ylist:
urls.append('#'.join(ylist))
vod = {
'type_name': d.get('sub_genre', ''),
'vod_name': d.get('title', ''),
'vod_year': d.get('year', ''),
'vod_area': d.get('area_name', ''),
'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
'vod_actor': ','.join(actors),
'vod_content': d.get('cover_description', ''),
'vod_play_from': '$$$'.join(names),
'vod_play_url': '$$$'.join(urls)
}
return vod
def handle_exception(self, e, message):
print(f"{message}: {str(e)}")
return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}
def process_tabs(self, data, body, ids):
try:
pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
if tabs and len(json.loads(tabs)):
tabs = json.loads(tabs)
remaining_tabs = tabs[1:]
task_queue = []
for tab in remaining_tabs:
nbody = copy.deepcopy(body)
nbody['page_params']['page_context'] = tab['page_context']
task_queue.append(nbody)
with ThreadPoolExecutor(max_workers=10) as executor:
future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
results = [None] * len(task_queue)
for future in as_completed(future_map.keys()):
idx = future_map[future]
results[idx] = future.result()
for result in results:
if result:
page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
'item_datas']
pdata.extend(page_data)
return pdata
except Exception as e:
print(f"Error processing episodes: {str(e)}")
return []
def josn_to_params(self, params, skip_empty=False):
query = []
for k, v in params.items():
if skip_empty and not v:
continue
query.append(f"{k}={v}")
return "&".join(query)

205
PY/芒果视频.py Normal file
View File

@ -0,0 +1,205 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
rhost='https://www.mgtv.com'
host='https://pianku.api.mgtv.com'
vhost='https://pcweb.api.mgtv.com'
mhost='https://dc.bz.mgtv.com'
shost='https://mobileso.bz.mgtv.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'origin': rhost,
'referer': f'{rhost}/'
}
def homeContent(self, filter):
result = {}
cateManual = {
"电影": "3",
"电视剧": "2",
"综艺": "1",
"动画": "50",
"少儿": "10",
"纪录片": "51",
"教育": "115"
}
classes = []
filters = {}
for k in cateManual:
classes.append({
'type_name': k,
'type_id': cateManual[k]
})
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
results = executor.map(self.getf, classes)
for id, ft in results:
if len(ft):filters[id] = ft
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
videoList = []
for i in data['data']:
if i.get('DSLList') and len(i['DSLList']):
for j in i['DSLList']:
if j.get('data') and j['data'].get('items') and len(j['data']['items']):
for k in j['data']['items']:
videoList.append({
'vod_id': k["videoId"],
'vod_name': k['videoName'],
'vod_pic': k['img'],
'vod_year': k.get('cornerTitle'),
'vod_remarks': k.get('time') or k.get('desc'),
})
return {'list':videoList}
def categoryContent(self, tid, pg, filter, extend):
body={
'allowedRC': '1',
'platform': 'pcweb',
'channelId': tid,
'pn': pg,
'pc': '80',
'hudong': '1',
'_support': '10000000'
}
body.update(extend)
data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
videoList = []
for i in data['data']['hitDocs']:
videoList.append({
'vod_id': i["playPartId"],
'vod_name': i['title'],
'vod_pic': i['img'],
'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
'vod_remarks': i['updateInfo']
})
result = {}
result['list'] = videoList
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
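    # detailContent: page 1 of /episode/list also reports total_page; the remaining pages are
    # fetched concurrently and merged in page order before joining into vod_play_url.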
def detailContent(self, ids):
vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
d=vdata['data']['info']['detail']
vod = {
'vod_name': vdata['data']['info']['title'],
'type_name': d.get('kind'),
'vod_year': d.get('releaseTime'),
'vod_area': d.get('area'),
'vod_lang': d.get('language'),
'vod_remarks': d.get('updateInfo'),
'vod_actor': d.get('leader'),
'vod_director': d.get('director'),
'vod_content': d.get('story'),
'vod_play_from': '芒果TV',
'vod_play_url': ''
}
data,pdata=self.fetch_page_data('1', ids[0],True)
pagecount=data['data'].get('total_page') or 1
if int(pagecount)>1:
            pages = list(range(2, int(pagecount) + 1))
page_results = {}
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_page = {
executor.submit(self.fetch_page_data, page, ids[0]): page
for page in pages
}
for future in as_completed(future_to_page):
page = future_to_page[future]
try:
result = future.result()
page_results[page] = result
except Exception as e:
print(f"Error fetching page {page}: {e}")
for page in sorted(page_results.keys()):
pdata.extend(page_results[page])
vod['vod_play_url'] = '#'.join(pdata)
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
videoList = []
for i in data['data']['contents']:
if i.get('data') and len(i['data']):
k = i['data'][0]
if k.get('vid') and k.get('img'):
try:
videoList.append({
'vod_id': k['vid'],
'vod_name': k['title'],
'vod_pic': k['img'],
'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
'vod_remarks': '/'.join(i.get('desc',[])),
})
except:
print(k)
return {'list':videoList,'page':pg}
def playerContent(self, flag, id, vipFlags):
id=f'{self.rhost}{id}'
return {'jx':1,'parse': 1, 'url': id, 'header': ''}
def localProxy(self, param):
pass
def getf(self, body):
params = {
'allowedRC': '1',
'channelId': body['type_id'],
'platform': 'pcweb',
'_support': '10000000',
}
data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
ft = []
for i in data['data']['listItems']:
try:
value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
value.get('tagName')]
ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
except:
print(i)
return body['type_id'], ft
def fetch_page_data(self, page, id, b=False):
body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
if b:
return data, ldata
else:
return ldata

225
PY/金牌影视.py Normal file
View File

@ -0,0 +1,225 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import threading
import uuid
import requests
sys.path.append('..')
from base.spider import Spider
import time
from Crypto.Hash import MD5, SHA1
class Spider(Spider):
'''
配置示例
{
"key": "xxxx",
"name": "xxxx",
"type": 3,
"api": ".所在路径/金牌.py",
"searchable": 1,
"quickSearch": 1,
"filterable": 1,
"changeable": 1,
"ext": {
"site": "https://www.jiabaide.cn,域名2,域名3"
}
},
'''
def init(self, extend=""):
if extend:
hosts=json.loads(extend)['site']
self.host = self.host_late(hosts)
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
def homeContent(self, filter):
cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
result = {}
classes = []
filters={}
for k in cdata['data']:
classes.append({
'type_name': k['typeName'],
'type_id': str(k['typeId']),
})
sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
for tid, d in fdata['data'].items():
current_sort_values = sort_values.copy()
if tid == '1':
del current_sort_values[0]
filters[tid] = [
{"key": "type", "name": "类型",
"value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},
*([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
"value": [{"n": i["itemText"], "v": i["itemText"]}
for i in d["plotList"]]}]),
{"key": "area", "name": "地区",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},
{"key": "year", "name": "年份",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},
{"key": "lang", "name": "语言",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},
{"key": "sort", "name": "排序", "value": current_sort_values}
]
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
data=[]
for i in data1['data'].values():
data.extend(i['list'])
data.extend(data2['data'])
vods=self.getvod(data)
return {'list':vods}
def categoryContent(self, tid, pg, filter, extend):
params = {
"area": extend.get('area', ''),
"filterStatus": "1",
"lang": extend.get('lang', ''),
"pageNum": pg,
"pageSize": "30",
"sort": extend.get('sort', '1'),
"sortBy": "1",
"type": extend.get('type', ''),
"type1": tid,
"v_class": extend.get('v_class', ''),
"year": extend.get('year', '')
}
data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
result = {}
result['list'] = self.getvod(data['data']['list'])
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
vod=self.getvod([data['data']])[0]
vod['vod_play_from']='金牌'
vod['vod_play_url'] = '#'.join(
f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
vod['episodelist'])
vod.pop('episodelist', None)
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
params = {
"keyword": key,
"pageNum": pg,
"pageSize": "8",
"sourceCode": "1"
}
data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
vods=self.getvod(data['data']['result']['list'])
return {'list':vods,'page':pg}
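    # playerContent: /v2/video/episode/url lists one url per resolution; they are flattened into
    # [resolutionName, url, ...] and returned with parse=0 (direct play).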
def playerContent(self, flag, id, vipFlags):
self.header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'sec-ch-ua-platform': '"Windows"',
'DNT': '1',
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
'sec-ch-ua-mobile': '?0',
'Origin': self.host,
'Referer': f'{self.host}/'
}
ids=id.split('@@')
pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
vlist=[]
for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
return {'parse':0,'url':vlist,'header':self.header}
def localProxy(self, param):
pass
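    # host_late: when several mirror domains are configured, a HEAD request is sent to each in a
    # thread and the domain with the lowest measured latency becomes self.host.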
def host_late(self, url_list):
if isinstance(url_list, str):
urls = [u.strip() for u in url_list.split(',')]
else:
urls = url_list
if len(urls) <= 1:
return urls[0] if urls else ''
results = {}
threads = []
def test_host(url):
try:
start_time = time.time()
response = requests.head(url, timeout=1.0, allow_redirects=False)
delay = (time.time() - start_time) * 1000
results[url] = delay
except Exception as e:
results[url] = float('inf')
for url in urls:
t = threading.Thread(target=test_host, args=(url,))
threads.append(t)
t.start()
for t in threads:
t.join()
return min(results.items(), key=lambda x: x[1])[0]
def md5(self, sign_key):
md5_hash = MD5.new()
md5_hash.update(sign_key.encode('utf-8'))
md5_result = md5_hash.hexdigest()
return md5_result
def js(self, param):
return '&'.join(f"{k}={v}" for k, v in param.items())
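    # getheaders: requests are signed with sign = SHA1(MD5(query_string)), where query_string is
    # the business params plus a fixed key and millisecond timestamp t, joined in insertion order
    # by js(); sign, t and a random deviceid are then sent as headers.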
def getheaders(self, param=None):
if param is None:param = {}
t=str(int(time.time()*1000))
param['key']='cb808529bae6b6be45ecfab29a4889bc'
param['t']=t
sha1_hash = SHA1.new()
sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
sign = sha1_hash.hexdigest()
deviceid = str(uuid.uuid4())
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'Accept': 'application/json, text/plain, */*',
'sign': sign,
't': t,
'deviceid':deviceid
}
return headers
def convert_field_name(self, field):
field = field.lower()
if field.startswith('vod') and len(field) > 3:
field = field.replace('vod', 'vod_')
if field.startswith('type') and len(field) > 4:
field = field.replace('type', 'type_')
return field
def getvod(self, array):
return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]

View File

@ -1,35 +1,15 @@
{
"站名": "修罗影视",
"请求头": "User-Agent@Mozilla/5.0 (Linux Android 12 PEHM00 Build/SKQ1.210216.001 wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/130.0.6723.108 Mobile Safari/537.36",
"编码": "UTF-8",
"图片代理": "0",
"直接播放": "0",
"主页url": "http://v.xlys.ltd.ua",
"分类url": "http://v.xlys.ltd.ua/s/{cateId}/{catePg}",
"分类": "动作$dongzuo#爱情$aiqing#喜剧$xiju#科幻$kehuan#恐怖$kongbu#战争$zhanzheng#武侠$wuxia#魔幻$mohuan#剧情$juqing#动画$donghua#惊悚$jingsong#灾难$zainan#悬疑$xuanyi#警匪$jingfei#文艺$wenyi#青春$qingchun#冒险$maoxian#犯罪$fanzui#纪录$jilu#古装$guzhuang#奇幻$qihuan#国语$guoyu#综艺$zongyi#历史$lishi#运动$yundong#原创压制$yuanchuang#美剧$meiju#韩剧$hanju#国产电视剧$guoju#日剧$riju#英剧$yingju#德剧$deju#俄剧$eju#巴剧$baju#加剧$jiaju#西剧$spanish#意大利剧$yidaliju#泰剧$taiju#港台剧$gangtaiju#法剧$faju#澳剧$aoju",
"数组": "card-link&&/h3>",
"标题": "<h3*\">&&<",
"数组": "card card-sm card-link&&</div>",
"图片": "src=\"&&\"",
"副标题": "red-fg\">&&<",
"标题": "mb-0 card-title text-truncate\">&&<",
"副标题": "start-0 text-red-fg\">&&</span>",
"链接": "href=\"&&\"",
"影片年代": "上映日期:&&</p>",
"影片地区": "地区:&&</p>",
"影片类型": "类型:&&</p>",
"状态": "豆瓣 :&&</div>",
"导演": "导演:&&</p>",
"主演": "主演:&&</p",
"简介": "剧情简介:&&>",
"线路数组": "download-list\">&&</div>",
"线路标题": "磁力",
"播放二次截取": "",
"播放数组": "<td>&&</a></td>",
"倒序": "0",
"播放列表": "<a&&/a>",
"播放标题": ">&&<",
"播放链接": "",
"解析": "",
"跳转播放链接": "",
"跳转解析": "",
"搜索请求头": "User-Agent$MOBILE_UA",
"搜索url": "http://v.xlys.ltd.ua/search/{wd}/{pg}"
"简介": "剧情简介:&&\"",
"线路标题": "磁力+>&&<",
"播放数组": "download-list&&</tbody>",
"播放列表": "<tr&&</tr>",
"播放标题": "text-muted\">&&</td>[不包含:网盘下载]",
"跳转播放链接": "href=\"&&\"",
"分类url": "https://v.xlys.ltd.ua/s/all/{catePg}?type={cateId};;d0",
"分类": "电影$0#电视剧$1"
}

1369
api.json

File diff suppressed because it is too large Load Diff

Binary file not shown.

BIN
wex.jar

Binary file not shown.