commit 3572e29279890d0dd5bf154970f9b6ebbb2b57a2
Author: Wang.Luo <1593775941@qq.com>
Date: Thu Oct 23 02:14:43 2025 +0800
```
feat(PyramidStore): 初始化项目并添加基础配置文件
添加 .gitignore 忽略子仓库的 .git 目录
添加 LICENSE 文件,使用 GNU General Public License v3.0
添加 README.md 说明文档,包含调试示例、免责声明和配置说明
添加 base/localProxy.py 基础代理配置文件
添加版本控制图片文件(二进制差异)
```
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..eda8184
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+
+# 忽略子仓库的.git目录
+PyramidStore/.git/
\ No newline at end of file
diff --git a/.版本.png b/.版本.png
new file mode 100644
index 0000000..50c456e
Binary files /dev/null and b/.版本.png differ
diff --git a/PyramidStore/LICENSE b/PyramidStore/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/PyramidStore/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/PyramidStore/README.md b/PyramidStore/README.md
new file mode 100644
index 0000000..7941424
--- /dev/null
+++ b/PyramidStore/README.md
@@ -0,0 +1,29 @@
+# Pyramid
+
+基于[PyramidStore](https://github.com/UndCover/PyramidStore),用于支持影视及其衍生app使用python爬虫作为数据源,[原版Pyramid源码地址](https://github.com/UndCover/Pyramid)
+
+## 调试示例
+
+参考 [小白调试示例.py](https://github.com/JJBJJ/PyramidStore/blob/main/plugin/小白调试示例.py)
+
+## 免责声明
+
+本项目仅供爬虫技术学习交流使用,所有代码开源且免费,严禁任何商业用途,搜索结果均来自源站,本人不承担任何责任
+
+## 食用方法
+
+推荐:ok影视
+
+开袋即食:不需要挂载任何jar!不需要挂载任何jar!不需要挂载任何jar!!!
+**一定要开启存储权限!一定要开启存储权限!一定要开启存储权限!**
+
+## 配置示例
+
+配置文件sites添加内容参考 [example.json](https://github.com/JJBJJ/PyramidStore/blob/main/example.json)
+
+### [Python爬虫写法参考](https://github.com/JJBJJ/PyramidStore/blob/main/spider.md)
+
+### [影视版源码地址](https://github.com/FongMi/TV/tree/release/chaquo)
+
+### 问题反馈
+问题请反馈到[telegram](https://t.me/+A3SLQRmPVi9kOThl)
diff --git a/PyramidStore/base/localProxy.py b/PyramidStore/base/localProxy.py
new file mode 100644
index 0000000..fc7eb02
--- /dev/null
+++ b/PyramidStore/base/localProxy.py
@@ -0,0 +1,6 @@
class Proxy:
    """Address of the local companion proxy/cache HTTP service.

    Both helpers are static because callers in base/spider.py invoke them
    unbound on the class (``Proxy.getUrl(local)``, ``Proxy.getPort()``);
    the original instance-method signatures made those calls raise
    ``TypeError``.  Instance calls (``Proxy().getUrl(local)``) keep working.
    """

    @staticmethod
    def getUrl(local):
        # *local* is accepted for interface compatibility with callers;
        # the service address is fixed.
        return 'http://127.0.0.1:9978'

    @staticmethod
    def getPort():
        # Port of the local proxy/cache service.
        return 9978
\ No newline at end of file
diff --git a/PyramidStore/base/spider.py b/PyramidStore/base/spider.py
new file mode 100644
index 0000000..80ab9d5
--- /dev/null
+++ b/PyramidStore/base/spider.py
@@ -0,0 +1,151 @@
+import re
+import os
+import json
+import time
+import requests
+from lxml import etree
+from abc import abstractmethod, ABCMeta
+from importlib.machinery import SourceFileLoader
+from base.localProxy import Proxy
+
class Spider(metaclass=ABCMeta):
    """Abstract base class for Pyramid site spiders.

    Subclasses implement :meth:`init` and override whichever content hooks
    (home/category/detail/search/player/...) their site supports.  The class
    behaves as a process-wide singleton: every construction returns the
    first instance created.
    """

    # Singleton instance shared by all constructions (see __new__).
    _instance = None

    def __init__(self):
        # Site-specific configuration string; populated by init() in subclasses.
        self.extend = ''

    def __new__(cls, *args, **kwargs):
        # Singleton: reuse the first instance ever created for this class.
        if cls._instance:
            return cls._instance
        cls._instance = super().__new__(cls)
        return cls._instance

    @abstractmethod
    def init(self, extend=""):
        """Initialise the spider; *extend* carries site-specific config text."""
        pass

    # --- content hooks: no-op by default, overridden per site -------------

    def homeContent(self, filter):
        pass

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        pass

    def detailContent(self, ids):
        pass

    def searchContent(self, key, quick, pg="1"):
        pass

    def playerContent(self, flag, id, vipFlags):
        pass

    def liveContent(self, url):
        pass

    def localProxy(self, param):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def getName(self):
        pass

    def getDependence(self):
        """Names of sibling plugin modules this spider depends on."""
        return []

    # --- plugin loading ---------------------------------------------------

    def loadSpider(self, name):
        """Instantiate the Spider class of dependency plugin *name*."""
        return self.loadModule(name).Spider()

    def loadModule(self, name):
        """Load plugin module *name* from the sibling plugin directory.

        NOTE(review): ``SourceFileLoader.load_module`` is deprecated in
        favour of ``exec_module``; kept as-is because the replacement has
        subtly different ``sys.modules`` behaviour that plugins may rely on.
        """
        path = os.path.join("../plugin", f'{name}.py')
        return SourceFileLoader(name, path).load_module()

    # --- text helpers -----------------------------------------------------

    def regStr(self, reg, src, group=1):
        """Return capture *group* of the first *reg* match in *src*, else ''."""
        m = re.search(reg, src)
        if m:
            return m.group(group)
        return ''

    def removeHtmlTags(self, src):
        """Strip anything that looks like an HTML/XML tag from *src*."""
        return re.sub(re.compile('<.*?>'), '', src)

    def cleanText(self, src):
        """Remove emoji / pictograph / flag code points from *src*."""
        clean = re.sub('[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F1E0-\U0001F1FF]', '',
                       src)
        return clean

    # --- HTTP helpers -----------------------------------------------------

    def fetch(self, url, params=None, cookies=None, headers=None, timeout=5, verify=True, stream=False,
              allow_redirects=True):
        """GET *url*; the response is forced to decode as UTF-8."""
        rsp = requests.get(url, params=params, cookies=cookies, headers=headers, timeout=timeout, verify=verify,
                           stream=stream, allow_redirects=allow_redirects)
        rsp.encoding = 'utf-8'
        return rsp

    def post(self, url, params=None, data=None, json=None, cookies=None, headers=None, timeout=5, verify=True,
             stream=False, allow_redirects=True):
        """POST to *url*; the response is forced to decode as UTF-8."""
        rsp = requests.post(url, params=params, data=data, json=json, cookies=cookies, headers=headers, timeout=timeout,
                            verify=verify, stream=stream, allow_redirects=allow_redirects)
        rsp.encoding = 'utf-8'
        return rsp

    def html(self, content):
        """Parse an HTML string into an lxml element tree."""
        return etree.HTML(content)

    def str2json(self, data):
        """Parse a JSON string into Python objects.

        Fixed: the original signature ``str2json(str)`` lacked ``self``, so
        instance calls passed the spider object itself to ``json.loads``.
        """
        return json.loads(data)

    def json2str(self, data):
        """Serialise *data* to a JSON string (non-ASCII kept literal).

        Fixed: same missing-``self`` defect as ``str2json``.
        """
        return json.dumps(data, ensure_ascii=False)

    def getProxyUrl(self, local=True):
        """Base URL of the local proxy endpoint used by plugins.

        Fixed: ``Proxy.getUrl`` is declared as an instance method, so the
        original unbound call ``Proxy.getUrl(local)`` raised ``TypeError``;
        an instance is used here instead.
        """
        return f'{Proxy().getUrl(local)}?do=py'

    def log(self, msg):
        """Print *msg*; dicts and lists are serialised as JSON first."""
        if isinstance(msg, (dict, list)):
            print(json.dumps(msg, ensure_ascii=False))
        else:
            print(f'{msg}')

    # --- local cache service ----------------------------------------------

    def getCache(self, key):
        """Fetch *key* from the local cache service.

        JSON-looking payloads are decoded; dict entries carrying an
        ``expiresAt`` timestamp in the past are evicted and reported as
        missing (``None``).
        """
        value = self.fetch(f'http://127.0.0.1:{Proxy().getPort()}/cache?do=get&key={key}', timeout=5).text
        if len(value) > 0:
            if value.startswith('{') and value.endswith('}') or value.startswith('[') and value.endswith(']'):
                value = json.loads(value)
                if type(value) == dict:
                    if 'expiresAt' not in value or value['expiresAt'] >= int(time.time()):
                        return value
                    else:
                        # Expired entry: drop it and pretend it was absent.
                        self.delCache(key)
                        return None
            return value
        else:
            return None

    def setCache(self, key, value):
        """Store *value* under *key* in the local cache service.

        Numbers are stringified, dicts/lists JSON-encoded.  Returns
        'succeed' or 'failed'; empty values are silently ignored.
        """
        if type(value) in (int, float):
            value = str(value)
        if len(value) > 0:
            if type(value) in (dict, list):
                value = json.dumps(value, ensure_ascii=False)
            r = self.post(f'http://127.0.0.1:{Proxy().getPort()}/cache?do=set&key={key}', data={"value": value}, timeout=5)
            return 'succeed' if r.status_code == 200 else 'failed'

    def delCache(self, key):
        """Delete *key* from the local cache service; 'succeed' or 'failed'."""
        r = self.fetch(f'http://127.0.0.1:{Proxy().getPort()}/cache?do=del&key={key}', timeout=5)
        return 'succeed' if r.status_code == 200 else 'failed'
\ No newline at end of file
diff --git a/PyramidStore/example.json b/PyramidStore/example.json
new file mode 100644
index 0000000..e64c92c
--- /dev/null
+++ b/PyramidStore/example.json
@@ -0,0 +1,23 @@
+[
+ {
+ "key": "金牌",
+ "name": "金牌",
+ "type": 3,
+ "api": "爬虫所在位置/金牌.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1,
+ "ext": {
+ "site": "域名1,域名2,域名3......"
+ }
+ },
+ {
+ "key": "光速",
+ "name": "光速",
+ "type": 3,
+ "api": "爬虫所在位置/光速.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1
+ }
+]
diff --git a/PyramidStore/plugin/adult/51吸瓜.py b/PyramidStore/plugin/adult/51吸瓜.py
new file mode 100644
index 0000000..81ba712
--- /dev/null
+++ b/PyramidStore/plugin/adult/51吸瓜.py
@@ -0,0 +1,404 @@
+# -*- coding: utf-8 -*-
+# 🌈 Love
+import json
+import random
+import re
+import sys
+import threading
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import urlparse
+
+import requests
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the 51吸瓜 site.

    Scrapes listings and DPlayer video configs from a small set of rotating
    mirror domains; images and m3u8 playlists are rewritten to go through
    the local proxy (see localProxy/proxy/m3Proxy below).
    """

    def init(self, extend=""):
        # *extend* may carry a JSON "proxies" mapping passed to requests.
        # NOTE(review): bare except silently ignores malformed JSON.
        try:self.proxies = json.loads(extend)
        except:self.proxies = {}
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cache-Control': 'no-cache',
        }
        # Use working dynamic URLs directly
        self.host = self.get_working_host()
        self.headers.update({'Origin': self.host, 'Referer': f"{self.host}/"})
        self.log(f"使用站点: {self.host}")
        print(f"使用站点: {self.host}")
        pass

    def getName(self):
        return "🌈 51吸瓜"

    def isVideoFormat(self, url):
        # Treat direct media formats as playable without parsing
        return any(ext in (url or '') for ext in ['.m3u8', '.mp4', '.ts'])

    def manualVideoCheck(self):
        return False

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Return home categories (several selector fallbacks) and the front-page list."""
        try:
            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
            if response.status_code != 200:
                return {'class': [], 'list': []}

            data = self.getpq(response.text)
            result = {}
            classes = []

            # Try to get categories from different possible locations
            category_selectors = [
                '.category-list ul li',
                '.nav-menu li',
                '.menu li',
                'nav ul li'
            ]

            for selector in category_selectors:
                for k in data(selector).items():
                    link = k('a')
                    href = (link.attr('href') or '').strip()
                    name = (link.text() or '').strip()
                    # Skip placeholder or invalid entries
                    if not href or href == '#' or not name:
                        continue
                    classes.append({
                        'type_name': name,
                        'type_id': href
                    })
                # First selector that yields anything wins.
                if classes:
                    break

            # If no categories found, create some default ones
            if not classes:
                classes = [
                    {'type_name': '首页', 'type_id': '/'},
                    {'type_name': '最新', 'type_id': '/latest/'},
                    {'type_name': '热门', 'type_id': '/hot/'}
                ]

            result['class'] = classes
            result['list'] = self.getlist(data('#index article a'))
            return result

        except Exception as e:
            print(f"homeContent error: {e}")
            return {'class': [], 'list': []}

    def homeVideoContent(self):
        """Front-page video list only (no categories)."""
        try:
            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
            if response.status_code != 200:
                return {'list': []}
            data = self.getpq(response.text)
            return {'list': self.getlist(data('#index article a, #archive article a'))}
        except Exception as e:
            print(f"homeVideoContent error: {e}")
            return {'list': []}

    def categoryContent(self, tid, pg, filter, extend):
        """List a category page; a '@folder' tid expands a multi-part post instead."""
        try:
            if '@folder' in tid:
                id = tid.replace('@folder', '')
                videos = self.getfod(id)
            else:
                # Build URL properly
                if tid.startswith('/'):
                    if pg and pg != '1':
                        url = f"{self.host}{tid}page/{pg}/"
                    else:
                        url = f"{self.host}{tid}"
                else:
                    url = f"{self.host}/{tid}"

                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
                if response.status_code != 200:
                    return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}

                data = self.getpq(response.text)
                videos = self.getlist(data('#archive article a, #index article a'), tid)

            result = {}
            result['list'] = videos
            # Folder posts are a single page; normal categories page forever.
            result['pagecount'] = 1 if '@folder' in tid else 99999
            result['page'] = pg
            result['limit'] = 90
            result['total'] = 999999
            return result

        except Exception as e:
            print(f"categoryContent error: {e}")
            return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}

    def detailContent(self, ids):
        """Build the detail/play info for one post from its DPlayer configs."""
        try:
            url = f"{self.host}{ids[0]}" if not ids[0].startswith('http') else ids[0]
            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)

            if response.status_code != 200:
                return {'list': [{'vod_play_from': '51吸瓜', 'vod_play_url': f'页面加载失败${url}'}]}

            data = self.getpq(response.text)
            vod = {'vod_play_from': '51吸瓜'}

            # Get content/description
            try:
                clist = []
                if data('.tags .keywords a'):
                    for k in data('.tags .keywords a').items():
                        title = k.text()
                        href = k.attr('href')
                        if title and href:
                            # App-specific markup: clickable tag linking back to a category.
                            clist.append('[a=cr:' + json.dumps({'id': href, 'name': title}) + '/]' + title + '[/a]')
                vod['vod_content'] = ' '.join(clist) if clist else data('.post-title').text()
            except:
                vod['vod_content'] = data('.post-title').text() or '51吸瓜视频'

            # Get video URLs (build episode list when multiple players exist)
            try:
                plist = []
                used_names = set()
                if data('.dplayer'):
                    for c, k in enumerate(data('.dplayer').items(), start=1):
                        config_attr = k.attr('data-config')
                        if config_attr:
                            try:
                                config = json.loads(config_attr)
                                video_url = config.get('video', {}).get('url', '')
                                # Determine a readable episode name from nearby headings if present
                                ep_name = ''
                                try:
                                    parent = k.parents().eq(0)
                                    # search up to a few ancestors for a heading text
                                    for _ in range(3):
                                        if not parent: break
                                        heading = parent.find('h2, h3, h4').eq(0).text() or ''
                                        heading = heading.strip()
                                        if heading:
                                            ep_name = heading
                                            break
                                        parent = parent.parents().eq(0)
                                except Exception:
                                    ep_name = ''
                                base_name = ep_name if ep_name else f"视频{c}"
                                name = base_name
                                count = 2
                                # Ensure the name is unique
                                while name in used_names:
                                    name = f"{base_name} {count}"
                                    count += 1
                                used_names.add(name)
                                if video_url:
                                    self.log(f"解析到视频: {name} -> {video_url}")
                                    print(f"解析到视频: {name} -> {video_url}")
                                    plist.append(f"{name}${video_url}")
                            except:
                                # NOTE(review): bare except hides JSON/parse errors per player.
                                continue

                if plist:
                    self.log(f"拼装播放列表,共{len(plist)}个")
                    print(f"拼装播放列表,共{len(plist)}个")
                    vod['vod_play_url'] = '#'.join(plist)
                else:
                    vod['vod_play_url'] = f"未找到视频源${url}"

            except Exception as e:
                vod['vod_play_url'] = f"视频解析失败${url}"

            return {'list': [vod]}

        except Exception as e:
            print(f"detailContent error: {e}")
            return {'list': [{'vod_play_from': '51吸瓜', 'vod_play_url': f'详情页加载失败${ids[0] if ids else ""}'}]}

    def searchContent(self, key, quick, pg="1"):
        """Search the site; pagination via path segment."""
        try:
            url = f"{self.host}/search/{key}/{pg}" if pg != "1" else f"{self.host}/search/{key}/"
            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)

            if response.status_code != 200:
                return {'list': [], 'page': pg}

            data = self.getpq(response.text)
            videos = self.getlist(data('#archive article a, #index article a'))
            return {'list': videos, 'page': pg}

        except Exception as e:
            print(f"searchContent error: {e}")
            return {'list': [], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id; parse=0 means direct play, m3u8 goes via the proxy."""
        url = id
        p = 1
        if self.isVideoFormat(url):
            # m3u8/mp4 direct play; when using proxy setting, wrap to proxy for m3u8
            if '.m3u8' in url:
                url = self.proxy(url)
            p = 0
        self.log(f"播放请求: parse={p}, url={url}")
        print(f"播放请求: parse={p}, url={url}")
        return {'parse': p, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        """Local-proxy dispatch: decrypt images, rewrite m3u8, or pass through segments."""
        if param.get('type') == 'img':
            res=requests.get(param['url'], headers=self.headers, proxies=self.proxies, timeout=10)
            return [200,res.headers.get('Content-Type'),self.aesimg(res.content)]
        elif param.get('type') == 'm3u8':return self.m3Proxy(param['url'])
        else:return self.tsProxy(param['url'])

    def proxy(self, data, type='m3u8'):
        # Wrap *data* in a local-proxy URL only when upstream proxies are configured.
        if data and len(self.proxies):return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
        else:return data

    def m3Proxy(self, url):
        """Fetch a (base64-encoded) m3u8 URL and rewrite key/segment URIs through the proxy."""
        url=self.d64(url)
        ydata = requests.get(url, headers=self.headers, proxies=self.proxies, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        # Follow a single redirect manually so relative paths resolve against it.
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = requests.get(url, headers=self.headers, proxies=self.proxies).content.decode('utf-8')
        lines = data.strip().split('\n')
        last_r = url[:url.rfind('/')]
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        iskey=True
        for index, string in enumerate(lines):
            # Only the first EXT-X-KEY URI is rewritten (type "mkey").
            if iskey and 'URI' in string:
                pattern = r'URI="([^"]*)"'
                match = re.search(pattern, string)
                if match:
                    lines[index] = re.sub(pattern, f'URI="{self.proxy(match.group(1), "mkey")}"', string)
                    iskey=False
                continue
            if '#EXT' not in string:
                if 'http' not in string:
                    # Short paths resolve against the playlist dir, longer against the host root.
                    domain = last_r if string.count('/') < 2 else durl
                    string = domain + ('' if string.startswith('/') else '/') + string
                lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
        data = '\n'.join(lines)
        # NOTE(review): "application/vnd.apple.mpegur" looks like a typo for
        # "application/vnd.apple.mpegurl"; left unchanged in this doc-only pass.
        return [200, "application/vnd.apple.mpegur", data]

    def tsProxy(self, url):
        """Pass a (base64-encoded) segment URL through, preserving Content-Type."""
        url = self.d64(url)
        data = requests.get(url, headers=self.headers, proxies=self.proxies, stream=True)
        return [200, data.headers['Content-Type'], data.content]

    def e64(self, text):
        """Base64-encode a UTF-8 string; '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def get_working_host(self):
        """Get working host from known dynamic URLs"""
        # Known working URLs from the dynamic gateway
        dynamic_urls = [
            'https://artist.vgwtswi.xyz',
            'https://ability.vgwtswi.xyz',
            'https://am.vgwtswi.xyz'
        ]

        # Test each URL to find a working one
        for url in dynamic_urls:
            try:
                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=10)
                if response.status_code == 200:
                    # Verify it has the expected content structure
                    data = self.getpq(response.text)
                    articles = data('#index article a')
                    if len(articles) > 0:
                        self.log(f"选用可用站点: {url}")
                        print(f"选用可用站点: {url}")
                        return url
            except Exception as e:
                continue

        # Fallback to first URL if none work (better than crashing)
        self.log(f"未检测到可用站点,回退: {dynamic_urls[0]}")
        print(f"未检测到可用站点,回退: {dynamic_urls[0]}")
        return dynamic_urls[0]

    def getlist(self, data, tid=''):
        """Map article anchors to vod entries; '/mrdg' categories become folders."""
        videos = []
        l = '/mrdg' in tid
        for k in data.items():
            a = k.attr('href')
            b = k('h2').text()
            # Some pages might not include datePublished; use a fallback
            c = k('span[itemprop="datePublished"]').text() or k('.post-meta, .entry-meta, time').text()
            if a and b:
                videos.append({
                    'vod_id': f"{a}{'@folder' if l else ''}",
                    'vod_name': b.replace('\n', ' '),
                    'vod_pic': self.getimg(k('script').text()),
                    'vod_remarks': c or '',
                    'vod_tag': 'folder' if l else '',
                    'style': {"type": "rect", "ratio": 1.33}
                })
        return videos

    def getfod(self, id):
        """Expand a folder post into one entry per section.

        Assumes each <h2> section is followed by two <p> blocks — link,
        then encrypted image ('data-xkrkllgl') — TODO confirm against the
        site markup.
        """
        url = f"{self.host}{id}"
        data = self.getpq(requests.get(url, headers=self.headers, proxies=self.proxies).text)
        vdata=data('.post-content[itemprop="articleBody"]')
        # Strip non-content widgets before pairing headings with paragraphs.
        r=['.txt-apps','.line','blockquote','.tags','.content-tabs']
        for i in r:vdata.remove(i)
        p=vdata('p')
        videos=[]
        for i,x in enumerate(vdata('h2').items()):
            c=i*2
            videos.append({
                'vod_id': p.eq(c)('a').attr('href'),
                'vod_name': p.eq(c).text(),
                'vod_pic': f"{self.getProxyUrl()}&url={p.eq(c+1)('img').attr('data-xkrkllgl')}&type=img",
                'vod_remarks':x.text()
            })
        return videos

    def getimg(self, text):
        """Extract the loadBannerDirect image URL from inline script text; '' if absent."""
        match = re.search(r"loadBannerDirect\('([^']+)'", text)
        if match:
            url = match.group(1)
            return f"{self.getProxyUrl()}&url={url}&type=img"
        else:
            return ''

    def aesimg(self, word):
        """Decrypt obfuscated image bytes with the site's fixed AES-128-CBC key/iv."""
        key = b'f5d965df75336270'
        iv = b'97b60394abc2fbe1'
        cipher = AES.new(key, AES.MODE_CBC, iv)
        decrypted = unpad(cipher.decrypt(word), AES.block_size)
        return decrypted

    def getpq(self, data):
        """Parse HTML with pyquery; retry with explicit UTF-8 bytes on failure."""
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))
diff --git a/PyramidStore/plugin/adult/51吸瓜.py.bak b/PyramidStore/plugin/adult/51吸瓜.py.bak
new file mode 100644
index 0000000..81ba712
--- /dev/null
+++ b/PyramidStore/plugin/adult/51吸瓜.py.bak
@@ -0,0 +1,404 @@
+# -*- coding: utf-8 -*-
+# 🌈 Love
+import json
+import random
+import re
+import sys
+import threading
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import urlparse
+
+import requests
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
# NOTE(review): this .bak file is a byte-for-byte duplicate of
# plugin/adult/51吸瓜.py (same diff index 81ba712). Backup copies should not
# live in version control — prefer deleting this file and relying on git
# history; see the canonical 51吸瓜.py for the maintained implementation.
class Spider(Spider):

    def init(self, extend=""):
        try:self.proxies = json.loads(extend)
        except:self.proxies = {}
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cache-Control': 'no-cache',
        }
        # Use working dynamic URLs directly
        self.host = self.get_working_host()
        self.headers.update({'Origin': self.host, 'Referer': f"{self.host}/"})
        self.log(f"使用站点: {self.host}")
        print(f"使用站点: {self.host}")
        pass

    def getName(self):
        return "🌈 51吸瓜"

    def isVideoFormat(self, url):
        # Treat direct media formats as playable without parsing
        return any(ext in (url or '') for ext in ['.m3u8', '.mp4', '.ts'])

    def manualVideoCheck(self):
        return False

    def destroy(self):
        pass

    def homeContent(self, filter):
        try:
            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
            if response.status_code != 200:
                return {'class': [], 'list': []}

            data = self.getpq(response.text)
            result = {}
            classes = []

            # Try to get categories from different possible locations
            category_selectors = [
                '.category-list ul li',
                '.nav-menu li',
                '.menu li',
                'nav ul li'
            ]

            for selector in category_selectors:
                for k in data(selector).items():
                    link = k('a')
                    href = (link.attr('href') or '').strip()
                    name = (link.text() or '').strip()
                    # Skip placeholder or invalid entries
                    if not href or href == '#' or not name:
                        continue
                    classes.append({
                        'type_name': name,
                        'type_id': href
                    })
                if classes:
                    break

            # If no categories found, create some default ones
            if not classes:
                classes = [
                    {'type_name': '首页', 'type_id': '/'},
                    {'type_name': '最新', 'type_id': '/latest/'},
                    {'type_name': '热门', 'type_id': '/hot/'}
                ]

            result['class'] = classes
            result['list'] = self.getlist(data('#index article a'))
            return result

        except Exception as e:
            print(f"homeContent error: {e}")
            return {'class': [], 'list': []}

    def homeVideoContent(self):
        try:
            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
            if response.status_code != 200:
                return {'list': []}
            data = self.getpq(response.text)
            return {'list': self.getlist(data('#index article a, #archive article a'))}
        except Exception as e:
            print(f"homeVideoContent error: {e}")
            return {'list': []}

    def categoryContent(self, tid, pg, filter, extend):
        try:
            if '@folder' in tid:
                id = tid.replace('@folder', '')
                videos = self.getfod(id)
            else:
                # Build URL properly
                if tid.startswith('/'):
                    if pg and pg != '1':
                        url = f"{self.host}{tid}page/{pg}/"
                    else:
                        url = f"{self.host}{tid}"
                else:
                    url = f"{self.host}/{tid}"

                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
                if response.status_code != 200:
                    return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}

                data = self.getpq(response.text)
                videos = self.getlist(data('#archive article a, #index article a'), tid)

            result = {}
            result['list'] = videos
            result['page'] = pg
            result['pagecount'] = 1 if '@folder' in tid else 99999
            result['limit'] = 90
            result['total'] = 999999
            return result

        except Exception as e:
            print(f"categoryContent error: {e}")
            return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}

    def detailContent(self, ids):
        try:
            url = f"{self.host}{ids[0]}" if not ids[0].startswith('http') else ids[0]
            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)

            if response.status_code != 200:
                return {'list': [{'vod_play_from': '51吸瓜', 'vod_play_url': f'页面加载失败${url}'}]}

            data = self.getpq(response.text)
            vod = {'vod_play_from': '51吸瓜'}

            # Get content/description
            try:
                clist = []
                if data('.tags .keywords a'):
                    for k in data('.tags .keywords a').items():
                        title = k.text()
                        href = k.attr('href')
                        if title and href:
                            clist.append('[a=cr:' + json.dumps({'id': href, 'name': title}) + '/]' + title + '[/a]')
                vod['vod_content'] = ' '.join(clist) if clist else data('.post-title').text()
            except:
                vod['vod_content'] = data('.post-title').text() or '51吸瓜视频'

            # Get video URLs (build episode list when multiple players exist)
            try:
                plist = []
                used_names = set()
                if data('.dplayer'):
                    for c, k in enumerate(data('.dplayer').items(), start=1):
                        config_attr = k.attr('data-config')
                        if config_attr:
                            try:
                                config = json.loads(config_attr)
                                video_url = config.get('video', {}).get('url', '')
                                # Determine a readable episode name from nearby headings if present
                                ep_name = ''
                                try:
                                    parent = k.parents().eq(0)
                                    # search up to a few ancestors for a heading text
                                    for _ in range(3):
                                        if not parent: break
                                        heading = parent.find('h2, h3, h4').eq(0).text() or ''
                                        heading = heading.strip()
                                        if heading:
                                            ep_name = heading
                                            break
                                        parent = parent.parents().eq(0)
                                except Exception:
                                    ep_name = ''
                                base_name = ep_name if ep_name else f"视频{c}"
                                name = base_name
                                count = 2
                                # Ensure the name is unique
                                while name in used_names:
                                    name = f"{base_name} {count}"
                                    count += 1
                                used_names.add(name)
                                if video_url:
                                    self.log(f"解析到视频: {name} -> {video_url}")
                                    print(f"解析到视频: {name} -> {video_url}")
                                    plist.append(f"{name}${video_url}")
                            except:
                                continue

                if plist:
                    self.log(f"拼装播放列表,共{len(plist)}个")
                    print(f"拼装播放列表,共{len(plist)}个")
                    vod['vod_play_url'] = '#'.join(plist)
                else:
                    vod['vod_play_url'] = f"未找到视频源${url}"

            except Exception as e:
                vod['vod_play_url'] = f"视频解析失败${url}"

            return {'list': [vod]}

        except Exception as e:
            print(f"detailContent error: {e}")
            return {'list': [{'vod_play_from': '51吸瓜', 'vod_play_url': f'详情页加载失败${ids[0] if ids else ""}'}]}

    def searchContent(self, key, quick, pg="1"):
        try:
            url = f"{self.host}/search/{key}/{pg}" if pg != "1" else f"{self.host}/search/{key}/"
            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)

            if response.status_code != 200:
                return {'list': [], 'page': pg}

            data = self.getpq(response.text)
            videos = self.getlist(data('#archive article a, #index article a'))
            return {'list': videos, 'page': pg}

        except Exception as e:
            print(f"searchContent error: {e}")
            return {'list': [], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        url = id
        p = 1
        if self.isVideoFormat(url):
            # m3u8/mp4 direct play; when using proxy setting, wrap to proxy for m3u8
            if '.m3u8' in url:
                url = self.proxy(url)
            p = 0
        self.log(f"播放请求: parse={p}, url={url}")
        print(f"播放请求: parse={p}, url={url}")
        return {'parse': p, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        if param.get('type') == 'img':
            res=requests.get(param['url'], headers=self.headers, proxies=self.proxies, timeout=10)
            return [200,res.headers.get('Content-Type'),self.aesimg(res.content)]
        elif param.get('type') == 'm3u8':return self.m3Proxy(param['url'])
        else:return self.tsProxy(param['url'])

    def proxy(self, data, type='m3u8'):
        if data and len(self.proxies):return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
        else:return data

    def m3Proxy(self, url):
        url=self.d64(url)
        ydata = requests.get(url, headers=self.headers, proxies=self.proxies, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = requests.get(url, headers=self.headers, proxies=self.proxies).content.decode('utf-8')
        lines = data.strip().split('\n')
        last_r = url[:url.rfind('/')]
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        iskey=True
        for index, string in enumerate(lines):
            if iskey and 'URI' in string:
                pattern = r'URI="([^"]*)"'
                match = re.search(pattern, string)
                if match:
                    lines[index] = re.sub(pattern, f'URI="{self.proxy(match.group(1), "mkey")}"', string)
                    iskey=False
                continue
            if '#EXT' not in string:
                if 'http' not in string:
                    domain = last_r if string.count('/') < 2 else durl
                    string = domain + ('' if string.startswith('/') else '/') + string
                lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
        data = '\n'.join(lines)
        return [200, "application/vnd.apple.mpegur", data]

    def tsProxy(self, url):
        url = self.d64(url)
        data = requests.get(url, headers=self.headers, proxies=self.proxies, stream=True)
        return [200, data.headers['Content-Type'], data.content]

    def e64(self, text):
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def get_working_host(self):
        """Get working host from known dynamic URLs"""
        # Known working URLs from the dynamic gateway
        dynamic_urls = [
            'https://artist.vgwtswi.xyz',
            'https://ability.vgwtswi.xyz',
            'https://am.vgwtswi.xyz'
        ]

        # Test each URL to find a working one
        for url in dynamic_urls:
            try:
                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=10)
                if response.status_code == 200:
                    # Verify it has the expected content structure
                    data = self.getpq(response.text)
                    articles = data('#index article a')
                    if len(articles) > 0:
                        self.log(f"选用可用站点: {url}")
                        print(f"选用可用站点: {url}")
                        return url
            except Exception as e:
                continue

        # Fallback to first URL if none work (better than crashing)
        self.log(f"未检测到可用站点,回退: {dynamic_urls[0]}")
        print(f"未检测到可用站点,回退: {dynamic_urls[0]}")
        return dynamic_urls[0]

    def getlist(self, data, tid=''):
        videos = []
        l = '/mrdg' in tid
        for k in data.items():
            a = k.attr('href')
            b = k('h2').text()
            # Some pages might not include datePublished; use a fallback
            c = k('span[itemprop="datePublished"]').text() or k('.post-meta, .entry-meta, time').text()
            if a and b:
                videos.append({
                    'vod_id': f"{a}{'@folder' if l else ''}",
                    'vod_name': b.replace('\n', ' '),
                    'vod_pic': self.getimg(k('script').text()),
                    'vod_remarks': c or '',
                    'vod_tag': 'folder' if l else '',
                    'style': {"type": "rect", "ratio": 1.33}
                })
        return videos

    def getfod(self, id):
        url = f"{self.host}{id}"
        data = self.getpq(requests.get(url, headers=self.headers, proxies=self.proxies).text)
        vdata=data('.post-content[itemprop="articleBody"]')
        r=['.txt-apps','.line','blockquote','.tags','.content-tabs']
        for i in r:vdata.remove(i)
        p=vdata('p')
        videos=[]
        for i,x in enumerate(vdata('h2').items()):
            c=i*2
            videos.append({
                'vod_id': p.eq(c)('a').attr('href'),
                'vod_name': p.eq(c).text(),
                'vod_pic': f"{self.getProxyUrl()}&url={p.eq(c+1)('img').attr('data-xkrkllgl')}&type=img",
                'vod_remarks':x.text()
            })
        return videos

    def getimg(self, text):
        match = re.search(r"loadBannerDirect\('([^']+)'", text)
        if match:
            url = match.group(1)
            return f"{self.getProxyUrl()}&url={url}&type=img"
        else:
            return ''

    def aesimg(self, word):
        key = b'f5d965df75336270'
        iv = b'97b60394abc2fbe1'
        cipher = AES.new(key, AES.MODE_CBC, iv)
        decrypted = unpad(cipher.decrypt(word), AES.block_size)
        return decrypted

    def getpq(self, data):
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))
diff --git a/PyramidStore/plugin/adult/DSYS.py b/PyramidStore/plugin/adult/DSYS.py
new file mode 100644
index 0000000..f71349d
--- /dev/null
+++ b/PyramidStore/plugin/adult/DSYS.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import time
+import uuid
+from base64 import b64decode, b64encode
+import json
+import sys
+from urllib.parse import urlparse, urlunparse
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad, pad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+    """DSYS adult video spider: talks to an AES-encrypted JSON API."""
+
+    def init(self, extend=""):
+        # No per-instance setup required; hosts and headers are class attributes.
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    # API endpoint host.
+    host = "https://api.230110.xyz"
+
+    # CDN host used to rewrite media and cover URLs (see getl/detailContent).
+    phost = "https://cdn.230110.xyz"
+
+    headers = {
+        'origin': host,
+        'referer': f'{host}/',
+        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
+    }
+
+    def homeContent(self, filter):
+        """Return the category list, shipped as a pre-encrypted AES blob that
+        self.aes(..., False) decrypts and JSON-parses."""
+        data='9XSPkyFMrOOG34JSg//ZosMof45cyBo9hwZMZ5rvI6Yz/ZZlXWIf8/644OzwW+FNIOdJ61R/Lxjy1tqN+ZzokxtiVzb8LjYAkh6GFudwAUXFt9yS1ZjAxC3tDKrQsJQLk3nym0s00DBBzLBntRBDFz7nbba+OOBuQOZpL3CESGL42l4opdoViQLhO/dIizY1kIOk2NxxpDC9Z751gPl1ctHWuLWhuLG/QWgNWi/iHScjKrMHJKcC9GQHst/4Q3dgZ03eQIIVB6jvoV1XXoBCz6fjM/jM3BXpzSttT4Stglwy93gWuNWuZiKypHK2Q0lO10oM0ceRW2a0fPGId+rNYMRO3cR/C0ZueD4cmTAVOuxVr9ZZSP8/nhD0bHyAPONXtchIDJb0O/kdFHk2KTJfQ5q4fHOyzezczc4iQDV/R0S8cGZKM14MF+wytA/iljfj43H0UYqq5pM+MCUGRTdYEtuxCp0+A+DiOhNZwY/Km/TgBoGZQWGbpljJ2LAVnWhxX+ickLH7zuR/FeIwP/R8zOuR+8C8UlT9eHTqtvfNzaGdFxt316atHy8TNjRO7J5a177mqsHs3ziG0toDDzLDCbhRUjFgVA3ktahhXiWaaCo/ZGSJAA8TDO5DYqnJ0JDaX0ILPj8QB5zxrHYmRE8PboIr3RBAjz1sREbaHfjrUjoh29ePhlolLV00EvgoxP5knaqt5Ws/sq5IG57qKCAPgqXzblPLHToJGBtukKhLp8jbGJrkb6PVn4/jysks0NGE'
+        return {'class':self.aes(data,False)}
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """List videos for a category; 24 items per page, newest first."""
+        data = {"q": "", "filter": [f"type_id = {tid}"], "offset": (int(pg)-1) * 24, "limit": 24, "sort": ["video_time:desc"],"lang": "zh-cn", "route": "/videos/search"}
+        result = {}
+        # Tag links emitted by detailContent carry a 'skey_' prefix: delegate to search.
+        if 'skey_' in tid:return self.searchContent(tid.split('_')[-1], True, pg)
+        result['list'] = self.getl(self.getdata(data))
+        result['page'] = pg
+        # Total pages are unknown; advertise large bounds so the client keeps paging.
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Fetch one video's detail record and rewrite its play URL onto the CDN host."""
+        data={"limit":1,"filter":[f"video_id = {ids[0]}"],"lang":"zh-cn","route":"/videos/search"}
+        res = self.getdata(data)[0]
+        # Keep the original URL path but swap the host for the CDN host.
+        purl=urlunparse(urlparse(self.phost)._replace(path=urlparse(res.get('video_url')).path))
+        vod = {
+            'vod_play_from': 'dsysav',
+            'vod_play_url': f"{res.get('video_duration')}${purl}"
+        }
+        if res.get('video_tag'):
+            clist = []
+            tags=res['video_tag'].split(',')
+            # Render tags as clickable links that route back through categoryContent via the 'skey_' prefix.
+            for k in tags:
+                clist.append('[a=cr:' + json.dumps({'id': f'skey_{k}', 'name': k}) + '/]' + k + '[/a]')
+            vod['vod_content'] = ' '.join(clist)
+        return {'list':[vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        """Free-text search; same endpoint as categories with a query instead of a filter."""
+        data={"q":key,"filter":[],"offset":(int(pg)-1) * 24,"limit":24,"sort":["video_time:desc"],"lang":"zh-cn","route":"/videos/search"}
+        return {'list':self.getl(self.getdata(data)),'page':pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        """DASH manifests need local rewriting, so route .mpd URLs through localProxy."""
+        if id.endswith('.mpd'):
+            id=f"{self.getProxyUrl()}&url={self.e64(id)}&type=mpd"
+        return {'parse': 0, 'url': id, 'header':self.headers}
+
+ def localProxy(self, param):
+ if param.get('type') and param['type']=='mpd':
+ url = self.d64(param.get('url'))
+ ids=url.split('/')
+ id=f"{ids[-3]}/{ids[-2]}/"
+ xpu = f"{self.getProxyUrl()}&path=".replace('&', '&')
+ data = self.fetch(url, headers=self.headers).text
+ data = data.replace('initialization="', f'initialization="{xpu}{id}').replace('media="',f'media="{xpu}{id}')
+ return [200,'application/octet-stream',data]
+ else:
+ hsign=self.md5(f"AjPuom638LmWfWyeM5YueKuJ9PuWLdRn/mpd/{param.get('path')}1767196800")
+ bytes_data = bytes.fromhex(hsign)
+ sign = b64encode(bytes_data).decode('utf-8').replace('=','').replace('+','-').replace('/','_')
+ url=f"{self.phost}/mpd/{param.get('path')}?sign={sign}&expire=1767196800"
+ return [302,'text/plain',None,{'Location':url}]
+
+    def liveContent(self, url):
+        pass
+
+    def aes(self, text, operation=True):
+        """AES-128-CBC with the site's static key/IV.
+
+        operation=True: JSON-encode `text` and return base64 ciphertext.
+        operation=False: base64-decode, decrypt and JSON-parse `text`.
+        """
+        key = b'OPQT123412FRANME'
+        iv = b'MRDCQP12QPM13412'
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        if operation:
+            ct_bytes = cipher.encrypt(pad(json.dumps(text).encode("utf-8"), AES.block_size))
+            ct = b64encode(ct_bytes).decode("utf-8")
+            return ct
+        else:
+            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+            return json.loads(pt.decode("utf-8"))
+
+    def e64(self, text):
+        """Base64-encode a UTF-8 string; returns '' and logs on failure."""
+        try:
+            text_bytes = text.encode('utf-8')
+            encoded_bytes = b64encode(text_bytes)
+            return encoded_bytes.decode('utf-8')
+        except Exception as e:
+            print(f"Base64编码错误: {str(e)}")
+            return ""
+
+    def d64(self,encoded_text):
+        """Base64-decode to a UTF-8 string; returns '' and logs on failure."""
+        try:
+            encoded_bytes = encoded_text.encode('utf-8')
+            decoded_bytes = b64decode(encoded_bytes)
+            return decoded_bytes.decode('utf-8')
+        except Exception as e:
+            print(f"Base64解码错误: {str(e)}")
+            return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+    def getl(self,data):
+        """Convert API video records into the client's vod list format."""
+        videos = []
+        for i in data:
+            img = i.get('video_cover')
+            # Rewrite absolute cover URLs onto the CDN host, keeping only the path.
+            if img and 'http' in img:img = urlunparse(urlparse(self.phost)._replace(path=urlparse(img).path))
+            videos.append({
+                'vod_id': i.get('video_id'),
+                'vod_name': i.get('video_title'),
+                'vod_pic': img,
+                'vod_remarks': i.get('video_duration'),
+                'style': {"type": "rect", "ratio": 1.33}
+            })
+        return videos
+
+    def getdata(self,data):
+        """POST a signed, AES-encrypted request to /v1 and return the decrypted payload.
+
+        sign = md5(base64(json(data)) + nonce + timestamp + shared secret).
+        """
+        uid = str(uuid.uuid4())
+        t = int(time.time())
+        json_data = {
+            'sign': self.md5(f"{self.e64(json.dumps(data))}{uid}{t}AjPuom638LmWfWyeM5YueKuJ9PuWLdRn"),
+            'nonce': uid,
+            'timestamp': t,
+            'data': self.aes(data),
+        }
+        res = self.post(f"{self.host}/v1", json=json_data, headers=self.headers).json()
+        # The response body is itself an AES blob under 'data'.
+        res = self.aes(res['data'], False)
+        return res
diff --git a/PyramidStore/plugin/adult/Miss.py b/PyramidStore/plugin/adult/Miss.py
new file mode 100644
index 0000000..0a04a36
--- /dev/null
+++ b/PyramidStore/plugin/adult/Miss.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import copy
+import gzip
+import json
+import re
+import sys
+import time
+import uuid
+from base64 import b64decode
+from urllib.parse import urlparse, urlunparse
+from Crypto.Hash import SHA1, HMAC
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ '''
+ {
+ "": "",
+ "ext": {
+ "site": "https://missav.ai",
+ "cfproxy": ""
+ }
+ }
+ 自备:过cf代理如https://xx.vvvv.cc/proxy?url=
+ '''
+ try:
+ ext=json.loads(extend)
+ self.host,self.pcf,self.phost=ext.get('site',''),ext.get('cfproxy',''),''
+ if self.pcf:
+ parsed_url=urlparse(self.pcf)
+ self.phost=parsed_url.scheme + "://" + parsed_url.netloc
+ except:
+ pass
+ self.headers = {
+ 'referer': f'{self.host}',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
+ }
+ pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    # Recombee recommendation API host (used by getfov).
+    xhost='https://client-rapi-missav.recombee.com'
+
+    # gzip+base64 filter definitions shared by every category (decoded by ungzip).
+    fts = 'H4sIAAAAAAAAA23P30rDMBQG8FeRXM8X8FVGGZk90rA0HU3SMcZgXjn8V6p2BS2KoOiFAwUn2iK+TBP7GBpYXbG9/c6Pc77TnaABjNHOFtojVIDPUQcx7IJJvl9ydX30GwSYSpN0J4iZgTqJiywrPlN1vm/GJiPMJgGxJaZo2qnc3WXDuZIKMqSwUcX7Ui8O1DJRH3Gldh3CgMM2l31BhNGW8euq3PNFrac+PVNZ2NYzjMrbY53c6/Sm2uwDBczB7mGxqaDTWfkV6atXvXiu4FD2KeHOf3nxViahjv8YxwHYtWfyQ3NvFZYP85oSno3HvYDAiNevPqnosWFHAAPahnU6b2DXY8Jp0bO8QdfEmlo/SBd5PPUBAAA='
+
+    # Extra filter definitions appended for the 'actresses' category only.
+    actfts = 'H4sIAAAAAAAAA5WVS2sUQRRG/0rT6xTcqq5Xiwjm/X6sQxZjbBLRBBeOIEGIIEgWrtwI4lJEQsjGhU6Iv2bGcf6FVUUydW/d1SxT55sDfbpmsn9WP+/e1A+q+rh7dnT8qp6rT3snXTz4N7icXH4OB697L/rxZP+sPo1g+Ot8PPg+vvoyOb+IOJ7Vb+fuqGxkJSrZmMOTexiORDjAGxs3GvDGinCANjp5NPbo4NHYo5PHYI8OHoM9JnkM9pjgMdhjksdijwkeiz02eSz22OCx2GOTx2GPDx6HPS55HPa44HHY45LHY48LHo89Pnk89vjg8djjk6fFHh88bfAcxNXduz/sv0Qvfnz74+/X65lf/OMqfzD9ndF8geYzWijQQkaLBVrMaKlASxktF2g5o5UCrWS0WqDVjNYKtJbReoHWM9oo0EZGmwXazGirQFsZbRdoO6OdAu1ktFug3Yz2CrRH70TvqEN3YvT75+TP+5nvxMNKwf0pCIWur4JwM5spVCAaRJtI9ZQ2IPBPg47UTKkGgb/wJlI7pQYE/ho/QsiCaFv61E+7J338Izj6MJi8+xSefnhzO/PTK1CmGt58G118zM+pDBloPtBk0PBBQwaKDxQZSD6QZAB8QN6UbNlAtmTg+cCTgeMDRwaWDywZ8JKSlJS8pCQlJS8pSUnJS0pSUvKSkpSUvKQkJYGXBFISeEkgJYGXBFISeEkgJYGXBFISeEkgJYGXBFISeEkgJYGXBFISeElI/7QO/gOZ7bAksggAAA=='
+
+    def homeContent(self, filter):
+        """Scrape the home page for category links, attach the decompressed
+        filter definitions, and return the first page of thumbnails."""
+        html = self.getpq(f"{self.host}/cn",headers=self.headers)
+        result = {}
+        filters = {}
+        classes=[]
+        # Only the first two nav sections hold category links.
+        for i in list(html('.mt-4.space-y-4').items())[:2]:
+            for j in i('ul li').items():
+                id=j('a').attr('href').split('/')[-1]
+                classes.append({
+                    'type_name': j.text(),
+                    'type_id': id
+                })
+                # deepcopy: each category must own its filter list (actresses extends it below).
+                filters[id] = copy.deepcopy(self.ungzip(self.fts))
+                if id=='actresses':filters[id].extend(self.ungzip(self.actfts))
+        result['class'] = classes
+        result['filters'] = filters
+        result['list'] = self.getlist(html('.grid-cols-2.md\\:grid-cols-3 .thumbnail.group'))
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """List one category page; filter params depend on the category kind."""
+        params={
+            'page':'' if pg=='1' else pg
+        }
+        ft = {
+            'filters': extend.get('filters', ''),
+            'sort': extend.get('sort', '')
+        }
+        # makers/genres pages take no filters; actresses has its own filter set.
+        if tid in ['makers', 'genres']:
+            ft = {}
+        elif tid == 'actresses':
+            ft = {
+                'height': extend.get('height', ''),
+                'cup': extend.get('cup', ''),
+                'debut': extend.get('debut', ''),
+                'age': extend.get('age', ''),
+                'sort': extend.get('sort', '')
+            }
+        params.update(ft)
+        # Drop empty values so the query string stays clean.
+        params = {k: v for k, v in params.items() if v != ""}
+        # Folder entries pass a full URL as tid; plain ids are site-relative.
+        url=tid if 'http' in tid else f"{self.host}/cn/{tid}"
+        data=self.getpq(url,headers=self.headers,params=params)
+        result = {}
+        if tid in ['makers', 'genres']:
+            videos = self.gmsca(data)
+        elif tid == 'actresses':
+            videos = self.actca(data)
+        else:
+            videos = self.getlist(data('.grid-cols-2.md\\:grid-cols-3 .thumbnail.group'))
+        result['list'] = videos
+        result['page'] = pg
+        # Total pages are unknown; advertise large bounds so the client keeps paging.
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Scrape a video page: resolve stream URLs from the packed player
+        script, and build clickable actor/publisher/tag links."""
+        v=self.getpq(ids[0],headers=self.headers)
+        sctx=v('body script').text()
+        urls=self.execute_js(sctx)
+        # No direct URLs: fall back to letting the client sniff the page itself.
+        if not urls:urls=f"嗅探${ids[0]}"
+        c=v('.space-y-2 .text-secondary')
+        ac,dt,bq=[],[],[]
+        # Sort each metadata row into actors (ac), publisher (dt) or tags (bq).
+        for i in c.items():
+            if re.search(r"导演:|女优:",i.text()):
+                ac.extend(['[a=cr:' + json.dumps({'id': j.attr('href'), 'name': j.text()}) + '/]' + j.text() + '[/a]' for j in i('a').items()])
+            elif '发行商:' in i.text():
+                dt.extend(['[a=cr:' + json.dumps({'id': j.attr('href'), 'name': j.text()}) + '/]' + j.text() + '[/a]' for j in i('a').items()])
+            elif re.search(r"标籤:|系列:|类型:",i.text()):
+                bq.extend(['[a=cr:' + json.dumps({'id': j.attr('href'), 'name': j.text()}) + '/]' + j.text() + '[/a]' for j in i('a').items()])
+        np={'MissAV':urls,'相关视频':self.getfov(ids[0])}
+        vod = {
+            'type_name': c.eq(-3)('a').text(),
+            'vod_year': c.eq(0)('span').text(),
+            'vod_remarks': ' '.join(bq),
+            'vod_actor': ' '.join(ac),
+            'vod_director': ' '.join(dt),
+            'vod_content': v('.text-secondary.break-all').text()
+        }
+        names,plist=[],[]
+        # Only keep play sources that produced at least one URL.
+        for i,j in np.items():
+            if j:
+                names.append(i)
+                plist.append(j)
+        vod['vod_play_from']='$$$'.join(names)
+        vod['vod_play_url']='$$$'.join(plist)
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        """Free-text search via the site's /cn/search/<key> page."""
+        data=self.getpq(f"{self.host}/cn/search/{key}",headers=self.headers,params={'page':pg})
+        return {'list': self.getlist(data('.grid-cols-2.md\\:grid-cols-3 .thumbnail.group')),'page':pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        """Resolve a play id; '相关视频' entries carry a page URL that must be
+        re-scraped for the real stream, falling back to client-side sniffing."""
+        p=0 if '嗅' in flag else 1
+        if '相关' in flag:
+            try:
+                v = self.getpq(id, headers=self.headers)
+                sctx = v('body script').text()
+                urls = self.execute_js(sctx)
+                if not urls: raise Exception("没有找到地址")
+                # Use the first resolved source URL directly (no client parsing).
+                p,id=0,urls.split('#')[0].split('$')[-1]
+            except:
+                p=1
+        return {'parse': p, 'url': id, 'header': self.headers}
+
+    def localProxy(self, param):
+        # Not used by this spider.
+        pass
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+    def getpq(self, url, headers=None,params='',min=0,max=3):
+        """Fetch a page (optionally through the CF-bypass proxy prefix) and parse it.
+
+        Follows meta-refresh-style redirects up to `max` times; `min` is the
+        current recursion depth. NOTE(review): `min`/`max` shadow builtins, but
+        renaming them would break keyword-argument callers, so they are kept.
+        """
+        # On the first hop, rewrite proxy-host URLs back to the real site host.
+        if not min and self.phost in url:
+            url=url.replace(self.phost,self.host)
+        if params=={}:params=''
+        if params:
+            params=f"?{self.josn_to_params(params)}"
+        response=self.fetch(f"{self.pcf}{url}{params}", headers=headers,verify=False)
+        res=response.text
+        if 300 <= response.status_code < 400:
+            if min >= max:raise Exception(f"重定向次数过多: {res}")
+            # Redirect target is embedded in the HTML body, not the Location header.
+            match = re.search(r"url=['\"](https?://[^'\"]+)['\"]", res)
+            if match:
+                url = match.group(1).replace(self.phost, self.host)
+                return self.getpq(url, headers=headers,params='',min=min+1,max=max)
+        try:
+            return pq(res)
+        except Exception as e:
+            print(f"{str(e)}")
+            # lxml refuses str input with an encoding declaration; bytes input works.
+            return pq(res.encode('utf-8'))
+
+    def getlist(self,data):
+        """Convert thumbnail cards into vod entries, de-duplicating by href and title."""
+        videos = []
+        names,ids=[],[]
+        for i in data.items():
+            k = i('.overflow-hidden.shadow-lg a')
+            id=k.eq(0).attr('href')
+            name=i('.text-secondary').text()
+            if id and id not in ids and name not in names:
+                ids.append(id)
+                names.append(name)
+                videos.append({
+                    'vod_id': id,
+                    'vod_name': name,
+                    'vod_pic': k.eq(0)('img').attr('data-src'),
+                    # A third anchor indicates a year badge; otherwise leave blank.
+                    'vod_year': '' if len(list(k.items())) < 3 else k.eq(1).text(),
+                    'vod_remarks': k.eq(-1).text(),
+                    'style': {"type": "rect", "ratio": 1.33}
+                })
+        return videos
+
+    def gmsca(self,data):
+        """Parse the makers/genres index into folder entries (no cover images)."""
+        acts=[]
+        for i in data('.grid.grid-cols-2.md\\:grid-cols-3 div').items():
+            acts.append({
+                'vod_id': i('.text-nord13').attr('href'),
+                'vod_name': i('.text-nord13').text(),
+                'vod_pic': '',
+                'vod_remarks': i('.text-nord10').text(),
+                'vod_tag': 'folder',
+                'style': {"type": "rect", "ratio": 1.33}
+            })
+        return acts
+
+    def actca(self,data):
+        """Parse the actresses index into folder entries with portrait covers."""
+        acts=[]
+        for i in data('.max-w-full ul li').items():
+            acts.append({
+                'vod_id': i('a').attr('href'),
+                'vod_name': i('img').attr('alt'),
+                'vod_pic': i('img').attr('src'),
+                'vod_year': i('.text-nord10').eq(-1).text(),
+                'vod_remarks': i('.text-nord10').eq(0).text(),
+                'vod_tag': 'folder',
+                'style': {"type": "oval"}
+            })
+        return acts
+
+    def getfov(self, url):
+        """Fetch "related videos" from the Recombee recommendation API.
+
+        Returns 'title$page-url' pairs joined by '#', or '' on any failure
+        (recommendations are best-effort and must not break detailContent).
+        """
+        try:
+            h=self.headers.copy()
+            ids=url.split('/')
+            h.update({'referer':f'{url}/'})
+            t=str(int(time.time()))
+            # Each request is HMAC-signed over the path plus the timestamp.
+            params = {
+                'frontend_timestamp': t,
+                'frontend_sign': self.getsign(f"/missav-default/batch/?frontend_timestamp={t}"),
+            }
+            # A throwaway user id is enough for anonymous recommendations.
+            uid=str(uuid.uuid4())
+            json_data = {
+                'requests': [
+                    {
+                        'method': 'POST',
+                        'path': f'/recomms/items/{ids[-1]}/items/',
+                        'params': {
+                            'targetUserId': uid,
+                            'count': 13,
+                            'scenario': 'desktop-watch-next-side',
+                            'returnProperties': True,
+                            'includedProperties': [
+                                'title_cn',
+                                'duration',
+                                'has_chinese_subtitle',
+                                'has_english_subtitle',
+                                'is_uncensored_leak',
+                                'dm',
+                            ],
+                            'cascadeCreate': True,
+                        },
+                    },
+                    {
+                        'method': 'POST',
+                        'path': f'/recomms/items/{ids[-1]}/items/',
+                        'params': {
+                            'targetUserId': uid,
+                            'count': 12,
+                            'scenario': 'desktop-watch-next-bottom',
+                            'returnProperties': True,
+                            'includedProperties': [
+                                'title_cn',
+                                'duration',
+                                'has_chinese_subtitle',
+                                'has_english_subtitle',
+                                'is_uncensored_leak',
+                                'dm',
+                            ],
+                            'cascadeCreate': True,
+                        },
+                    },
+                ],
+                'distinctRecomms': True,
+            }
+            data = self.post(f'{self.xhost}/missav-default/batch/', params=params,headers=h, json=json_data).json()
+            vdata=[]
+            # Flatten both batch responses into title$url pairs.
+            for i in data:
+                for j in i['json']['recomms']:
+                    if j.get('id'):
+                        vdata.append(f"{j['values']['title_cn']}${self.host}/cn/{j['id']}")
+            return '#'.join(vdata)
+        except Exception as e:
+            print(f"获取推荐失败: {e}")
+            return ''
+
+ def getsign(self, text):
+ message_bytes = text.encode('utf-8')
+ key_bytes = b'Ikkg568nlM51RHvldlPvc2GzZPE9R4XGzaH9Qj4zK9npbbbTly1gj9K4mgRn0QlV'
+ h = HMAC.new(key_bytes, digestmod=SHA1)
+ h.update(message_bytes)
+ signature = h.hexdigest()
+ return signature
+
+ def ungzip(self, data):
+ result=gzip.decompress(b64decode(data)).decode('utf-8')
+ return json.loads(result)
+
+    def execute_js(self, jstxt):
+        """Run the page's packed (p,a,c,k,e,d) player script in QuickJS and
+        collect the stream-URL globals it defines.
+
+        Returns 'name$url' pairs joined by '#', or None when execution fails
+        (e.g. outside the Android host app where the QuickJS bridge lives).
+        """
+        js_code = re.search(r"eval\(function\(p,a,c,k,e,d\).*?return p\}(.*?)\)\)", jstxt).group(0)
+        try:
+            # Java QuickJS wrapper; only resolvable inside the Android host.
+            from com.whl.quickjs.wrapper import QuickJSContext
+            ctx = QuickJSContext.create()
+            ctx.evaluate(js_code)
+            result = []
+            # Globals the packed script is known to assign stream URLs to.
+            common_vars = ["source", "source842", "source1280"]
+            for var_name in common_vars:
+                try:
+                    value = ctx.getGlobalObject().getProperty(var_name)
+                    if value is not None:
+                        if isinstance(value, str):
+                            value_str = value
+                        else:
+                            value_str = value.toString()
+                        if "http" in value_str:
+                            result.append(f"{var_name}${value_str}")
+                            self.log(f"找到变量 {var_name} = {value_str[:50]}...")
+                except Exception as var_err:
+                    self.log(f"获取变量 {var_name} 失败: {var_err}")
+            ctx.destroy()
+            return '#'.join(result)
+        except Exception as e:
+            self.log(f"执行失败: {e}")
+            return None
diff --git a/PyramidStore/plugin/adult/Phb.py b/PyramidStore/plugin/adult/Phb.py
new file mode 100644
index 0000000..a92eec5
--- /dev/null
+++ b/PyramidStore/plugin/adult/Phb.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+from base64 import b64decode, b64encode
+from urllib.parse import urlparse
+
+import requests
+from pyquery import PyQuery as pq
+from requests import Session
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+    """Pornhub spider; all traffic can be routed through a local HTTP proxy."""
+
+    def init(self, extend=""):
+        '''
+        Built-in proxy configuration (example for a zhenxin-style jar):
+        {
+            "key": "Phb",
+            "name": "Phb",
+            "type": 3,
+            "searchable": 1,
+            "quickSearch": 1,
+            "filterable": 1,
+            "api": "./py/Phb.py",
+            "ext": {
+                "http": "http://127.0.0.1:1072",
+                "https": "http://127.0.0.1:1072"
+            }
+        },
+        Note: both the http and https entries use an http:// proxy URL.
+        '''
+        # ext is a requests-style proxy mapping; fall back to no proxy on bad input.
+        try:self.proxies = json.loads(extend)
+        except:self.proxies = {}
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
+            'pragma': 'no-cache',
+            'cache-control': 'no-cache',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
+            'dnt': '1',
+            'sec-ch-ua-mobile': '?0',
+            'sec-fetch-site': 'cross-site',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-dest': 'empty',
+            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+            'priority': 'u=1, i',
+        }
+        self.host = self.gethost()
+        self.headers.update({'referer': f'{self.host}/', 'origin': self.host})
+        self.session = Session()
+        self.session.proxies.update(self.proxies)
+        self.session.headers.update(self.headers)
+
+        # ====== Seed the session with a fixed, pre-captured cookie jar ======
+        # NOTE(review): these are hard-coded session/consent cookies and will
+        # expire; presumably they need periodic refresh — confirm.
+        cookie_string = "ss=827590546130942001; sessid=607526310895825838; comp_detect-cookies=57035.100000; fg_afaf12e314c5419a855ddc0bf120670f=89213.100000; fg_7d31324eedb583147b6dcbea0051c868=25322.100000; __s=686AA841-42FE722901BB38AD16-B0A8AB1; __l=686AA841-42FE722901BB38AD16-B0A8AB1; tj_UUID_v2=ChAf6M0hCSZM47qWcst9tIq2EgsIxdCqwwYQp_j6DRgB; _ga=GA1.1.1279613306.1751820360; ua=803dd0debe437cd2610f66cd8235a54c; platform=mobile; cookieConsent=3; d_fs=1; accessAgeDisclaimerPH=1; il=v1JnAJL5n4SJJ8ziiYM4g_WAF6rQvZDAsZWgNYIHsUSg0xNzY3NjMxNjgyREwyTWd1RUVBbnozdHFQV01vUW5leEZ0ajFSM1NvNDdSNkVrQ1BhXw..; bs=e1649232670c3a49db241055d6ccf891; bsdd=e1649232670c3a49db241055d6ccf891; tj_UUID=ChAf6M0hCSZM47qWcst9tIq2EgsIxdCqwwYQp_j6DRgBIiBlMTY0OTIzMjY3MGMzYTQ5ZGIyNDEwNTVkNmNjZjg5MQ==; d_uidb=67be68be-6c63-a0f0-03d0-83e3bd7611c8; d_uid=67be68be-6c63-a0f0-03d0-83e3bd7611c8; d_uidb=67be68be-6c63-a0f0-03d0-83e3bd7611c8; htjf-mobile=4; _ga_B39RFFWGYY=GS2.1.s1751820360$o1$g1$t1751820515$j29$l0$h0"
+
+        # Parse the cookie string into a dict.
+        parsed_cookies = {}
+        for part in cookie_string.split('; '):
+            if '=' in part:
+                key, value = part.split('=', 1)  # split on the first '=' only; values may contain '='
+                parsed_cookies[key.strip()] = value.strip()  # strip any stray whitespace
+
+        self.session.cookies.update(parsed_cookies)
+        # ==================================
+
+        pass
+
+    # Unused interface hooks required by the base Spider contract.
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "推荐": "/recommended",
+ "视频": "/video",
+ "片单": "/playlists",
+ "频道": "/channels",
+ "分类": "/categories",
+ "明星": "/pornstars"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+    def homeVideoContent(self):
+        # Intentionally disabled; the recommended listing is served by
+        # categoryContent('/recommended') instead.
+        # data = self.getpq('/recommended')
+        # vhtml = data("#recommendedListings .pcVideoListItem .phimage")
+        # return {'list': self.getlist(vhtml)}
+        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid == '/video' or '_this_video' in tid:
+ pagestr = f'&' if '?' in tid else f'?'
+ tid = tid.split('_this_video')[0]
+ data = self.getpq(f'{tid}{pagestr}page={pg}')
+ vdata = self.getlist(data('#videoCategory .pcVideoListItem'))
+ elif tid == '/recommended':
+ data = self.getpq(f'{tid}?page={pg}')
+ vdata = self.getlist(data('#recommendedListings .pcVideoListItem .phimage'))
+ elif tid == '/playlists':
+ data = self.getpq(f'{tid}?page={pg}')
+ vhtml = data('#playListSection li')
+ vdata = []
+ for i in vhtml.items():
+ vdata.append({
+ 'vod_id': 'playlists_click_' + i('.thumbnail-info-wrapper .display-block a').attr('href'),
+ 'vod_name': i('.thumbnail-info-wrapper .display-block a').attr('title'),
+ 'vod_pic': self.proxy(i('.largeThumb').attr('src')),
+ 'vod_tag': 'folder',
+ 'vod_remarks': i('.playlist-videos .number').text(),
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ elif tid == '/channels':
+ data = self.getpq(f'{tid}?o=rk&page={pg}')
+ vhtml = data('#filterChannelsSection li .description')
+ vdata = []
+ for i in vhtml.items():
+ vdata.append({
+ 'vod_id': 'director_click_' + i('.avatar a').attr('href'),
+ 'vod_name': i('.avatar img').attr('alt'),
+ 'vod_pic': self.proxy(i('.avatar img').attr('src')),
+ 'vod_tag': 'folder',
+ 'vod_remarks': i('.descriptionContainer ul li').eq(-1).text(),
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ elif tid == '/categories' and pg == '1':
+ result['pagecount'] = 1
+ data = self.getpq(f'{tid}')
+ vhtml = data('.categoriesListSection li .relativeWrapper')
+ vdata = []
+ for i in vhtml.items():
+ vdata.append({
+ 'vod_id': i('a').attr('href') + '_this_video',
+ 'vod_name': i('a').attr('alt'),
+ 'vod_pic': self.proxy(i('a img').attr('src')),
+ 'vod_tag': 'folder',
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ elif tid == '/pornstars':
+ data = self.getpq(f'{tid}?o=t&page={pg}')
+ vhtml = data('#popularPornstars .performerCard .wrap')
+ vdata = []
+ for i in vhtml.items():
+ vdata.append({
+ 'vod_id': 'pornstars_click_' + i('a').attr('href'),
+ 'vod_name': i('.performerCardName').text(),
+ 'vod_pic': self.proxy(i('a img').attr('src')),
+ 'vod_tag': 'folder',
+ 'vod_year': i('.performerVideosViewsCount span').eq(0).text(),
+ 'vod_remarks': i('.performerVideosViewsCount span').eq(-1).text(),
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ elif 'playlists_click' in tid:
+ tid = tid.split('click_')[-1]
+ if pg == '1':
+ hdata = self.getpq(tid)
+ self.token = hdata('#searchInput').attr('data-token')
+ vdata = self.getlist(hdata('#videoPlaylist .pcVideoListItem .phimage'))
+ else:
+ tid = tid.split('playlist/')[-1]
+ data = self.getpq(f'/playlist/viewChunked?id={tid}&token={self.token}&page={pg}')
+ vdata = self.getlist(data('.pcVideoListItem .phimage'))
+ elif 'director_click' in tid:
+ tid = tid.split('click_')[-1]
+ data = self.getpq(f'{tid}/videos?page={pg}')
+ vdata = self.getlist(data('#showAllChanelVideos .pcVideoListItem .phimage'))
+ elif 'pornstars_click' in tid:
+ tid = tid.split('click_')[-1]
+ data = self.getpq(f'{tid}/videos?page={pg}')
+ vdata = self.getlist(data('#mostRecentVideosSection .pcVideoListItem .phimage'))
+ result['list'] = vdata
+ return result
+
+    def detailContent(self, ids):
+        """Scrape a video page, extract mediaDefinitions from the player script,
+        and expose each quality as a play entry ('parse@@@@url' base64 ids)."""
+        url = f"{self.host}{ids[0]}"
+        data = self.getpq(ids[0])
+        vn = data('meta[property="og:title"]').attr('content')
+        dtext = data('.userInfo .usernameWrap a')
+        # Uploader rendered as a clickable link routed through categoryContent.
+        pdtitle = '[a=cr:' + json.dumps(
+            {'id': 'director_click_' + dtext.attr('href'), 'name': dtext.text()}) + '/]' + dtext.text() + '[/a]'
+        vod = {
+            'vod_name': vn,
+            'vod_director': pdtitle,
+            'vod_remarks': (data('.userInfo').text() + ' / ' + data('.ratingInfo').text()).replace('\n', ' / '),
+            'vod_play_from': 'Pornhub',
+            'vod_play_url': ''
+        }
+        js_content = data("#player script").eq(0).text()
+        # Default entry: let the client parse the page itself (parse flag '1').
+        plist = [f"{vn}${self.e64(f'{1}@@@@{url}')}"]
+        try:
+            pattern = r'"mediaDefinitions":\s*(\[.*?\]),\s*"isVertical"'
+            match = re.search(pattern, js_content, re.DOTALL)
+            if match:
+                json_str = match.group(1)
+                udata = json.loads(json_str)
+                # NOTE(review): udata[:-1] skips the last media entry — presumably
+                # a non-playable aggregate record; confirm against live data.
+                plist = [
+                    f"{media['height']}${self.e64(f'{0}@@@@{url}')}"
+                    for media in udata[:-1]
+                    if (url := media.get('videoUrl'))
+                ]
+        except Exception as e:
+            print(f"提取mediaDefinitions失败: {str(e)}")
+        vod['vod_play_url'] = '#'.join(plist)
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        """Keyword search over /video/search."""
+        data = self.getpq(f'/video/search?search={key}&page={pg}')
+        return {'list': self.getlist(data('#videoSearchResult .pcVideoListItem .phimage'))}
+
+    def playerContent(self, flag, id, vipFlags):
+        """Decode the 'parse@@@@url' play id; m3u8 URLs are routed through the local proxy."""
+        ids = self.d64(id).split('@@@@')
+        if '.m3u8' in ids[1]: ids[1] = self.proxy(ids[1], 'm3u8')
+        return {'parse': int(ids[0]), 'url': ids[1], 'header': self.headers}
+
+    def localProxy(self, param):
+        """Dispatch proxied requests: m3u8 playlists get rewritten, everything
+        else (segments, images) is fetched and streamed back as-is."""
+        url = self.d64(param.get('url'))
+        if param.get('type') == 'm3u8':
+            return self.m3Proxy(url)
+        else:
+            return self.tsProxy(url)
+
+ def m3Proxy(self, url):
+ ydata = requests.get(url, headers=self.headers, proxies=self.proxies, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = requests.get(url, headers=self.headers, proxies=self.proxies).content.decode('utf-8')
+ lines = data.strip().split('\n')
+ last_r = url[:url.rfind('/')]
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ for index, string in enumerate(lines):
+ if '#EXT' not in string:
+ if 'http' not in string:
+ domain = last_r if string.count('/') < 2 else durl
+ string = domain + ('' if string.startswith('/') else '/') + string
+ lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+    def tsProxy(self, url):
+        """Fetch a media segment (or image) and return it verbatim.
+        NOTE(review): stream=True followed by .content buffers the whole body
+        anyway — presumably fine for short segments; confirm for large files."""
+        data = requests.get(url, headers=self.headers, proxies=self.proxies, stream=True)
+        return [200, data.headers['Content-Type'], data.content]
+
+    def gethost(self):
+        """Resolve the regional host via the redirect from www.pornhub.com.
+        The Location header's trailing '/' is stripped; falls back to the
+        canonical host when the request fails."""
+        try:
+            response = requests.get('https://www.pornhub.com', headers=self.headers, proxies=self.proxies,
+                                    allow_redirects=False)
+            return response.headers['Location'][:-1]
+        except Exception as e:
+            print(f"获取主页失败: {str(e)}")
+            return "https://www.pornhub.com"
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+    def getlist(self, data):
+        """Convert thumbnail cards into vod entries; covers go through the proxy."""
+        vlist = []
+        for i in data.items():
+            vlist.append({
+                'vod_id': i('a').attr('href'),
+                'vod_name': i('a').attr('title'),
+                'vod_pic': self.proxy(i('img').attr('src')),
+                # Duration badge uses either of two CSS classes depending on the page.
+                'vod_remarks': i('.bgShadeEffect').text() or i('.duration').text(),
+                'style': {'ratio': 1.33, 'type': 'rect'}
+            })
+        return vlist
+
+    def getpq(self, path):
+        """GET a site-relative path with the cookie-seeded session and parse it.
+        NOTE(review): returns None on failure — callers index the result and
+        would raise TypeError; confirm whether that is the intended failure mode."""
+        try:
+            response = self.session.get(f'{self.host}{path}').text
+            return pq(response.encode('utf-8'))
+        except Exception as e:
+            print(f"请求失败: , {str(e)}")
+            return None
+
+    def proxy(self, data, type='img'):
+        """Wrap a URL in the local proxy, but only when an upstream proxy is
+        configured; otherwise return it unchanged."""
+        if data and len(self.proxies):return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
+        else:return data
\ No newline at end of file
diff --git a/PyramidStore/plugin/adult/Xhm.py b/PyramidStore/plugin/adult/Xhm.py
new file mode 100644
index 0000000..218db26
--- /dev/null
+++ b/PyramidStore/plugin/adult/Xhm.py
@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+from base64 import b64decode, b64encode
+from pyquery import PyQuery as pq
+from requests import Session
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ self.headers['referer'] = f'{self.host}/'
+ self.session = Session()
+ self.session.headers.update(self.headers)
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-full-version': '"133.0.6943.98"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"19.0.0"',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-full-version-list': '"Not(A:Brand";v="99.0.0.0", "Google Chrome";v="133.0.6943.98", "Chromium";v="133.0.6943.98"',
+ 'dnt': '1',
+ 'upgrade-insecure-requests': '1',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-user': '?1',
+ 'sec-fetch-dest': 'document',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'priority': 'u=0, i'
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "4K": "/4k",
+ "国产": "two_click_/categories/chinese",
+ "最新": "/newest",
+ "最佳": "/best",
+ "频道": "/channels",
+ "类别": "/categories",
+ "明星": "/pornstars"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ if k !='4K':filters[cateManual[k]]=[{'key':'type','name':'类型','value':[{'n':'4K','v':'/4k'}]}]
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data = self.getpq()
+ return {'list': self.getlist(data(".thumb-list--sidebar .thumb-list__item"))}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid in ['/4k', '/newest', '/best'] or 'two_click_' in tid:
+ if 'two_click_' in tid: tid = tid.split('click_')[-1]
+ data = self.getpq(f'{tid}{extend.get("type","")}/{pg}')
+ vdata = self.getlist(data(".thumb-list--sidebar .thumb-list__item"))
+ elif tid == '/channels':
+ data = self.getpq(f'{tid}/{pg}')
+ jsdata = self.getjsdata(data)
+ for i in jsdata['channels']:
+ vdata.append({
+ 'vod_id': f"two_click_" + i.get('channelURL'),
+ 'vod_name': i.get('channelName'),
+ 'vod_pic': i.get('siteLogoURL'),
+ 'vod_year': f'videos:{i.get("videoCount")}',
+ 'vod_tag': 'folder',
+ 'vod_remarks': f'subscribers:{i["subscriptionModel"].get("subscribers")}',
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ elif tid == '/categories':
+ result['pagecount'] = pg
+ data = self.getpq(tid)
+ self.cdata = self.getjsdata(data)
+ for i in self.cdata['layoutPage']['store']['popular']['assignable']:
+ vdata.append({
+ 'vod_id': "one_click_" + i.get('id'),
+ 'vod_name': i.get('name'),
+ 'vod_pic': '',
+ 'vod_tag': 'folder',
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ elif tid == '/pornstars':
+ data = self.getpq(f'{tid}/{pg}')
+ pdata = self.getjsdata(data)
+ for i in pdata['pagesPornstarsComponent']['pornstarListProps']['pornstars']:
+ vdata.append({
+ 'vod_id': f"two_click_" + i.get('pageURL'),
+ 'vod_name': i.get('name'),
+ 'vod_pic': i.get('imageThumbUrl'),
+ 'vod_remarks': i.get('translatedCountryName'),
+ 'vod_tag': 'folder',
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ elif 'one_click' in tid:
+ result['pagecount'] = pg
+ tid = tid.split('click_')[-1]
+ for i in self.cdata['layoutPage']['store']['popular']['assignable']:
+ if i.get('id') == tid:
+ for j in i['items']:
+ vdata.append({
+ 'vod_id': f"two_click_" + j.get('url'),
+ 'vod_name': j.get('name'),
+ 'vod_pic': j.get('thumb'),
+ 'vod_tag': 'folder',
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ result['list'] = vdata
+ return result
+
+ def detailContent(self, ids):
+ data = self.getpq(ids[0])
+ djs = self.getjsdata(data)
+ vn = data('meta[property="og:title"]').attr('content')
+ dtext = data('#video-tags-list-container')
+ href = dtext('a').attr('href')
+ title = dtext('span[class*="body-bold-"]').eq(0).text()
+ pdtitle = ''
+ if href:
+ pdtitle = '[a=cr:' + json.dumps({'id': 'two_click_' + href, 'name': title}) + '/]' + title + '[/a]'
+ vod = {
+ 'vod_name': vn,
+ 'vod_director': pdtitle,
+ 'vod_remarks': data('.rb-new__info').text(),
+ 'vod_play_from': 'Xhamster',
+ 'vod_play_url': ''
+ }
+ try:
+ plist = []
+ d = djs['xplayerSettings']['sources']
+ f = d.get('standard')
+ def custom_sort_key(url):
+ quality = url.split('$')[0]
+ number = ''.join(filter(str.isdigit, quality))
+ number = int(number) if number else 0
+ return -number, quality
+
+ if f:
+ for key, value in f.items():
+ if isinstance(value, list):
+ for info in value:
+ id = self.e64(f'{0}@@@@{info.get("url") or info.get("fallback")}')
+ plist.append(f"{info.get('label') or info.get('quality')}${id}")
+ plist.sort(key=custom_sort_key)
+ if d.get('hls'):
+ for format_type, info in d['hls'].items():
+ if url := info.get('url'):
+ encoded = self.e64(f'{0}@@@@{url}')
+ plist.append(f"{format_type}${encoded}")
+
+ except Exception as e:
+ plist = [f"{vn}${self.e64(f'{1}@@@@{ids[0]}')}"]
+ print(f"获取视频信息失败: {str(e)}")
+ vod['vod_play_url'] = '#'.join(plist)
+ return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data = self.getpq(f'/search/{key}?page={pg}')
+ return {'list': self.getlist(data(".thumb-list--sidebar .thumb-list__item")), 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
+ 'pragma': 'no-cache',
+ 'cache-control': 'no-cache',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
+ 'dnt': '1',
+ 'sec-ch-ua-mobile': '?0',
+ 'origin': self.host,
+ 'sec-fetch-site': 'cross-site',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-dest': 'empty',
+ 'referer': f'{self.host}/',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'priority': 'u=1, i',
+ }
+ ids = self.d64(id).split('@@@@')
+ return {'parse': int(ids[0]), 'url': ids[1], 'header': headers}
+
+ def localProxy(self, param):
+ pass
+
+ def gethost(self):
+ try:
+ response = self.fetch('https://xhamster.com', headers=self.headers, allow_redirects=False)
+ return response.headers['Location']
+ except Exception as e:
+ print(f"获取主页失败: {str(e)}")
+ return "https://zn.xhamster.com"
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def getlist(self, data):
+ vlist = []
+ for i in data.items():
+ vlist.append({
+ 'vod_id': i('.role-pop').attr('href'),
+ 'vod_name': i('.video-thumb-info a').text(),
+ 'vod_pic': i('.role-pop img').attr('src'),
+ 'vod_year': i('.video-thumb-info .video-thumb-views').text().split(' ')[0],
+ 'vod_remarks': i('.role-pop div[data-role="video-duration"]').text(),
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ return vlist
+
+ def getpq(self, path=''):
+ h = '' if path.startswith('http') else self.host
+ response = self.session.get(f'{h}{path}').text
+ try:
+ return pq(response)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(response.encode('utf-8'))
+
+ def getjsdata(self, data):
+ vhtml = data("script[id='initials-script']").text()
+ jst = json.loads(vhtml.split('initials=')[-1][:-1])
+ return jst
+
diff --git a/PyramidStore/plugin/adult/Xvd.py b/PyramidStore/plugin/adult/Xvd.py
new file mode 100644
index 0000000..38de72f
--- /dev/null
+++ b/PyramidStore/plugin/adult/Xvd.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+from urllib.parse import urlparse
+
+import requests
+from pyquery import PyQuery as pq
+from base64 import b64decode, b64encode
+from requests import Session
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ try:self.proxies = json.loads(extend)
+ except:self.proxies = {}
+ self.session = Session()
+ self.session.proxies.update(self.proxies)
+ self.session.headers.update(self.headers)
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = "https://www.xvideos.com"
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36",
+ "pragma": "no-cache",
+ "cache-control": "no-cache",
+ "sec-ch-ua-platform": "\"Windows\"",
+ "sec-ch-ua": "\"Not(A:Brand\";v=\"99\", \"Google Chrome\";v=\"133\", \"Chromium\";v=\"133\"",
+ "dnt": "1",
+ "origin":host,
+ 'referer':f'{host}/',
+ "sec-ch-ua-mobile": "?0",
+ "sec-fetch-site": "cross-site",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-dest": "empty",
+ "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
+ "priority": "u=1, i"
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "最新": "/new",
+ "最佳": "/best",
+ "频道": "/channels-index",
+ "标签": "/tags",
+ "明星": "/pornstars-index"
+ }
+ classes = []
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ result['class'] = classes
+ return result
+
+ def homeVideoContent(self):
+ data = self.getpq()
+ return {'list':self.getlist(data(".mozaique .frame-block"))}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ page = f"/{int(pg) - 1}" if pg != '1' else ''
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid=='/new' or 'tags_click' in tid:
+ if 'tags_click' in tid:tid=tid.split('click_')[-1]
+ data=self.getpq(f'{tid}/{pg}')
+ vdata=self.getlist(data(".mozaique .frame-block"))
+ elif tid=='/best':
+ if pg=='1':
+ self.path=self.session.get(f'{self.host}{tid}',allow_redirects=False).headers['Location']
+ data=self.getpq(f'{self.path}{page}')
+ vdata=self.getlist(data(".mozaique .frame-block"))
+ elif tid=='/channels-index' or tid=='/pornstars-index':
+ data = self.getpq(f'{tid}{page}')
+ vhtml=data(".mozaique .thumb-block")
+ for i in vhtml.items():
+ a = i('.thumb-inside .thumb a')
+ match = re.search(r'src="([^"]+)"', a('script').text())
+ img=''
+ if match:
+ img = match.group(1).strip()
+ vdata.append({
+ 'vod_id': f"channels_click_{'/channels'if tid=='/channels-index' else ''}"+a.attr('href'),
+ 'vod_name': a('.profile-name').text() or i('.profile-name').text().replace('\xa0','/'),
+ 'vod_pic': self.proxy(img),
+ 'vod_tag': 'folder',
+ 'vod_remarks': i('.thumb-under .profile-counts').text(),
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ elif tid=='/tags':
+ result['pagecount'] = pg
+ vhtml = self.getpq(tid)
+ vhtml = vhtml('.tags-list')
+ for d in vhtml.items():
+ for i in d('li a').items():
+ vdata.append({
+ 'vod_id': "tags_click_"+i.attr('href'),
+ 'vod_name': i.attr('title') or i('b').text(),
+ 'vod_pic': '',
+ 'vod_tag': 'folder',
+ 'vod_remarks': i('.navbadge').text(),
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ elif 'channels_click' in tid:
+ tid=tid.split('click_')[-1]
+ vhtml=self.session.post(f'{self.host}{tid}/videos/best/{int(pg)-1}').json()
+ for i in vhtml['videos']:
+ vdata.append({
+ 'vod_id': i.get('u'),
+ 'vod_name': i.get('tf'),
+ 'vod_pic': self.proxy(i.get('il')),
+ 'vod_year': i.get('n'),
+ 'vod_remarks': i.get('d'),
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ result['list'] = vdata
+ return result
+
+ def detailContent(self, ids):
+ url = f"{self.host}{ids[0]}"
+ data = self.getpq(ids[0])
+ vn=data('meta[property="og:title"]').attr('content')
+ dtext=data('.main-uploader a')
+ href=dtext.attr('href')
+ pdtitle=''
+ if href and href.count('/') < 2:
+ href=f'/channels{href}'
+ pdtitle = '[a=cr:' + json.dumps({'id': 'channels_click_'+href, 'name': dtext('.name').text()}) + '/]' + dtext('.name').text() + '[/a]'
+ vod = {
+ 'vod_name': vn,
+ 'vod_director':pdtitle,
+ 'vod_remarks': data('.page-title').text().replace(vn,''),
+ 'vod_play_from': 'Xvideos',
+ 'vod_play_url': ''
+ }
+ js_content = data("#video-player-bg script")
+ jstr=''
+ for script in js_content.items():
+ content = script.text()
+ if 'setVideoUrlLow' in content and 'html5player' in content:
+ jstr = content
+ break
+ plist = [f"{vn}${self.e64(f'{1}@@@@{url}')}"]
+ def extract_video_urls(js_content):
+ try:
+ low = re.search(r'setVideoUrlLow\([\'"]([^\'"]+)[\'"]\)', js_content)
+ high = re.search(r'setVideoUrlHigh\([\'"]([^\'"]+)[\'"]\)', js_content)
+ hls = re.search(r'setVideoHLS\([\'"]([^\'"]+)[\'"]\)', js_content)
+
+ return {
+ 'hls': hls.group(1) if hls else None,
+ 'high': high.group(1) if high else None,
+ 'low': low.group(1) if low else None
+ }
+ except Exception as e:
+ print(f"提取视频URL失败: {str(e)}")
+ return {}
+ if jstr:
+ try:
+ urls = extract_video_urls(jstr)
+ plist = [
+ f"{quality}${self.e64(f'{0}@@@@{url}')}"
+ for quality, url in urls.items()
+ if url
+ ]
+ except Exception as e:
+ print(f"提取url失败: {str(e)}")
+ vod['vod_play_url'] = '#'.join(plist)
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.getpq(f'/?k={key}&p={int(pg)-1}')
+ return {'list':self.getlist(data(".mozaique .frame-block")),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ ids=self.d64(id).split('@@@@')
+ if '.m3u8' in ids[1]: ids[1] = self.proxy(ids[1], 'm3u8')
+ return {'parse': int(ids[0]), 'url': ids[1], 'header': self.headers}
+
+ def localProxy(self, param):
+ url=self.d64(param['url'])
+ if param.get('type') == 'm3u8':
+ return self.m3Proxy(url)
+ else:
+ return self.tsProxy(url)
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def getlist(self, data):
+ vlist=[]
+ for i in data.items():
+ a=i('.thumb-inside .thumb a')
+ b=i('.thumb-under .title a')
+ vlist.append({
+ 'vod_id': a.attr('href'),
+ 'vod_name': b('a').attr('title'),
+ 'vod_pic': self.proxy(a('img').attr('data-src')),
+ 'vod_year': a('.video-hd-mark').text(),
+ 'vod_remarks': b('.duration').text(),
+ 'style': {'ratio': 1.33, 'type': 'rect'}
+ })
+ return vlist
+
+ def getpq(self, path=''):
+ response = self.session.get(f'{self.host}{path}').text
+ try:
+ return pq(response)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(response.encode('utf-8'))
+
+ def m3Proxy(self, url):
+ ydata = requests.get(url, headers=self.headers, proxies=self.proxies, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = requests.get(url, headers=self.headers, proxies=self.proxies).content.decode('utf-8')
+ lines = data.strip().split('\n')
+ last_r = url[:url.rfind('/')]
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ for index, string in enumerate(lines):
+ if '#EXT' not in string:
+ if 'http' not in string:
+ domain=last_r if string.count('/') < 2 else durl
+ string = domain + ('' if string.startswith('/') else '/') + string
+ lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def tsProxy(self, url):
+ data = requests.get(url, headers=self.headers, proxies=self.proxies, stream=True)
+ return [200, data.headers['Content-Type'], data.content]
+
+ def proxy(self, data, type='img'):
+ if data and len(self.proxies):return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
+ else:return data
diff --git a/PyramidStore/plugin/adult/lavAPP.py b/PyramidStore/plugin/adult/lavAPP.py
new file mode 100644
index 0000000..3ecc807
--- /dev/null
+++ b/PyramidStore/plugin/adult/lavAPP.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from base64 import b64encode, b64decode
+from Crypto.Hash import MD5, SHA256
+sys.path.append('..')
+from base.spider import Spider
+from Crypto.Cipher import AES
+import json
+import time
+
+
+class Spider(Spider):
+
+ def getName(self):
+ return "lav"
+
+ def init(self, extend=""):
+ self.id = self.ms(str(int(time.time() * 1000)))[:16]
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = "http://sir_new.tiansexyl.tv"
+ t = str(int(time.time() * 1000))
+ headers = {'User-Agent': 'okhttp-okgo/jeasonlzy', 'Connection': 'Keep-Alive',
+ 'Content-Type': 'application/x-www-form-urlencoded'}
+
+ def homeContent(self, filter):
+ cateManual = {"演员": "actor", "分类": "avsearch", }
+ classes = []
+ for k in cateManual:
+ classes.append({'type_name': k, 'type_id': cateManual[k]})
+ j = {'code': 'homePage', 'mod': 'down', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id}
+
+ body = self.aes(j)
+ data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
+ data1 = self.aes(data, False)['data']
+ self.r = data1['r']
+ for i, d in enumerate(data1['avTag']):
+ # if i == 4:
+ # break
+ classes.append({'type_name': d['name'], 'type_id': d['tag']})
+ resutl = {}
+ resutl["class"] = classes
+ return resutl
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ id = tid.split("@@")
+ result = {}
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ if id[0] == 'avsearch':
+ if pg == '1':
+ j = {'code': 'avsearch', 'mod': 'search', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id}
+ if len(id) > 1:
+ j = {'code': 'find', 'mod': 'tag', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id, 'type': 'av', 'dis': 'new', 'page': str(pg), 'tag': id[1]}
+ elif id[0] == 'actor':
+ j = {'mod': 'actor', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv', 'app_type': 'rn',
+ 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn', 'oauth_id': self.id,
+ 'page': str(pg), 'filter': ''}
+ if len(id) > 1:
+ j = {'code': 'eq', 'mod': 'actor', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id, 'page': str(pg), 'id': id[1], 'actor': id[2]}
+ else:
+ j = {'code': 'search', 'mod': 'av', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id, 'page': str(pg), 'tag': id[0]}
+
+ body = self.aes(j)
+ data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
+ data1 = self.aes(data, False)['data']
+ videos = []
+ if tid == 'avsearch' and len(id) == 1:
+ for item in data1:
+ videos.append({"vod_id": id[0] + "@@" + str(item.get('tags')), 'vod_name': item.get('name'),
+ 'vod_pic': self.imgs(item.get('ico')), 'vod_tag': 'folder',
+ 'style': {"type": "rect", "ratio": 1.33}})
+ elif tid == 'actor' and len(id) == 1:
+ for item in data1:
+ videos.append({"vod_id": id[0] + "@@" + str(item.get('id')) + "@@" + item.get('name'),
+ 'vod_name': item.get('name'), 'vod_pic': self.imgs(item.get('cover')),
+ 'vod_tag': 'folder', 'style': {"type": "oval"}})
+ else:
+ for item in data1:
+ if item.get('_id'):
+ videos.append({"vod_id": str(item.get('id')), 'vod_name': item.get('title'),
+ 'vod_pic': self.imgs(item.get('cover_thumb') or item.get('cover_full')),
+ 'vod_remarks': item.get('good'), 'style': {"type": "rect", "ratio": 1.33}})
+ result["list"] = videos
+ return result
+
+ def detailContent(self, ids):
+ id = ids[0]
+ j = {'code': 'detail', 'mod': 'av', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
+ 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
+ 'oauth_id': self.id, 'id': id}
+ body = self.aes(j)
+ data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
+ data1 = self.aes(data, False)['line']
+ vod = {}
+ play = []
+ for itt in data1:
+ a = itt['line'].get('s720')
+ if a:
+ b = a.split('.')
+ b[0] = 'https://m3u8'
+ a = '.'.join(b)
+ play.append(itt['info']['tips'] + "$" + a)
+ break
+ vod["vod_play_from"] = 'LAV'
+ vod["vod_play_url"] = "#".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ pass
+
+ def playerContent(self, flag, id, vipFlags):
+ url = self.getProxyUrl() + "&url=" + b64encode(id.encode('utf-8')).decode('utf-8') + "&type=m3u8"
+ self.hh = {'User-Agent': 'dd', 'Connection': 'Keep-Alive', 'Referer': self.r}
+ result = {}
+ result["parse"] = 0
+ result["url"] = url
+ result["header"] = self.hh
+ return result
+
+ def localProxy(self, param):
+ url = param["url"]
+ if param.get('type') == "m3u8":
+ return self.vod(b64decode(url).decode('utf-8'))
+ else:
+ return self.img(url)
+
+ def vod(self, url):
+ data = self.fetch(url, headers=self.hh).text
+ key = bytes.fromhex("13d47399bda541b85e55830528d4e66f1791585b2d2216f23215c4c63ebace31")
+ iv = bytes.fromhex(data[:32])
+ data = data[32:]
+ cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
+ data_bytes = bytes.fromhex(data)
+ decrypted = cipher.decrypt(data_bytes)
+ encoded = decrypted.decode("utf-8").replace("\x08", "")
+ return [200, "application/vnd.apple.mpegur", encoded]
+
+ def imgs(self, url):
+ return self.getProxyUrl() + '&url=' + url
+
+ def img(self, url):
+ type = url.split('.')[-1]
+ data = self.fetch(url).text
+ key = bytes.fromhex("ba78f184208d775e1553550f2037f4af22cdcf1d263a65b4d5c74536f084a4b2")
+ iv = bytes.fromhex(data[:32])
+ data = data[32:]
+ cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
+ data_bytes = bytes.fromhex(data)
+ decrypted = cipher.decrypt(data_bytes)
+ return [200, f"image/{type}", decrypted]
+
+ def ms(self, data, m=False):
+ h = MD5.new()
+ if m:
+ h = SHA256.new()
+ h.update(data.encode('utf-8'))
+ return h.hexdigest()
+
+ def aes(self, data, operation=True):
+ key = bytes.fromhex("620f15cfdb5c79c34b3940537b21eda072e22f5d7151456dec3932d7a2b22c53")
+ t = str(int(time.time()))
+ ivt = self.ms(t)
+ if operation:
+ data = json.dumps(data, separators=(',', ':'))
+ iv = bytes.fromhex(ivt)
+ else:
+ iv = bytes.fromhex(data[:32])
+ data = data[32:]
+ cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
+ if operation:
+ data_bytes = data.encode('utf-8')
+ encrypted = cipher.encrypt(data_bytes)
+ ep = f'{ivt}{encrypted.hex()}'
+ edata = f"data={ep}×tamp={t}0d27dfacef1338483561a46b246bf36d"
+ sign = self.ms(self.ms(edata, True))
+ edata = f"timestamp={t}&data={ep}&sign={sign}"
+ return edata
+ else:
+ data_bytes = bytes.fromhex(data)
+ decrypted = cipher.decrypt(data_bytes)
+ return json.loads(decrypted.decode('utf-8'))
+
diff --git a/PyramidStore/plugin/adult/onlyfans gv.py b/PyramidStore/plugin/adult/onlyfans gv.py
new file mode 100644
index 0000000..69ed0f4
--- /dev/null
+++ b/PyramidStore/plugin/adult/onlyfans gv.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+#author:嗷呜群fans&claude4⚡
+import json
+import sys
+import re
+import time
+from base64 import b64encode
+from urllib.parse import urljoin, urlencode
+import requests
+from pyquery import PyQuery as pq
+from requests import Session
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+ def init(self, extend=""):
+ try:
+ self.proxies = json.loads(extend) if extend else {}
+ except:
+ self.proxies = {}
+
+ if isinstance(self.proxies, dict) and 'proxy' in self.proxies and isinstance(self.proxies['proxy'], dict):
+ self.proxies = self.proxies['proxy']
+
+ fixed = {}
+ for k, v in (self.proxies or {}).items():
+ if isinstance(v, str) and not v.startswith('http'):
+ fixed[k] = f'http://{v}'
+ else:
+ fixed[k] = v
+ self.proxies = fixed
+
+ self.headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:142.0) Gecko/20100101 Firefox/142.0',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.3,en;q=0.2',
+ 'Referer': 'https://gayvidsclub.com/',
+ 'Origin': 'https://gayvidsclub.com',
+ }
+
+ self.host = "https://gayvidsclub.com"
+ self.session = Session()
+ self.session.proxies.update(self.proxies)
+ self.session.headers.update(self.headers)
+
+ def getName(self):
+ return "GayVidsClub"
+
+ def isVideoFormat(self, url):
+ return '.m3u8' in url or '.mp4' in url
+
+ def manualVideoCheck(self):
+ return True
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "最新": "/all-gay-porn/",
+ "COAT": "/all-gay-porn/coat/",
+ "MEN'S RUSH.TV": "/all-gay-porn/mens-rush-tv/",
+ "HUNK CHANNEL": "/all-gay-porn/hunk-channel/",
+ "KO": "/all-gay-porn/ko/",
+ "EXFEED": "/all-gay-porn/exfeed/",
+ "BRAVO!": "/all-gay-porn/bravo/",
+ "STR8BOYS": "/all-gay-porn/str8boys/",
+ "G-BOT": "/all-gay-porn/g-bot/"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data = self.fetchPage("/")
+ vlist = self.getlist(data("article"))
+ if not vlist:
+ data = self.fetchPage('/all-gay-porn/')
+ vlist = self.getlist(data("article"))
+ return {'list': vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+
+ if pg == 1:
+ url = tid
+ else:
+ url = f"{tid}page/{pg}/"
+
+ data = self.fetchPage(url)
+ vdata = self.getlist(data("article"))
+
+ result['list'] = vdata
+ return result
+
+ def detailContent(self, ids):
+ data = self.fetchPage(ids[0])
+
+ title = data('h1').text().strip()
+
+ iframe_src = None
+ iframe_elem = data('iframe')
+ if iframe_elem:
+ iframe_src = iframe_elem.attr('src')
+
+ if not iframe_src:
+ scripts = data('script')
+ for script in scripts.items():
+ script_text = script.text()
+ if 'iframe' in script_text and 'src' in script_text:
+ matches = re.findall(r'iframe.*?src=[\'"](https?://[^\'"]+)[\'"]', script_text)
+ if matches:
+ iframe_src = matches[0]
+ break
+
+ # 获取海报图片 - 确保使用横版图片
+ vod_pic = ""
+ img_elem = data('img')
+ if img_elem:
+ vod_pic = img_elem.attr('src')
+ # 确保使用横版海报图
+ if vod_pic and ('poster' in vod_pic or 'cover' in vod_pic):
+ # 已经是横版图片,不做处理
+ pass
+ elif vod_pic:
+ # 尝试转换为横版图片
+ vod_pic = self.ensure_horizontal_poster(vod_pic)
+
+ vod = {
+ 'vod_name': title,
+ 'vod_content': 'GayVidsClub视频',
+ 'vod_tag': 'GayVidsClub',
+ 'vod_pic': vod_pic, # 添加海报图片
+ 'vod_play_from': 'GayVidsClub',
+ 'vod_play_url': ''
+ }
+
+ play_lines = []
+
+ if iframe_src:
+ if not iframe_src.startswith('http'):
+ iframe_src = urljoin(self.host, iframe_src)
+ play_lines.append(f"直连${self.e64(iframe_src)}")
+
+ play_lines.append(f"嗅探${self.e64(ids[0])}")
+
+ if iframe_src:
+ play_lines.append(f"阿里云盘解析${self.e64(iframe_src)}")
+
+ play_lines.append(f"夸克网盘解析${self.e64(iframe_src)}")
+
+ play_lines.append(f"115网盘解析${self.e64(iframe_src)}")
+
+ play_lines.append(f"迅雷解析${self.e64(iframe_src)}")
+
+ play_lines.append(f"PikPak解析${self.e64(iframe_src)}")
+
+ play_lines.append(f"手机推送${iframe_src}")
+ else:
+ fallback_url = ids[0]
+ play_lines.append(f"阿里云盘解析${self.e64(fallback_url)}")
+ play_lines.append(f"夸克网盘解析${self.e64(fallback_url)}")
+ play_lines.append(f"115网盘解析${self.e64(fallback_url)}")
+ play_lines.append(f"迅雷解析${self.e64(fallback_url)}")
+ play_lines.append(f"PikPak解析${self.e64(fallback_url)}")
+ play_lines.append(f"手机推送${fallback_url}")
+
+ vod['vod_play_url'] = '#'.join(play_lines)
+
+ return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ if pg == 1:
+ url = f"/?s={key}"
+ else:
+ url = f"/page/{pg}/?s={key}"
+
+ data = self.fetchPage(url)
+ return {'list': self.getlist(data("article")), 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ url = self.d64(id)
+
+ if "直连" in flag:
+ return {'parse': 0, 'url': url, 'header': self.headers}
+ elif "嗅探" in flag:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+ elif "阿里云盘解析" in flag:
+ return self.parse_with_aliyun(url)
+ elif "夸克网盘解析" in flag:
+ return self.parse_with_quark(url)
+ elif "115网盘解析" in flag:
+ return self.parse_with_115(url)
+ elif "迅雷解析" in flag:
+ return self.parse_with_thunder(url)
+ elif "PikPak解析" in flag:
+ return self.parse_with_pikpak(url)
+ elif "手机推送" in flag:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+ else:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+
+ def fetchPage(self, url):
+ if not url.startswith('http'):
+ url = urljoin(self.host, url)
+ response = self.session.get(url)
+ return pq(response.text)
+
+ def getlist(self, items):
+ vlist = []
+ for item in items.items():
+ vid = item.find('a').attr('href')
+ img = item.find('img').attr('src')
+ name = item.find('h2').text()
+ if not name:
+ name = item.find('h3').text()
+
+ # 确保使用横版海报图
+ if img:
+ if '?' in img:
+ img = img.split('?')[0]
+ # 确保使用横版图片
+ img = self.ensure_horizontal_poster(img)
+
+ vlist.append({
+ 'vod_id': vid,
+ 'vod_name': name,
+ 'vod_pic': img,
+ 'vod_remarks': '',
+ 'style': {'type': 'rect', 'ratio': 1.33} # 添加横版样式
+ })
+ return vlist
+
+ def ensure_horizontal_poster(self, img_url):
+ """
+ 确保使用横版海报图片
+ """
+ if not img_url:
+ return img_url
+
+ # 如果已经是横版图片,直接返回
+ if 'poster' in img_url or 'cover' in img_url:
+ return img_url
+
+ # 尝试转换为横版图片
+ # 常见的竖版图片标识
+ vertical_indicators = ['thumb', 'vertical', 'portrait', 'square']
+
+ # 常见的横版图片标识
+ horizontal_indicators = ['poster', 'cover', 'horizontal', 'landscape']
+
+ # 检查是否是竖版图片
+ is_vertical = any(indicator in img_url for indicator in vertical_indicators)
+
+ if is_vertical:
+ # 尝试转换为横版图片
+ for v_indicator in vertical_indicators:
+ for h_indicator in horizontal_indicators:
+ if v_indicator in img_url:
+ # 替换竖版标识为横版标识
+ new_url = img_url.replace(v_indicator, h_indicator)
+ # 检查新URL是否有效
+ try:
+ response = self.session.head(new_url, timeout=3)
+ if response.status_code == 200:
+ return new_url
+ except:
+ continue
+
+ # 如果无法转换,尝试添加横版参数
+ if '?' in img_url:
+ new_url = img_url + '&type=horizontal'
+ else:
+ new_url = img_url + '?type=horizontal'
+
+ return new_url
+
+ return img_url
+
+ def e64(self, data):
+ return b64encode(data.encode()).decode()
+
+ def d64(self, data):
+ from base64 import b64decode
+ return b64decode(data).decode()
+
+ def parse_with_aliyun(self, url):
+ try:
+ parse_result = {
+ 'parse': 1,
+ 'url': url,
+ 'header': self.headers,
+ 'parse_type': 'aliyun',
+ 'message': '使用阿里云盘解析服务'
+ }
+ return parse_result
+ except Exception as e:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+
+ def parse_with_quark(self, url):
+ try:
+ parse_result = {
+ 'parse': 1,
+ 'url': url,
+ 'header': self.headers,
+ 'parse_type': 'quark',
+ 'message': '使用夸克网盘解析服务'
+ }
+ return parse_result
+ except Exception as e:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+
+ def parse_with_115(self, url):
+ try:
+ parse_result = {
+ 'parse': 1,
+ 'url': url,
+ 'header': self.headers,
+ 'parse_type': '115',
+ 'message': '使用115网盘解析服务'
+ }
+ return parse_result
+ except Exception as e:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+
+ def parse_with_thunder(self, url):
+ try:
+ parse_result = {
+ 'parse': 1,
+ 'url': url,
+ 'header': self.headers,
+ 'parse_type': 'thunder',
+ 'message': '使用迅雷解析服务'
+ }
+ return parse_result
+ except Exception as e:
+ return {'parse': 1, 'url': url, 'header': self.headers}
+
+ def parse_with_pikpak(self, url):
+ try:
+ parse_result = {
+ 'parse': 1,
+ 'url': url,
+ 'header': self.headers,
+ 'parse_type': 'pikpak',
+ 'message': '使用PikPak解析服务'
+ }
+ return parse_result
+ except Exception as e:
+ return {'parse': 1, 'url': url, 'header': self.headers}
\ No newline at end of file
diff --git a/PyramidStore/plugin/adult/stripchat.py b/PyramidStore/plugin/adult/stripchat.py
new file mode 100644
index 0000000..ed96695
--- /dev/null
+++ b/PyramidStore/plugin/adult/stripchat.py
@@ -0,0 +1,243 @@
+# coding=utf-8
+# !/usr/bin/python
+import sys, re
+import base64
+import hashlib
+import requests
+from typing import Tuple
+from base.spider import Spider
+from datetime import datetime, timedelta
+from urllib.parse import quote, unquote
+from urllib3.util.retry import Retry
+sys.path.append('..')
+
+# 搜索用户名,关键词格式为“类别+空格+关键词”
+# 类别在标签上已注明,比如“女主播g”,则搜索类别为“g”
+# 搜索“g per”,则在“女主播”中搜索“per”, 关键词不区分大小写,但至少3位,否则空结果
+
+class Spider(Spider):
+
+    def init(self, extend="{}"):
+        # One-time setup: fixed host, browser-like headers, the Mouflon
+        # decryption key, and a retrying HTTP session.  `extend` is unused.
+        origin = 'https://zh.stripchat.com'
+        self.host = origin
+        self.headers = {
+            'Origin': origin,
+            'Referer': f"{origin}/",
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0'
+        }
+        self.stripchat_key = self.decode_key_compact()
+        # Cache dict: SHA-256 digests keyed by key string (see compute_hash).
+        self._hash_cache = {}
+        self.create_session_with_retry()
+
+    def getName(self):
+        # Required Spider hook; this spider exposes no display name.
+        pass
+
+    def isVideoFormat(self, url):
+        # Required Spider hook; format sniffing is not used by this spider.
+        pass
+
+    def manualVideoCheck(self):
+        # Required Spider hook; no manual playability check is provided.
+        pass
+
+    def destroy(self):
+        # Required Spider hook; nothing to clean up.
+        pass
+
+ def homeContent(self, filter):
+ CLASSES = [{'type_name': '女主播g', 'type_id': 'girls'}, {'type_name': '情侣c', 'type_id': 'couples'}, {'type_name': '男主播m', 'type_id': 'men'}, {'type_name': '跨性别t', 'type_id': 'trans'}]
+ VALUE = ({'n': '中国', 'v': 'tagLanguageChinese'}, {'n': '亚洲', 'v': 'ethnicityAsian'}, {'n': '白人', 'v': 'ethnicityWhite'}, {'n': '拉丁', 'v': 'ethnicityLatino'}, {'n': '混血', 'v': 'ethnicityMultiracial'}, {'n': '印度', 'v': 'ethnicityIndian'}, {'n': '阿拉伯', 'v': 'ethnicityMiddleEastern'}, {'n': '黑人', 'v': 'ethnicityEbony'})
+ VALUE_MEN = ({'n': '情侣', 'v': 'sexGayCouples'}, {'n': '直男', 'v': 'orientationStraight'})
+ TIDS = ('girls', 'couples', 'men', 'trans')
+ filters = {
+ tid: [{'key': 'tag', 'value': VALUE_MEN + VALUE if tid == 'men' else VALUE}]
+ for tid in TIDS
+ }
+ return {
+ 'class': CLASSES,
+ 'filters': filters
+ }
+
+    def homeVideoContent(self):
+        # Required Spider hook; the home page carries no video list here.
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # List models of a primary tag (category), 60 per page.
+        limit = 60
+        offset = limit * (int(pg) - 1)
+        url = f"{self.host}/api/front/models?improveTs=false&removeShows=false&limit={limit}&offset={offset}&primaryTag={tid}&sortBy=stripRanking&rcmGrp=A&rbCnGr=true&prxCnGr=false&nic=false"
+        if 'tag' in extend:
+            # filterGroupTags is a URL-encoded JSON array: [["<tag>"]]
+            url += "&filterGroupTags=%5B%5B%22" + extend['tag'] + "%22%5D%5D"
+        rsp = self.fetch(url).json()
+        videos = [
+            {
+                "vod_id": str(vod['username']).strip(),
+                "vod_name": f"{self.country_code_to_flag(str(vod['country']).strip())}{str(vod['username']).strip()}",
+                "vod_pic": f"https://img.doppiocdn.net/thumbs/{vod['snapshotTimestamp']}/{vod['id']}",
+                # Ticket emoji marks non-public (ticketed/private) shows.
+                "vod_remarks": "" if vod.get('status') == "public" else "🎫"
+            }
+            for vod in rsp.get('models', [])
+        ]
+        total = int(rsp.get('filteredCount', 0))
+        return {
+            "list": videos,
+            "page": pg,
+            "pagecount": (total + limit - 1) // limit,
+            "limit": limit,
+            "total": total
+        }
+
+    def detailContent(self, array):
+        # array[0] is the model username; fetch cam + user info for it.
+        username = array[0]
+        rsp = self.fetch(f"{self.host}/api/front/v2/models/username/{username}/cam").json()
+        info = rsp['cam']
+        user = rsp['user']['user']
+        id = str(user['id'])
+        country = str(user['country']).strip()
+        isLive = "" if user['isLive'] else " 已下播"
+        flag = self.country_code_to_flag(country)
+        remark, startAt = '', ''
+        # The ticketed/group-show start time may live in either field.
+        if show := info.get('show'):
+            startAt = show.get('createdAt')
+        elif show := info.get('groupShowAnnouncement'):
+            startAt = show.get('startAt')
+        if startAt:
+            # Convert the UTC timestamp to Beijing time (UTC+8) for display.
+            BJtime = (datetime.strptime(startAt, "%Y-%m-%dT%H:%M:%SZ") + timedelta(hours=8)).strftime("%m月%d日 %H:%M")
+            remark = f"🎫 始于 {BJtime}"
+        vod = {
+            "vod_id": id,
+            "vod_name": str(info['topic']).strip(),
+            "vod_pic": str(user['avatarUrl']),
+            "vod_director": f"{flag}{username}{isLive}",
+            "vod_remarks": remark,
+            'vod_play_from': 'StripChat',
+            'vod_play_url': f"{id}${id}"
+        }
+        return {'list': [vod]}
+
+ def process_key(self, key: str) -> Tuple[str, str]:
+ tags = {'G': 'girls', 'C': 'couples', 'M': 'men', 'T': 'trans'}
+ parts = key.split(maxsplit=1) # 仅分割第一个空格
+ if len(parts) > 1 and (tag := tags.get(parts[0].upper())):
+ return tag, parts[1].strip()
+ return 'girls', key.strip()
+
+    def searchContent(self, key, quick, pg="1"):
+        # Single-page search; the API returns up to 900 results at once.
+        result = {}
+        if int(pg) > 1:
+            return result
+        tag, key = self.process_key(key)
+        url = f"{self.host}/api/front/v4/models/search/group/username?query={key}&limit=900&primaryTag={tag}"
+        rsp = self.fetch(url).json()
+        result['list'] = [
+            {
+                "vod_id": str(user['username']).strip(),
+                "vod_name": f"{self.country_code_to_flag(str(user['country']).strip())}{user['username']}",
+                "vod_pic": f"https://img.doppiocdn.net/thumbs/{user['snapshotTimestamp']}/{user['id']}",
+                "vod_remarks": "" if user['status'] == "public" else "🎫"
+            }
+            for user in rsp.get('models', [])
+            if user['isLive']  # only list models that are currently live
+        ]
+        return result
+
+    def playerContent(self, flag, id, vipFlags):
+        # Fetch the HLS master playlist and expose one proxied URL per quality.
+        url = f"https://edge-hls.doppiocdn.net/hls/{id}/master/{id}_auto.m3u8?playlistType=lowLatency"
+        rsp = self.fetch(url)
+        lines = rsp.text.strip().split('\n')
+        psch, pkey = '', ''
+        url = []
+        for i, line in enumerate(lines):
+            # Mouflon psch/pkey are needed later to decrypt segment names.
+            if line.startswith('#EXT-X-MOUFLON:'):
+                if parts := line.split(':'):
+                    if len(parts) >= 4:
+                        psch, pkey = parts[2], parts[3]
+            if '#EXT-X-STREAM-INF' in line:
+                name_start = line.find('NAME="') + 6
+                name_end = line.find('"', name_start)
+                qn = line[name_start:name_end]
+                # The variant URL is on the next line
+                url_base = lines[i + 1]
+                # Build the final URL, appending the psch and pkey parameters
+                full_url = f"{url_base}&psch={psch}&pkey={pkey}"
+                proxy_url = f"{self.getProxyUrl()}&url={quote(full_url)}"
+                # Append quality label and proxied URL to the flat list
+                url.extend([qn, proxy_url])
+        return {
+            "url": url,
+            "parse": '0',
+            "contentType": '',
+            "header": self.headers
+        }
+
+ def localProxy(self, param):
+ url = unquote(param['url'])
+ data = self.fetch(url)
+ if data.status_code == 403:
+ data = self.fetch(re.sub(r'\d+p\d*\.m3u8', '160p_blurred.m3u8', url))
+ if data.status_code != 200:
+ return [404, "text/plain", ""]
+ data = data.text
+ if "#EXT-X-MOUFLON:FILE" in data:
+ data = self.process_m3u8_content_v2(data)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def process_m3u8_content_v2(self, m3u8_content):
+ lines = m3u8_content.strip().split('\n')
+ for i, line in enumerate(lines):
+ if (line.startswith('#EXT-X-MOUFLON:FILE:') and 'media.mp4' in lines[i + 1]):
+ encrypted_data = line.split(':', 2)[2].strip()
+ try:
+ decrypted_data = self.decrypt(encrypted_data, self.stripchat_key)
+ except Exception as e:
+ decrypted_data = self.decrypt(encrypted_data, "Zokee2OhPh9kugh4")
+ lines[i + 1] = lines[i + 1].replace('media.mp4', decrypted_data)
+ return '\n'.join(lines)
+
+ def country_code_to_flag(self, country_code):
+ if len(country_code) != 2 or not country_code.isalpha():
+ return country_code
+ flag_emoji = ''.join([chr(ord(c.upper()) - ord('A') + 0x1F1E6) for c in country_code])
+ return flag_emoji
+
+ def decode_key_compact(self):
+ base64_str = "NTEgNzUgNjUgNjEgNmUgMzQgNjMgNjEgNjkgMzkgNjIgNmYgNGEgNjEgMzUgNjE="
+ decoded = base64.b64decode(base64_str).decode('utf-8')
+ key_bytes = bytes(int(hex_str, 16) for hex_str in decoded.split(" "))
+ return key_bytes.decode('utf-8')
+
+ def compute_hash(self, key: str) -> bytes:
+ """计算并缓存SHA-256哈希"""
+ if key not in self._hash_cache:
+ sha256 = hashlib.sha256()
+ sha256.update(key.encode('utf-8'))
+ self._hash_cache[key] = sha256.digest()
+ return self._hash_cache[key]
+
+ def decrypt(self, encrypted_b64: str, key: str) -> str:
+ # 修复Base64填充
+ padding = len(encrypted_b64) % 4
+ if padding:
+ encrypted_b64 += '=' * (4 - padding)
+
+ # 计算哈希并解密
+ hash_bytes = self.compute_hash(key)
+ encrypted_data = base64.b64decode(encrypted_b64)
+
+ # 异或解密
+ decrypted_bytes = bytearray()
+ for i, cipher_byte in enumerate(encrypted_data):
+ key_byte = hash_bytes[i % len(hash_bytes)]
+ decrypted_bytes.append(cipher_byte ^ key_byte)
+ return decrypted_bytes.decode('utf-8')
+
+    def create_session_with_retry(self):
+        # Shared session with automatic retries for transient failures.
+        self.session = requests.Session()
+        retry_strategy = Retry(
+            total = 3,
+            backoff_factor = 0.3,
+            status_forcelist = [429, 500, 502, 503, 504]  # status codes worth retrying
+        )
+        adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
+        self.session.mount("http://", adapter)
+        self.session.mount("https://", adapter)
+
+    def fetch(self, url):
+        # GET through the retrying session with a 10 s timeout.
+        return self.session.get(url, headers=self.headers, timeout=10)
diff --git a/PyramidStore/plugin/adult/今日看料.py b/PyramidStore/plugin/adult/今日看料.py
new file mode 100644
index 0000000..a1364d7
--- /dev/null
+++ b/PyramidStore/plugin/adult/今日看料.py
@@ -0,0 +1,716 @@
+# -*- coding: utf-8 -*-
+# 🌈 Love
+import json
+import random
+import re
+import sys
+import threading
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import urlparse, quote
+
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+    def init(self, extend=""):
+        # `extend` may carry a JSON proxy mapping for requests; default empty.
+        try:self.proxies = json.loads(extend)
+        except:self.proxies = {}
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+            'Accept-Language': 'zh-CN,zh;q=0.9',
+            'Connection': 'keep-alive',
+            'Cache-Control': 'no-cache',
+        }
+        # Use working dynamic URLs directly
+        self.host = self.get_working_host()
+        self.headers.update({'Origin': self.host, 'Referer': f"{self.host}/"})
+        self.log(f"使用站点: {self.host}")
+        print(f"使用站点: {self.host}")
+        pass
+
+    def getName(self):
+        # Display name of this spider.
+        return "🌈 今日看料"
+
+ def isVideoFormat(self, url):
+ # Treat direct media formats as playable without parsing
+ return any(ext in (url or '') for ext in ['.m3u8', '.mp4', '.ts'])
+
+    def manualVideoCheck(self):
+        # No manual playability probing is needed for this site.
+        return False
+
+    def destroy(self):
+        # Required Spider hook; nothing to clean up.
+        pass
+
+    def homeContent(self, filter):
+        # Build the category list (navbar first, dropdown fallback,
+        # hard-coded defaults last) plus the home-page video list.
+        try:
+            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
+            if response.status_code != 200:
+                return {'class': [], 'list': []}
+
+            data = self.getpq(response.text)
+            result = {}
+            classes = []
+
+            # Prefer extracting categories from the navigation bar
+            nav_selectors = [
+                '#navbarCollapse .navbar-nav .nav-item .nav-link',
+                '.navbar-nav .nav-item .nav-link',
+                '#nav .menu-item a',
+                '.menu .menu-item a'
+            ]
+
+            found_categories = False
+            for selector in nav_selectors:
+                for item in data(selector).items():
+                    href = item.attr('href') or ''
+                    name = item.text().strip()
+
+                    # Skip links that are clearly not categories
+                    if (not href or not name or
+                        href == '#' or
+                        href.startswith('http') or
+                        'about' in href.lower() or
+                        'contact' in href.lower() or
+                        'tags' in href.lower() or
+                        'top' in href.lower() or
+                        'start' in href.lower() or
+                        'time' in href.lower()):
+                        continue
+
+                    # Keep only category links (contain /category/ or a known path)
+                    if '/category/' in href or any(cat in href for cat in ['/dy/', '/ks/', '/douyu/', '/hy/', '/hj/', '/tt/', '/wh/', '/asmr/', '/xb/', '/xsp/', '/rdgz/']):
+                        # Normalize relative paths to a leading slash
+                        if href.startswith('/'):
+                            type_id = href
+                        else:
+                            type_id = f'/{href}'
+
+                        classes.append({
+                            'type_name': name,
+                            'type_id': type_id
+                        })
+                        found_categories = True
+
+            # If the navbar yielded nothing, try the category dropdown menu
+            if not found_categories:
+                category_selectors = [
+                    '.category-list a',
+                    '.slide-toggle + .category-list a',
+                    '.menu .category-list a'
+                ]
+                for selector in category_selectors:
+                    for item in data(selector).items():
+                        href = item.attr('href') or ''
+                        name = item.text().strip()
+
+                        if href and name and href != '#':
+                            if href.startswith('/'):
+                                type_id = href
+                            else:
+                                type_id = f'/{href}'
+
+                            classes.append({
+                                'type_name': name,
+                                'type_id': type_id
+                            })
+                            found_categories = True
+
+            # De-duplicate by type_id, keeping the first occurrence
+            unique_classes = []
+            seen_ids = set()
+            for cls in classes:
+                if cls['type_id'] not in seen_ids:
+                    unique_classes.append(cls)
+                    seen_ids.add(cls['type_id'])
+
+            # Fall back to a hard-coded category list when nothing was found
+            if not unique_classes:
+                unique_classes = [
+                    {'type_name': '热点关注', 'type_id': '/category/rdgz/'},
+                    {'type_name': '抖音', 'type_id': '/category/dy/'},
+                    {'type_name': '快手', 'type_id': '/category/ks/'},
+                    {'type_name': '斗鱼', 'type_id': '/category/douyu/'},
+                    {'type_name': '虎牙', 'type_id': '/category/hy/'},
+                    {'type_name': '花椒', 'type_id': '/category/hj/'},
+                    {'type_name': '推特', 'type_id': '/category/tt/'},
+                    {'type_name': '网红', 'type_id': '/category/wh/'},
+                    {'type_name': 'ASMR', 'type_id': '/category/asmr/'},
+                    {'type_name': 'X播', 'type_id': '/category/xb/'},
+                    {'type_name': '小视频', 'type_id': '/category/xsp/'}
+                ]
+
+            result['class'] = unique_classes
+            result['list'] = self.getlist(data('#index article a, #archive article a'))
+            return result
+
+        except Exception as e:
+            print(f"homeContent error: {e}")
+            return {'class': [], 'list': []}
+
+    def homeVideoContent(self):
+        # Home-page video list only; categories are handled in homeContent.
+        try:
+            response = requests.get(self.host, headers=self.headers, proxies=self.proxies, timeout=15)
+            if response.status_code != 200:
+                return {'list': []}
+            data = self.getpq(response.text)
+            return {'list': self.getlist(data('#index article a, #archive article a'))}
+        except Exception as e:
+            print(f"homeVideoContent error: {e}")
+            return {'list': []}
+
+    def categoryContent(self, tid, pg, filter, extend):
+        # Paged listing for one category; tid is a site path like /category/dy/.
+        try:
+            # Build the URL without duplicate slashes
+            base_url = tid.lstrip('/').rstrip('/')
+            if pg and pg != '1':
+                url = f"{self.host}{base_url}/{pg}/"
+            else:
+                url = f"{self.host}{base_url}/"
+
+            print(f"分类页面URL: {url}")
+
+            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+            if response.status_code != 200:
+                print(f"分类页面请求失败: {response.status_code}")
+                return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}
+
+            data = self.getpq(response.text)
+            videos = self.getlist(data('#archive article a, #index article a, .post-card'), tid)
+
+            # Fallback selectors when the primary ones matched nothing
+            if not videos:
+                videos = self.getlist(data('article a, .post a, .entry-title a'), tid)
+
+            print(f"找到 {len(videos)} 个视频")
+
+            # Page-count detection (see detect_page_count)
+            pagecount = self.detect_page_count(data, pg)
+
+            result = {}
+            result['list'] = videos
+            result['page'] = pg
+            result['pagecount'] = pagecount
+            result['limit'] = 90
+            result['total'] = 999999
+            return result
+
+        except Exception as e:
+            print(f"categoryContent error: {e}")
+            return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}
+
+    def tagContent(self, tid, pg, filter, extend):
+        """Paged listing for a tag page (tid is a /tag/... path)."""
+        try:
+            # Build the URL without duplicate slashes
+            base_url = tid.lstrip('/').rstrip('/')
+            if pg and pg != '1':
+                url = f"{self.host}{base_url}/{pg}/"
+            else:
+                url = f"{self.host}{base_url}/"
+
+            print(f"标签页面URL: {url}")
+
+            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+            if response.status_code != 200:
+                print(f"标签页面请求失败: {response.status_code}")
+                return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}
+
+            data = self.getpq(response.text)
+            videos = self.getlist(data('#archive article a, #index article a, .post-card'), tid)
+
+            # Fallback selectors when the primary ones matched nothing
+            if not videos:
+                videos = self.getlist(data('article a, .post a, .entry-title a'), tid)
+
+            print(f"找到 {len(videos)} 个标签相关视频")
+
+            # Page-count detection
+            pagecount = self.detect_page_count(data, pg)
+
+            result = {}
+            result['list'] = videos
+            result['page'] = pg
+            result['pagecount'] = pagecount
+            result['limit'] = 90
+            result['total'] = 999999
+            return result
+
+        except Exception as e:
+            print(f"tagContent error: {e}")
+            return {'list': [], 'page': pg, 'pagecount': 1, 'limit': 90, 'total': 0}
+
+    def detect_page_count(self, data, current_page):
+        """Best-effort page-count detection from a parsed listing page."""
+        pagecount = 99999  # default large number allows endless paging
+
+        # Strategy 1: collect every page number shown in the paginator
+        page_numbers = []
+
+        # All plausible paginator link selectors
+        page_selectors = [
+            '.page-navigator a',
+            '.pagination a',
+            '.pages a',
+            '.page-numbers a'
+        ]
+
+        for selector in page_selectors:
+            for page_link in data(selector).items():
+                href = page_link.attr('href') or ''
+                text = page_link.text().strip()
+
+                # Extract the page number from the href
+                if href:
+                    # Matches trailing /2/ style paths like /category/dy/2/
+                    match = re.search(r'/(\d+)/?$', href.rstrip('/'))
+                    if match:
+                        page_num = int(match.group(1))
+                        if page_num not in page_numbers:
+                            page_numbers.append(page_num)
+
+                # Extract a numeric page label from the link text
+                if text and text.isdigit():
+                    page_num = int(text)
+                    if page_num not in page_numbers:
+                        page_numbers.append(page_num)
+
+        # Use the highest page number found, if any
+        if page_numbers:
+            max_page = max(page_numbers)
+            print(f"从分页器检测到最大页码: {max_page}")
+            return max_page
+
+        # Strategy 2: a "next page" button implies more pages exist
+        next_selectors = [
+            '.page-navigator .next',
+            '.pagination .next',
+            '.next-page',
+            'a:contains("下一页")'
+        ]
+
+        for selector in next_selectors:
+            if data(selector):
+                print("检测到下一页按钮,允许继续翻页")
+                return 99999
+
+        # Strategy 3: a sparse page probably has no next page
+        if len(data('#archive article, #index article, .post-card')) < 5:
+            print("当前页内容较少,可能没有下一页")
+            return int(current_page)
+
+        print("使用默认页数: 99999")
+        return 99999
+
+    def detailContent(self, ids):
+        # Detail page: scrape title, tag links and playable video URLs.
+        try:
+            url = f"{self.host}{ids[0]}" if not ids[0].startswith('http') else ids[0]
+            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+
+            if response.status_code != 200:
+                return {'list': [{'vod_play_from': '今日看料', 'vod_play_url': f'页面加载失败${url}'}]}
+
+            data = self.getpq(response.text)
+            vod = {'vod_play_from': '今日看料'}
+
+            # Title: first selector that matches wins
+            title_selectors = ['.post-title', 'h1.entry-title', 'h1', '.post-card-title']
+            for selector in title_selectors:
+                title_elem = data(selector)
+                if title_elem:
+                    vod['vod_name'] = title_elem.text().strip()
+                    break
+
+            if 'vod_name' not in vod:
+                vod['vod_name'] = '今日看料视频'
+
+            # Description: tag keywords rendered as clickable [a=cr:...] links
+            try:
+                clist = []
+                if data('.tags .keywords a'):
+                    for k in data('.tags .keywords a').items():
+                        title = k.text()
+                        href = k.attr('href')
+                        if title and href:
+                            # Make href a site-relative path
+                            if href.startswith(self.host):
+                                href = href.replace(self.host, '')
+                            clist.append('[a=cr:' + json.dumps({'id': href, 'name': title}) + '/]' + title + '[/a]')
+                vod['vod_content'] = ' '.join(clist) if clist else data('.post-content').text() or vod['vod_name']
+            except:
+                vod['vod_content'] = vod['vod_name']
+
+            # Playable video URLs
+            try:
+                plist = []
+                used_names = set()
+
+                # DPlayer embeds carry the URL in a data-config JSON attribute
+                if data('.dplayer'):
+                    for c, k in enumerate(data('.dplayer').items(), start=1):
+                        config_attr = k.attr('data-config')
+                        if config_attr:
+                            try:
+                                config = json.loads(config_attr)
+                                video_url = config.get('video', {}).get('url', '')
+                                if video_url:
+                                    name = f"视频{c}"
+                                    count = 2
+                                    while name in used_names:
+                                        name = f"视频{c}_{count}"
+                                        count += 1
+                                    used_names.add(name)
+                                    self.log(f"解析到视频: {name} -> {video_url}")
+                                    print(f"解析到视频: {name} -> {video_url}")
+                                    plist.append(f"{name}${video_url}")
+                            except:
+                                continue
+
+                # Fallback: plain video/iframe/anchor tags with media URLs
+                if not plist:
+                    video_selectors = ['video source', 'video', 'iframe[src*="video"]', 'a[href*=".m3u8"]', 'a[href*=".mp4"]']
+                    for selector in video_selectors:
+                        for c, elem in enumerate(data(selector).items(), start=1):
+                            src = elem.attr('src') or elem.attr('href') or ''
+                            if src and any(ext in src for ext in ['.m3u8', '.mp4', 'video']):
+                                name = f"视频{c}"
+                                count = 2
+                                while name in used_names:
+                                    name = f"视频{c}_{count}"
+                                    count += 1
+                                used_names.add(name)
+                                plist.append(f"{name}${src}")
+
+                if plist:
+                    self.log(f"拼装播放列表,共{len(plist)}个")
+                    print(f"拼装播放列表,共{len(plist)}个")
+                    vod['vod_play_url'] = '#'.join(plist)
+                else:
+                    vod['vod_play_url'] = f"正片${url}"
+
+            except Exception as e:
+                print(f"视频解析错误: {e}")
+                vod['vod_play_url'] = f"正片${url}"
+
+            return {'list': [vod]}
+
+        except Exception as e:
+            print(f"detailContent error: {e}")
+            return {'list': [{'vod_play_from': '今日看料', 'vod_play_url': f'详情页加载失败${ids[0] if ids else ""}'}]}
+
+    def searchContent(self, key, quick, pg="1"):
+        # Search by tag page first, falling back to the site search page.
+        try:
+            # Tag search is preferred
+            encoded_key = quote(key)
+            url = f"{self.host}/tag/{encoded_key}/{pg}" if pg != "1" else f"{self.host}/tag/{encoded_key}/"
+            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+
+            if response.status_code != 200:
+                # Fall back to the search page
+                url = f"{self.host}/search/{encoded_key}/{pg}" if pg != "1" else f"{self.host}/search/{encoded_key}/"
+                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+
+            if response.status_code != 200:
+                return {'list': [], 'page': pg}
+
+            data = self.getpq(response.text)
+            videos = self.getlist(data('#archive article a, #index article a, .post-card'))
+
+            # Page-count detection shared with category pages
+            pagecount = self.detect_page_count(data, pg)
+
+            return {'list': videos, 'page': pg, 'pagecount': pagecount}
+
+        except Exception as e:
+            print(f"searchContent error: {e}")
+            return {'list': [], 'page': pg}
+
+    def getTagsContent(self, pg="1"):
+        """Scrape the tags index page and present each tag as a vod entry."""
+        try:
+            url = f"{self.host}/tags.html"
+            response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=15)
+
+            if response.status_code != 200:
+                return {'list': [], 'page': pg}
+
+            data = self.getpq(response.text)
+            tags = []
+
+            # Loose selector: any anchor pointing at a /tag/ path
+            for tag_elem in data('a[href*="/tag/"]').items():
+                tag_name = tag_elem.text().strip()
+                tag_href = tag_elem.attr('href') or ''
+
+                if tag_name and tag_href and '/tag/' in tag_href and tag_name != '全部标签':  # skip the heading link
+                    # Normalize to a site-relative path
+                    tag_id = tag_href.replace(self.host, '')
+                    if not tag_id.startswith('/'):
+                        tag_id = '/' + tag_id
+
+                    tags.append({
+                        'vod_id': tag_id,
+                        'vod_name': f"🏷️ {tag_name}",
+                        'vod_pic': '',
+                        'vod_remarks': '标签',
+                        'vod_tag': 'tag',
+                        'style': {"type": "rect", "ratio": 1.33}
+                    })
+
+            print(f"找到 {len(tags)} 个标签")
+
+            # The tags page is a single page; no pagination needed
+            result = {}
+            result['list'] = tags
+            result['page'] = pg
+            result['pagecount'] = 1
+            result['limit'] = 999
+            result['total'] = len(tags)
+            return result
+
+        except Exception as e:
+            print(f"getTagsContent error: {e}")
+            return {'list': [], 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ url = id
+ p = 1
+ if self.isVideoFormat(url):
+ if '.m3u8' in url:
+ url = self.proxy(url)
+ p = 0
+ self.log(f"播放请求: parse={p}, url={url}")
+ print(f"播放请求: parse={p}, url={url}")
+ return {'parse': p, 'url': url, 'header': self.headers}
+
+    def localProxy(self, param):
+        # Dispatch proxied requests by type: img, m3u8, or raw segment.
+        try:
+            if param.get('type') == 'img':
+                img_url = self.d64(param['url'])
+                # Absolutize site-relative image paths
+                if not img_url.startswith(('http://', 'https://')):
+                    if img_url.startswith('/'):
+                        img_url = f"{self.host}{img_url}"
+                    else:
+                        img_url = f"{self.host}/{img_url}"
+
+                res = requests.get(img_url, headers=self.headers, proxies=self.proxies, timeout=10)
+                return [200, res.headers.get('Content-Type', 'image/jpeg'), res.content]
+            elif param.get('type') == 'm3u8':
+                return self.m3Proxy(param['url'])
+            else:
+                return self.tsProxy(param['url'])
+        except Exception as e:
+            print(f"localProxy error: {e}")
+            return [500, "text/plain", f"Proxy error: {str(e)}".encode()]
+
+ def proxy(self, data, type='m3u8'):
+ if data and len(self.proxies):
+ return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
+ else:
+ return data
+
+ def m3Proxy(self, url):
+ try:
+ url = self.d64(url)
+ ydata = requests.get(url, headers=self.headers, proxies=self.proxies, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = requests.get(url, headers=self.headers, proxies=self.proxies).content.decode('utf-8')
+ lines = data.strip().split('\n')
+ last_r = url[:url.rfind('/')]
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ iskey = True
+ for index, string in enumerate(lines):
+ if iskey and 'URI' in string:
+ pattern = r'URI="([^"]*)"'
+ match = re.search(pattern, string)
+ if match:
+ lines[index] = re.sub(pattern, f'URI="{self.proxy(match.group(1), "mkey")}"', string)
+ iskey = False
+ continue
+ if '#EXT' not in string:
+ if 'http' not in string:
+ domain = last_r if string.count('/') < 2 else durl
+ string = domain + ('' if string.startswith('/') else '/') + string
+ lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+ except Exception as e:
+ print(f"m3Proxy error: {e}")
+ return [500, "text/plain", f"m3u8 proxy error: {str(e)}".encode()]
+
+    def tsProxy(self, url):
+        # Relay a raw media segment (or key) through the local proxy.
+        try:
+            url = self.d64(url)
+            data = requests.get(url, headers=self.headers, proxies=self.proxies, stream=True)
+            return [200, data.headers.get('Content-Type', 'video/mp2t'), data.content]
+        except Exception as e:
+            print(f"tsProxy error: {e}")
+            return [500, "text/plain", f"ts proxy error: {str(e)}".encode()]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+    def get_working_host(self):
+        """Get working host from known dynamic URLs.
+
+        A candidate counts as working only if it returns 200 AND the page
+        actually contains article links; otherwise fall back to the first.
+        """
+        dynamic_urls = [
+            'https://kanliao2.one/',
+            'https://kanliao7.org/',
+            'https://kanliao7.net/',
+            'https://kanliao14.com/'
+        ]
+
+        for url in dynamic_urls:
+            try:
+                response = requests.get(url, headers=self.headers, proxies=self.proxies, timeout=10)
+                if response.status_code == 200:
+                    data = self.getpq(response.text)
+                    articles = data('#index article a, #archive article a')
+                    if len(articles) > 0:
+                        self.log(f"选用可用站点: {url}")
+                        print(f"选用可用站点: {url}")
+                        return url
+            except Exception as e:
+                continue
+
+        self.log(f"未检测到可用站点,回退: {dynamic_urls[0]}")
+        print(f"未检测到可用站点,回退: {dynamic_urls[0]}")
+        return dynamic_urls[0]
+
+    def getlist(self, data, tid=''):
+        # Convert matched article anchors into vod dicts, skipping ads.
+        videos = []
+        for k in data.items():
+            a = k.attr('href')
+            b = k('h2').text() or k('.post-card-title').text() or k('.entry-title').text() or k.text()
+            c = k('span[itemprop="datePublished"]').text() or k('.post-meta, .entry-meta, time, .post-card-info').text()
+
+            # Ad filtering: skip entries flagged by is_advertisement
+            if self.is_advertisement(k):
+                print(f"过滤广告: {b}")
+                continue
+
+            if a and b and b.strip():
+                # Normalize relative paths to a leading slash
+                if not a.startswith('http'):
+                    if a.startswith('/'):
+                        vod_id = a
+                    else:
+                        vod_id = f'/{a}'
+                else:
+                    vod_id = a
+
+                videos.append({
+                    'vod_id': vod_id,
+                    'vod_name': b.replace('\n', ' ').strip(),
+                    'vod_pic': self.get_article_img(k),
+                    'vod_remarks': c.strip() if c else '',
+                    'vod_tag': '',
+                    'style': {"type": "rect", "ratio": 1.33}
+                })
+        return videos
+
+    def is_advertisement(self, article_elem):
+        """Return True when an article element looks like an ad (热搜HOT badge)."""
+        # Check whether any .wraps element carries the 热搜HOT badge text
+        hot_elements = article_elem.find('.wraps')
+        for elem in hot_elements.items():
+            if '热搜HOT' in elem.text():
+                return True
+
+        # Check the title against known ad keywords
+        title = article_elem('h2').text() or article_elem('.post-card-title').text() or ''
+        ad_keywords = ['热搜HOT', '手机链接', 'DNS设置', '修改DNS', 'WIFI设置']
+        if any(keyword in title for keyword in ad_keywords):
+            return True
+
+        # Check for the gradient backgrounds ads use
+        style = article_elem.attr('style') or ''
+        if 'background:' in style and any(gradient in style for gradient in ['-webkit-linear-gradient', 'linear-gradient']):
+            # Narrow to the specific ad color pairs
+            ad_gradients = ['#ec008c,#fc6767', '#ffe259,#ffa751']
+            if any(gradient in style for gradient in ad_gradients):
+                return True
+
+        return False
+
+    def get_article_img(self, article_elem):
+        """Extract a cover image URL from an article element, trying several sources."""
+        # Source 1: loadBannerDirect(...) call inside a script tag
+        script_text = article_elem('script').text()
+        if script_text:
+            match = re.search(r"loadBannerDirect\('([^']+)'", script_text)
+            if match:
+                url = match.group(1)
+                if not url.startswith(('http://', 'https://')):
+                    if url.startswith('/'):
+                        url = f"{self.host}{url}"
+                    else:
+                        url = f"{self.host}/{url}"
+                return f"{self.getProxyUrl()}&url={self.e64(url)}&type=img"
+
+        # Source 2: CSS background-image of .blog-background
+        bg_elem = article_elem.find('.blog-background')
+        if bg_elem:
+            style = bg_elem.attr('style') or ''
+            bg_match = re.search(r'background-image:\s*url\(["\']?([^"\'\)]+)["\']?\)', style)
+            if bg_match:
+                img_url = bg_match.group(1)
+                if img_url and not img_url.startswith('data:'):
+                    if not img_url.startswith(('http://', 'https://')):
+                        if img_url.startswith('/'):
+                            img_url = f"{self.host}{img_url}"
+                        else:
+                            img_url = f"{self.host}/{img_url}"
+                    return f"{self.getProxyUrl()}&url={self.e64(img_url)}&type=img"
+
+        # Source 3: an <img> tag (data-src preferred over src)
+        img_elem = article_elem.find('img')
+        if img_elem:
+            data_src = img_elem.attr('data-src')
+            if data_src:
+                if not data_src.startswith(('http://', 'https://')):
+                    if data_src.startswith('/'):
+                        data_src = f"{self.host}{data_src}"
+                    else:
+                        data_src = f"{self.host}/{data_src}"
+                return f"{self.getProxyUrl()}&url={self.e64(data_src)}&type=img"
+
+            src = img_elem.attr('src')
+            if src:
+                if not src.startswith(('http://', 'https://')):
+                    if src.startswith('/'):
+                        src = f"{self.host}{src}"
+                    else:
+                        src = f"{self.host}/{src}"
+                return f"{self.getProxyUrl()}&url={self.e64(src)}&type=img"
+
+        return ''
+
+    def getpq(self, data):
+        # Parse HTML with PyQuery; retry with encoded bytes on failure.
+        try:
+            return pq(data)
+        except Exception as e:
+            print(f"{str(e)}")
+            return pq(data.encode('utf-8'))
\ No newline at end of file
diff --git a/PyramidStore/plugin/adult/好色TV.py b/PyramidStore/plugin/adult/好色TV.py
new file mode 100644
index 0000000..56c6087
--- /dev/null
+++ b/PyramidStore/plugin/adult/好色TV.py
@@ -0,0 +1,533 @@
+import re
+import sys
+import urllib.parse
+import threading
+import time
+import requests
+from pyquery import PyQuery as pq
+
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+    def __init__(self):
+        # Basic configuration: display name, default host, mirror candidates.
+        self.name = '好色TV(优)'
+        self.host = 'https://hsex.icu/'
+        self.candidate_hosts = [
+            "https://hsex.icu/",
+            "https://hsex1.icu/",
+            "https://hsex.tv/"
+        ]
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
+            'Referer': self.host
+        }
+        # NOTE(review): reads like milliseconds, but fetch() timeouts are in seconds — confirm intended unit.
+        self.timeout = 5000
+
+        # Category map. The plain video category uses list-{pg}.htm, so its
+        # url_suffix is empty; ranked categories use {suffix}_list-{pg}.htm.
+        self.class_map = {
+            '视频': {'type_id': 'list', 'url_suffix': ''},
+            '周榜': {'type_id': 'top7', 'url_suffix': 'top7'},
+            '月榜': {'type_id': 'top', 'url_suffix': 'top'},
+            '5分钟+': {'type_id': '5min', 'url_suffix': '5min'},
+            '10分钟+': {'type_id': 'long', 'url_suffix': 'long'}
+        }
+
+    def getName(self):
+        """Return the human-readable source name."""
+        return self.name
+
+    def init(self, extend=""):
+        # Pick the lowest-latency mirror and keep the Referer in sync with it.
+        self.host = self.get_fastest_host()
+        self.headers['Referer'] = self.host
+
+ def isVideoFormat(self, url):
+ if not url:
+ return False
+ return any(fmt in url.lower() for fmt in ['.mp4', '.m3u8', '.flv', '.avi'])
+
+    def manualVideoCheck(self):
+        """Return a callable that verifies a URL actually serves video content."""
+        def check(url):
+            # Cheap extension filter first, then a HEAD probe for Content-Type.
+            if not self.isVideoFormat(url):
+                return False
+            try:
+                resp = self.fetch(url, headers=self.headers, method='HEAD', timeout=3)
+                return resp.status_code in (200, 302) and 'video' in resp.headers.get('Content-Type', '')
+            except:
+                return False
+        return check
+
+ def get_fastest_host(self):
+ """测试候选域名,返回最快可用的"""
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ resp = requests.head(url, headers=self.headers, timeout=2, allow_redirects=False)
+ if resp.status_code in (200, 301, 302):
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ else:
+ results[url] = float('inf')
+ except:
+ results[url] = float('inf')
+
+ for host in self.candidate_hosts:
+ t = threading.Thread(target=test_host, args=(host,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+
+ valid_hosts = [(h, d) for h, d in results.items() if d != float('inf')]
+ return valid_hosts[0][0] if valid_hosts else self.candidate_hosts[0]
+
+    def homeContent(self, filter):
+        """Build the CMS home payload: category list plus the landing-page video grid."""
+        result = {}
+        # Category list derived from the static class_map
+        classes = []
+        for name, info in self.class_map.items():
+            classes.append({
+                'type_name': name,
+                'type_id': info['type_id']
+            })
+        result['class'] = classes
+
+        try:
+            # Fetch and parse the landing page
+            html = self.fetch_with_retry(self.host, retry=2, timeout=5).text
+            data = pq(html)
+
+            # Each video card lives in a .col-xs-6.col-md-3 cell
+            vlist = []
+            items = data('.row .col-xs-6.col-md-3')
+            for item in items.items():
+                try:
+                    title = item('h5').text().strip()
+                    if not title:
+                        continue
+
+                    # Cover image comes from an inline background-image style
+                    style = item('.image').attr('style') or ''
+                    pic_match = re.search(r'url\(["\']?([^"\']+)["\']?\)', style)
+                    vod_pic = pic_match.group(1) if pic_match else ''
+                    if vod_pic and not vod_pic.startswith('http'):
+                        vod_pic = f"{self.host.rstrip('/')}/{vod_pic.lstrip('/')}"
+
+                    # Duration badge doubles as the remark text
+                    desc = item('.duration').text().strip() or '未知'
+
+                    # Video id is the last path segment of the card link
+                    href = item('a').attr('href') or ''
+                    if not href:
+                        continue
+                    vod_id = href.split('/')[-1]
+                    if not vod_id.endswith('.htm'):
+                        vod_id += '.htm'
+
+                    vlist.append({
+                        'vod_id': vod_id,
+                        'vod_name': title,
+                        'vod_pic': vod_pic,
+                        'vod_remarks': desc
+                    })
+                except Exception as e:
+                    print(f"解析首页视频项失败: {e}")
+                    continue
+
+            result['list'] = vlist
+        except Exception as e:
+            print(f"首页解析失败: {e}")
+            result['list'] = []
+        return result
+
+    def homeVideoContent(self):
+        # Home-page videos are already delivered by homeContent(); nothing extra here.
+        return []
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """Return one page of a category listing.
+
+        The plain video category uses list-{pg}.htm; ranked categories use
+        {suffix}_list-{pg}.htm.
+        """
+        result = {}
+        try:
+            # Look up the category entry by its type_id
+            cate_info = None
+            for name, info in self.class_map.items():
+                if info['type_id'] == tid:
+                    cate_info = info
+                    break
+
+            if not cate_info:
+                result['list'] = []
+                return result
+
+            # Two URL layouts: list-{pg}.htm vs {suffix}_list-{pg}.htm
+            if tid == 'list':
+                url = f"{self.host}list-{pg}.htm"
+            else:
+                url = f"{self.host}{cate_info['url_suffix']}_list-{pg}.htm"
+
+            # Fetch the page and normalise it to UTF-8
+            html = self.fetch(url, headers=self.headers, timeout=8).text
+            html = html.encode('utf-8', errors='ignore').decode('utf-8')
+            data = pq(html)
+
+            # Same card layout as the home page
+            vlist = []
+            items = data('.row .col-xs-6.col-md-3')
+            for item in items.items():
+                try:
+                    title = item('h5').text().strip()
+                    if not title:
+                        continue
+
+                    style = item('.image').attr('style') or ''
+                    pic_match = re.search(r'url\(["\']?([^"\']+)["\']?\)', style)
+                    vod_pic = pic_match.group(1) if pic_match else ''
+                    if vod_pic and not vod_pic.startswith('http'):
+                        vod_pic = f"{self.host.rstrip('/')}/{vod_pic.lstrip('/')}"
+
+                    desc = item('.duration').text().strip() or '未知'
+
+                    href = item('a').attr('href') or ''
+                    if not href:
+                        continue
+                    vod_id = href.split('/')[-1]
+                    if not vod_id.endswith('.htm'):
+                        vod_id += '.htm'
+
+                    vlist.append({
+                        'vod_id': vod_id,
+                        'vod_name': title,
+                        'vod_pic': vod_pic,
+                        'vod_remarks': desc
+                    })
+                except Exception as e:
+                    print(f"解析分类视频项失败: {e}")
+                    continue
+
+            # Total page count from the pagination widget (defaults to 1)
+            pagecount = 1
+            try:
+                pagination = data('.pagination1 li a')
+                page_nums = []
+                for a in pagination.items():
+                    text = a.text().strip()
+                    if text.isdigit():
+                        page_nums.append(int(text))
+                if page_nums:
+                    pagecount = max(page_nums)
+            except:
+                pagecount = 1
+
+            result['list'] = vlist
+            result['page'] = pg
+            result['pagecount'] = pagecount
+            result['limit'] = len(vlist)
+            result['total'] = 999999
+        except Exception as e:
+            print(f"分类解析失败: {e}")
+            result['list'] = []
+            result['page'] = pg
+            result['pagecount'] = 1
+            result['limit'] = 0
+            result['total'] = 0
+        return result
+
+    def detailContent(self, ids):
+        """Resolve one video page into a CMS vod dict with a direct play URL."""
+        try:
+            if not ids or not ids[0]:
+                return {'list': []}
+
+            vod_id = ids[0].strip()
+            if not vod_id.endswith('.htm'):
+                vod_id += '.htm'
+            url = f"{self.host}{vod_id.lstrip('/')}"
+
+            html = self.fetch_with_retry(url, retry=2, timeout=8).text
+            html = html.encode('utf-8', errors='ignore').decode('utf-8')
+            data = pq(html)
+
+            # Title from the usual heading selectors
+            title = data('.panel-title, .video-title, h1').text().strip() or '未知标题'
+
+            # Poster: inline background style first, then common <img> fallbacks
+            vod_pic = ''
+            poster_style = data('.vjs-poster').attr('style') or ''
+            pic_match = re.search(r'url\(["\']?([^"\']+)["\']?\)', poster_style)
+            if pic_match:
+                vod_pic = pic_match.group(1)
+            if not vod_pic:
+                vod_pic = data('.video-pic img, .vjs-poster img, .thumbnail img').attr('src') or ''
+            if vod_pic and not vod_pic.startswith('http'):
+                vod_pic = f"{self.host}{vod_pic.lstrip('/')}"
+
+            # Duration and view count scraped from assorted info blocks
+            duration = '未知'
+            views = '未知'
+            info_items = data('.panel-body .col-md-3, .video-info .info-item, .info p')
+            for item in info_items.items():
+                text = item.text().strip()
+                if '时长' in text or 'duration' in text.lower():
+                    duration = text.replace('时长:', '').replace('时长', '').strip()
+                elif '观看' in text or 'views' in text.lower():
+                    views_match = re.search(r'(\d+\.?\d*[kK]?)次观看', text)
+                    if views_match:
+                        views = views_match.group(1)
+                    else:
+                        views = text.replace('观看:', '').replace('观看', '').strip()
+            remarks = f"{duration} | {views}"
+
+            # Play URL: videoUrl JS variable, then <source> tags, then any m3u8/mp4 in the page
+            video_url = ''
+            m3u8_match = re.search(r'videoUrl\s*=\s*["\']([^"\']+\.m3u8)["\']', html)
+            if m3u8_match:
+                video_url = m3u8_match.group(1)
+            if not video_url:
+                source = data('source[src*=".m3u8"], source[src*=".mp4"]')
+                video_url = source.attr('src') or ''
+            if not video_url:
+                js_matches = re.findall(r'(https?://[^\s"\']+\.(?:m3u8|mp4))', html)
+                if js_matches:
+                    video_url = js_matches[0]
+
+            if video_url and not video_url.startswith('http'):
+                video_url = f"{self.host}{video_url.lstrip('/')}"
+
+            vod = {
+                'vod_id': vod_id,
+                'vod_name': title,
+                'vod_pic': vod_pic,
+                'vod_remarks': remarks,
+                'vod_play_from': '好色TV(优)',
+                'vod_play_url': f'正片${video_url}' if video_url else '正片$暂无地址'
+            }
+            return {'list': [vod]}
+        except Exception as e:
+            print(f"详情解析失败: {e}")
+            return {'list': []}
+
+ def searchContent(self, key, quick, pg=1):
+ try:
+ # 关键词合法性校验
+ if not key.strip():
+ print("搜索关键词不能为空")
+ return {'list': [], 'page': int(pg), 'pagecount': 1, 'limit': 0, 'total': 0}
+
+ # 编码关键词
+ encoded_key = urllib.parse.quote(key.strip(), encoding='utf-8', errors='replace')
+
+ # 构造搜索URL
+ search_url = f"{self.host}search.htm"
+ params = {
+ 'search': encoded_key,
+ 'page': int(pg)
+ }
+
+ # 发起请求
+ resp = self.fetch(
+ url=search_url,
+ headers=self.headers,
+ params=params,
+ timeout=8
+ )
+ if resp.status_code not in (200, 302):
+ print(f"搜索页面请求失败,URL:{resp.url},状态码:{resp.status_code}")
+ return {'list': [], 'page': int(pg), 'pagecount': 1, 'limit': 0, 'total': 0}
+
+ # 处理页面内容
+ html = resp.text.encode('utf-8', errors='ignore').decode('utf-8')
+ data = pq(html)
+
+ # 检测无结果场景
+ no_result_texts = ['没有找到相关视频', '无搜索结果', 'No results found', '未找到匹配内容']
+ no_result = any(data(f'div:contains("{text}"), p:contains("{text}")').text() for text in no_result_texts)
+ if no_result:
+ print(f"搜索关键词「{key}」第{pg}页无结果")
+ return {'list': [], 'page': int(pg), 'pagecount': 1, 'limit': 0, 'total': 0}
+
+ # 解析搜索结果
+ vlist = []
+ items = data('.row .col-xs-6.col-md-3')
+ for item in items.items():
+ try:
+ title = item('h5').text().strip()
+ if not title:
+ continue
+
+ style = item('.image').attr('style') or ''
+ pic_match = re.search(r'url\(["\']?([^"\']+)["\']?\)', style)
+ vod_pic = pic_match.group(1) if pic_match else ''
+ if vod_pic and not vod_pic.startswith(('http://', 'https://')):
+ vod_pic = f"{self.host.rstrip('/')}/{vod_pic.lstrip('/')}"
+
+ desc = item('.duration').text().strip() or '未知时长'
+
+ href = item('a').attr('href') or ''
+ if not href:
+ continue
+ vod_id = href.split('/')[-1]
+ if not vod_id.endswith('.htm'):
+ vod_id += '.htm'
+
+ vlist.append({
+ 'vod_id': vod_id,
+ 'vod_name': title,
+ 'vod_pic': vod_pic,
+ 'vod_remarks': desc
+ })
+ except Exception as e:
+ print(f"解析单条搜索结果失败:{e}(跳过该条)")
+ continue
+
+ # 解析总页数
+ pagecount = 1
+ try:
+ pagination = data('.pagination1 li a')
+ page_nums = []
+ for a in pagination.items():
+ text = a.text().strip()
+ if text.isdigit():
+ page_nums.append(int(text))
+ if page_nums:
+ pagecount = max(page_nums)
+ print(f"搜索关键词「{key}」分页解析完成,共{pagecount}页")
+ except Exception as e:
+ print(f"解析分页失败(默认单页):{e}")
+ pagecount = 1
+
+ # 返回结果(修复点2:补全page键的引号,修正语法错误)
+ total = len(vlist) * pagecount
+ print(f"搜索关键词「{key}」第{pg}页处理完成,结果{len(vlist)}条,总页数{pagecount}")
+ return {
+ 'list': vlist,
+ 'page': int(pg), # 原代码此处缺少引号,导致语法错误
+ 'pagecount': pagecount,
+ 'limit': len(vlist),
+ 'total': total
+ }
+ except Exception as e:
+ print(f"搜索功能整体异常:{e}")
+ return {
+ 'list': [],
+ 'page': int(pg), 'pagecount': 1,
+ 'limit': 0,
+ 'total': 0
+ }
+
+    def playerContent(self, flag, id, vipFlags):
+        """Return play info; parse=1 hands the page URL to the player's sniffer."""
+        headers = self.headers.copy()
+        headers.update({
+            'Referer': self.host,
+            'Origin': self.host.rstrip('/'),
+            'Host': urllib.parse.urlparse(self.host).netloc,
+        })
+
+        # parse/double mirror the source rule's play_parse / double settings
+        return {
+            'parse': 1,
+            'url': id,
+            'header': headers,
+            'double': True
+        }
+
+    def localProxy(self, param):
+        """Serve a proxied image back to the player (used for vod_pic URLs)."""
+        try:
+            url = param['url']
+            if url and not url.startswith(('http://', 'https://')):
+                url = f"{self.host.rstrip('/')}/{url.lstrip('/')}"
+
+            img_headers = self.headers.copy()
+            img_headers.update({'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8'})
+
+            res = self.fetch(url, headers=img_headers, timeout=10)
+            content_type = res.headers.get('Content-Type', 'image/jpeg')
+
+            return [200, content_type, res.content]
+        except Exception as e:
+            print(f"图片代理失败: {e}")
+            # Best effort: return an empty body rather than crashing the player.
+            return [200, 'image/jpeg', b'']
+
+ def fetch_with_retry(self, url, retry=2, timeout=5):
+ for i in range(retry + 1):
+ try:
+ resp = self.fetch(f'https://vpsdn.leuse.top/proxy?single=true&url={urllib.parse.quote(url)}',headers=self.headers, timeout=timeout)
+ if resp.status_code in (200, 301, 302):
+ return resp
+ print(f"请求{url}返回状态码{resp.status_code},重试中...")
+ except Exception as e:
+ print(f"第{i+1}次请求{url}失败: {e}")
+ if i < retry:
+ time.sleep(0.5)
+ return type('obj', (object,), {'text': '', 'status_code': 404})
+
+    def fetch(self, url, headers=None, timeout=5, method='GET', params=None):
+        """HTTP helper; every request is tunnelled through the vpsdn relay proxy.
+
+        NOTE(review): `params` is attached to the PROXY endpoint URL, not to the
+        already-quoted target `url`, so query arguments only reach the target
+        site if the relay forwards unknown query keys — confirm relay behavior.
+        """
+        headers = headers or self.headers
+        params = params or {}
+        try:
+            if method.upper() == 'GET':
+                resp = requests.get(
+                    f'https://vpsdn.leuse.top/proxy?single=true&url={urllib.parse.quote(url)}',
+                    headers=headers,
+                    timeout=timeout,
+                    allow_redirects=True,
+                    params=params  # extra GET arguments (e.g. search paging); see NOTE above
+                )
+            elif method.upper() == 'HEAD':
+                resp = requests.head(
+                    f'https://vpsdn.leuse.top/proxy?single=true&url={urllib.parse.quote(url)}',
+                    headers=headers,
+                    timeout=timeout,
+                    allow_redirects=False,
+                    params=params
+                )
+            else:
+                resp = requests.get(  # any other verb falls back to a plain GET
+                    f'https://vpsdn.leuse.top/proxy?single=true&url={urllib.parse.quote(url)}',
+                    headers=headers,
+                    timeout=timeout,
+                    allow_redirects=True,
+                    params=params
+                )
+            # Pick a sensible text encoding to avoid mojibake on Chinese pages
+            if 'charset' in resp.headers.get('Content-Type', '').lower():
+                resp.encoding = resp.apparent_encoding
+            else:
+                resp.encoding = 'utf-8'
+            return resp
+        except Exception as e:
+            print(f"网络请求失败({url}): {e}")
+            # Uniform empty response object so callers never crash on attribute access
+            return type('obj', (object,), {
+                'text': '',
+                'status_code': 500,
+                'headers': {},
+                'url': url
+            })
+
+
+# ------------------------------
+# Optional smoke test (comment out or delete when deployed)
+# ------------------------------
+if __name__ == "__main__":
+    # Build and initialise the spider
+    spider = Spider()
+    spider.init()
+
+    # Home page: category list + first-page grid
+    print("=== 测试首页 ===")
+    home_data = spider.homeContent(filter='')
+    print(f"首页分类数:{len(home_data['class'])}")
+    print(f"首页视频数:{len(home_data['list'])}")
+
+    # Video category, first page (list-1.htm layout)
+    print("\n=== 测试视频分类(第1页) ===")
+    cate_data = spider.categoryContent(tid='list', pg=1, filter='', extend='')
+    print(f"视频分类第1页视频数:{len(cate_data['list'])}")
+    print(f"视频分类总页数:{cate_data['pagecount']}")
+
+    # Keyword search
+    print("\n=== 测试搜索(关键词:测试) ===")
+    search_data = spider.searchContent(key="测试", quick=False, pg=1)
+    print(f"搜索结果数:{len(search_data['list'])}")
+    print(f"搜索总页数:{search_data['pagecount']}")
diff --git a/PyramidStore/plugin/adult/小红薯APP.py b/PyramidStore/plugin/adult/小红薯APP.py
new file mode 100644
index 0000000..adc57c7
--- /dev/null
+++ b/PyramidStore/plugin/adult/小红薯APP.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import string
+import sys
+import time
+from base64 import b64decode
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+    def init(self, extend=""):
+        # Resolve device id, then guest token / image domain / API domain.
+        # NOTE(review): gettoken() historically returned a bare '' when every
+        # mirror failed, which would break this 3-way unpack — verify.
+        self.did = self.getdid()
+        self.token,self.phost,self.host = self.gettoken()
+        pass
+
+    def isVideoFormat(self, url):
+        # No-op hook from the Spider interface.
+        pass
+
+    def manualVideoCheck(self):
+        # No-op hook from the Spider interface.
+        pass
+
+    def destroy(self):
+        # No resources to release.
+        pass
+
+ hs = ['fhoumpjjih', 'dyfcbkggxn', 'rggwiyhqtg', 'bpbbmplfxc']
+
+    def homeContent(self, filter):
+        """Fetch the encrypted category list and decode it into CMS classes."""
+        data = self.fetch(f'{self.host}/api/video/queryClassifyList?mark=4', headers=self.headers()).json()['encData']
+        data1 = self.aes(data)
+        result = {}
+        classes = []
+        for k in data1['data']:
+            classes.append({'type_name': k['classifyTitle'], 'type_id': k['classifyId']})
+        result['class'] = classes
+        return result
+
+    def homeVideoContent(self):
+        # No dedicated home video feed for this source.
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """One page (20 items) of a category; pagecount/total are open-ended sentinels."""
+        path=f'/api/short/video/getShortVideos?classifyId={tid}&videoMark=4&page={pg}&pageSize=20'
+        result = {}
+        videos = []
+        data=self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
+        vdata=self.aes(data)
+        for k in vdata['data']:
+            # Covers are served through localProxy (upstream images are obfuscated)
+            videos.append({"vod_id": k['videoId'], 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + '&url=' + k['coverImg'],
+                           'vod_remarks': self.dtim(k.get('playTime')),'style': {"type": "rect", "ratio": 1.33}})
+        result["list"] = videos
+        result["page"] = pg
+        result["pagecount"] = 9999
+        result["limit"] = 90
+        result["total"] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Fetch one video's metadata; the play id carries its auth key as query args."""
+        path = f'/api/video/getVideoById?videoId={ids[0]}'
+        data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
+        v = self.aes(data)
+        d=f'{v["title"]}$auth_key={v["authKey"]}&path={v["videoUrl"]}'
+        vod = {'vod_name': v["title"], 'type_name': ''.join(v.get('tagTitles',[])),'vod_play_from': v.get('nickName') or "小红书官方", 'vod_play_url': d}
+        result = {"list": [vod]}
+        return result
+
+    def searchContent(self, key, quick, pg='1'):
+        # Search is not supported by this source.
+        pass
+
+    def playerContent(self, flag, id, vipFlags):
+        """Build the final play URL; the signed header 'aut' becomes 'Authorization'."""
+        h=self.headers()
+        h['Authorization'] = h.pop('aut')
+        del h['deviceid']
+        result = {"parse": 0, "url": f"{self.host}/api/m3u8/decode/authPath?{id}", "header": h}
+        return result
+
+    def localProxy(self, param):
+        """Route image proxy requests to action() (fetch + de-obfuscate)."""
+        return self.action(param)
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+    def aes(self, word):
+        """AES-CBC decrypt a base64 payload and parse the plaintext as JSON.
+
+        The 16-byte key doubles as the IV (both decoded from the same base64 constant).
+        """
+        key = b64decode("SmhiR2NpT2lKSVV6STFOaQ==")
+        iv = key
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        decrypted = unpad(cipher.decrypt(b64decode(word)), AES.block_size)
+        return json.loads(decrypted.decode('utf-8'))
+
+ def dtim(self, seconds):
+ try:
+ seconds = int(seconds)
+ hours = seconds // 3600
+ remaining_seconds = seconds % 3600
+ minutes = remaining_seconds // 60
+ remaining_seconds = remaining_seconds % 60
+
+ formatted_minutes = str(minutes).zfill(2)
+ formatted_seconds = str(remaining_seconds).zfill(2)
+
+ if hours > 0:
+ formatted_hours = str(hours).zfill(2)
+ return f"{formatted_hours}:{formatted_minutes}:{formatted_seconds}"
+ else:
+ return f"{formatted_minutes}:{formatted_seconds}"
+ except:
+ return ''
+
+    def getdid(self):
+        """Return a cached device id, minting one from the current timestamp if absent."""
+        did = self.getCache('did')
+        if not did:
+            t = str(int(time.time()))
+            did = self.md5(t)
+            self.setCache('did', did)
+        return did
+
+    def getsign(self):
+        """Return (sign, t): t is the ms-timestamp text, sign is md5 of its chars 3..7."""
+        t=str(int(time.time() * 1000))
+        return self.md5(t[3:8]),t
+
+ def gettoken(self, i=0, max_attempts=10):
+ if i >= len(self.hs) or i >= max_attempts:
+ return ''
+ current_domain = f"https://{''.join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(5, 10)))}.{self.hs[i]}.work"
+ try:
+ sign,t=self.getsign()
+ url = f'{current_domain}/api/user/traveler'
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/xhs/ver=1.2.6',
+ 'deviceid': self.did, 't': t, 's': sign, }
+ data = {'deviceId': self.did, 'tt': 'U', 'code': '', 'chCode': 'dafe13'}
+ data1 = self.post(url, json=data, headers=headers)
+ data1.raise_for_status()
+ data2 = data1.json()['data']
+ return data2['token'], data2['imgDomain'],current_domain
+ except:
+ return self.gettoken(i+1, max_attempts)
+
+    def headers(self):
+        """Signed request headers: device id, timestamp `t`, signature `s`, auth token."""
+        sign,t=self.getsign()
+        henda = {
+            'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/xhs/ver=1.2.6',
+            'deviceid': self.did, 't': t, 's': sign, 'aut': self.token}
+        return henda
+
+ def action(self, param):
+ headers = {
+ 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'}
+ data = self.fetch(f'{self.phost}{param["url"]}', headers=headers)
+ type=data.headers.get('Content-Type').split(';')[0]
+ base64_data = self.img(data.content, 100, '2020-zq3-888')
+ return [200, type, base64_data]
+
+ def img(self, data: bytes, length: int, key: str):
+ GIF = b'\x47\x49\x46'
+ JPG = b'\xFF\xD8\xFF'
+ PNG = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
+
+ def is_dont_need_decode_for_gif(data):
+ return len(data) > 2 and data[:3] == GIF
+
+ def is_dont_need_decode_for_jpg(data):
+ return len(data) > 7 and data[:3] == JPG
+
+ def is_dont_need_decode_for_png(data):
+ return len(data) > 7 and data[1:8] == PNG[1:8]
+
+ if is_dont_need_decode_for_png(data):
+ return data
+ elif is_dont_need_decode_for_gif(data):
+ return data
+ elif is_dont_need_decode_for_jpg(data):
+ return data
+ else:
+ key_bytes = key.encode('utf-8')
+ result = bytearray(data)
+ for i in range(length):
+ result[i] ^= key_bytes[i % len(key_bytes)]
+ return bytes(result)
diff --git a/PyramidStore/plugin/adult/推特APP.py b/PyramidStore/plugin/adult/推特APP.py
new file mode 100644
index 0000000..ec4c155
--- /dev/null
+++ b/PyramidStore/plugin/adult/推特APP.py
@@ -0,0 +1,246 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import string
+import sys
+import time
+from base64 import b64decode
+from urllib.parse import quote
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+    def init(self, extend=""):
+        # Resolve device id, then guest token / image domain / API domain.
+        # NOTE(review): gettoken() historically returned a bare '' when every
+        # mirror failed, which would break this 3-way unpack — verify.
+        self.did = self.getdid()
+        self.token,self.phost,self.host = self.gettoken()
+        pass
+
+    def isVideoFormat(self, url):
+        # No-op hook from the Spider interface.
+        pass
+
+    def manualVideoCheck(self):
+        # No-op hook from the Spider interface.
+        pass
+
+    def action(self, action):
+        # No custom actions for this source.
+        pass
+
+    def destroy(self):
+        # No resources to release.
+        pass
+
+ hs=['wcyfhknomg','pdcqllfomw','alxhzjvean','bqeaaxzplt','hfbtpixjso']
+
+ ua='Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/twitter/ver=1.4.4'
+
+ def homeContent(self, filter):
+ data = self.fetch(f'{self.host}/api/video/classifyList', headers=self.headers()).json()['encData']
+ data1 = self.aes(data)
+ result = {'filters': {"1": [{"key": "fl", "name": "分类",
+ "value": [{"n": "最近更新", "v": "1"}, {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}], "2": [{"key": "fl", "name": "分类",
+ "value": [
+ {"n": "最近更新", "v": "1"},
+ {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}],
+ "3": [{"key": "fl", "name": "分类",
+ "value": [{"n": "最近更新", "v": "1"}, {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}], "4": [{"key": "fl", "name": "分类",
+ "value": [
+ {"n": "最近更新", "v": "1"},
+ {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}],
+ "5": [{"key": "fl", "name": "分类",
+ "value": [{"n": "最近更新", "v": "1"}, {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}], "6": [{"key": "fl", "name": "分类",
+ "value": [
+ {"n": "最近更新", "v": "1"},
+ {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}],
+ "7": [{"key": "fl", "name": "分类",
+ "value": [{"n": "最近更新", "v": "1"}, {"n": "最多播放", "v": "2"},
+ {"n": "好评榜", "v": "3"}]}], "jx": [{"key": "type", "name": "精选",
+ "value": [{"n": "日榜", "v": "1"},
+ {"n": "周榜", "v": "2"},
+ {"n": "月榜", "v": "3"},
+ {"n": "总榜",
+ "v": "4"}]}]}}
+ classes = [{'type_name': "精选", 'type_id': "jx"}]
+ for k in data1['data']:
+ classes.append({'type_name': k['classifyTitle'], 'type_id': k['classifyId']})
+ result['class'] = classes
+ return result
+
+    def homeVideoContent(self):
+        # No dedicated home video feed for this source.
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """One page of a category / author ('click' suffix) / ranking ('jx') listing."""
+        path = f'/api/video/queryVideoByClassifyId?pageSize=20&page={pg}&classifyId={tid}&sortType={extend.get("fl", "1")}'
+        if 'click' in tid:
+            # Author page: tid is "<userId>click"
+            path = f'/api/video/queryPersonVideoByType?pageSize=20&page={pg}&userId={tid.replace("click", "")}'
+        if tid == 'jx':
+            path = f'/api/video/getRankVideos?pageSize=20&page={pg}&type={extend.get("type", "1")}'
+        data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
+        data1 = self.aes(data)['data']
+        result = {}
+        videos = []
+        for k in data1:
+            # Composite id: videoId?userId?nickName (+'click' marker inside author pages)
+            id = f'{k.get("videoId")}?{k.get("userId")}?{k.get("nickName")}'
+            if 'click' in tid:
+                id = id + 'click'
+            videos.append({"vod_id": id, 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + f"&url={k.get('coverImg')[0]}",
+                           'vod_remarks': self.dtim(k.get('playTime')),'style': {"type": "rect", "ratio": 1.33}})
+        result["list"] = videos
+        result["page"] = pg
+        result["pagecount"] = 9999
+        result["limit"] = 90
+        result["total"] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Resolve the play path; the director field carries a clickable author link."""
+        vid = ids[0].replace('click', '').split('?')
+        path = f'/api/video/can/watch?videoId={vid[0]}'
+        data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
+        data1 = self.aes(data)['playPath']
+        # [a=cr:{...}/] is the player's inline-link markup for jumping to the author page
+        clj = '[a=cr:' + json.dumps({'id': vid[1] + 'click', 'name': vid[2]}) + '/]' + vid[2] + '[/a]'
+        if 'click' in ids[0]:
+            clj = vid[2]
+        vod = {'vod_director': clj, 'vod_play_from': "推特", 'vod_play_url': vid[2] + "$" + data1}
+        result = {"list": [vod]}
+        return result
+
+    def searchContent(self, key, quick, pg='1'):
+        """Keyword search; same card payload shape as categoryContent."""
+        path = f'/api/search/keyWord?pageSize=20&page={pg}&searchWord={quote(key)}&searchType=1'
+        data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
+        data1 = self.aes(data)['videoList']
+        result = {}
+        videos = []
+        for k in data1:
+            id = f'{k.get("videoId")}?{k.get("userId")}?{k.get("nickName")}'
+            videos.append({"vod_id": id, 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + f"&url={k.get('coverImg')[0]}",
+                           'vod_remarks': self.dtim(k.get('playTime')), 'style': {"type": "rect", "ratio": 1.33}})
+        result["list"] = videos
+        result["page"] = pg
+        result["pagecount"] = 9999
+        result["limit"] = 90
+        result["total"] = 999999
+        return result
+
+    def playerContent(self, flag, id, vipFlags):
+        """The play URL was already resolved in detailContent; pass it through signed."""
+        return {"parse": 0, "url": id, "header": self.headers()}
+
+    def localProxy(self, param):
+        """Route image proxy requests to imgs() (fetch + de-obfuscate)."""
+        return self.imgs(param)
+
+    def getsign(self):
+        """Return (sign, t): md5 of the full ms-timestamp string, plus the timestamp."""
+        t = str(int(time.time() * 1000))
+        sign = self.md5(t)
+        return sign, t
+
+    def headers(self):
+        """Signed request headers: device id, timestamp `t`, signature `s`, auth token."""
+        sign, t = self.getsign()
+        return {'User-Agent': self.ua,'deviceid': self.did, 't': t, 's': sign, 'aut': self.token}
+
+    def aes(self, word):
+        """AES-CBC decrypt a base64 payload and parse the plaintext as JSON.
+
+        The 16-byte key doubles as the IV (both decoded from the same base64 constant).
+        """
+        key = b64decode("SmhiR2NpT2lKSVV6STFOaQ==")
+        iv = key
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        decrypted = unpad(cipher.decrypt(b64decode(word)), AES.block_size)
+        return json.loads(decrypted.decode('utf-8'))
+
+ def dtim(self, seconds):
+ try:
+ seconds = int(seconds)
+ hours = seconds // 3600
+ remaining_seconds = seconds % 3600
+ minutes = remaining_seconds // 60
+ remaining_seconds = remaining_seconds % 60
+
+ formatted_minutes = str(minutes).zfill(2)
+ formatted_seconds = str(remaining_seconds).zfill(2)
+
+ if hours > 0:
+ formatted_hours = str(hours).zfill(2)
+ return f"{formatted_hours}:{formatted_minutes}:{formatted_seconds}"
+ else:
+ return f"{formatted_minutes}:{formatted_seconds}"
+ except:
+ return "666"
+
+ def gettoken(self, i=0, max_attempts=10):
+ if i >= len(self.hs) or i >= max_attempts:
+ return ''
+ current_domain = f"https://{''.join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(5, 10)))}.{self.hs[i]}.work"
+ try:
+ url = f'{current_domain}/api/user/traveler'
+ sign, t = self.getsign()
+ headers = {
+ 'User-Agent': self.ua,
+ 'Accept': 'application/json',
+ 'deviceid': self.did,
+ 't': t,
+ 's': sign,
+ }
+ data = {
+ 'deviceId': self.did,
+ 'tt': 'U',
+ 'code': '##X-4m6Goo4zzPi1hF##',
+ 'chCode': 'tt09'
+ }
+ response = self.post(url, json=data, headers=headers)
+ response.raise_for_status()
+ data1 = response.json()['data']
+ return data1['token'], data1['imgDomain'], current_domain
+ except Exception as e:
+ return self.gettoken(i + 1, max_attempts)
+
+    def getdid(self):
+        """Return a cached device id, minting one from the current timestamp if absent."""
+        did = self.getCache('did')
+        if not did:
+            t = str(int(time.time()))
+            did = self.md5(t)
+            self.setCache('did', did)
+        return did
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+    def imgs(self, param):
+        """localProxy backend: fetch a cover from the image domain and de-obfuscate it."""
+        headers = {'User-Agent': self.ua}
+        url = param['url']
+        data = self.fetch(f"{self.phost}{url}",headers=headers)
+        bdata = self.img(data.content, 100, '2020-zq3-888')
+        return [200, data.headers.get('Content-Type'), bdata]
+
+ def img(self, data: bytes, length: int, key: str):
+ GIF = b'\x47\x49\x46'
+ JPG = b'\xFF\xD8\xFF'
+ PNG = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
+
+ def is_dont_need_decode_for_gif(data):
+ return len(data) > 2 and data[:3] == GIF
+
+ def is_dont_need_decode_for_jpg(data):
+ return len(data) > 7 and data[:3] == JPG
+
+ def is_dont_need_decode_for_png(data):
+ return len(data) > 7 and data[1:8] == PNG[1:8]
+
+ if is_dont_need_decode_for_png(data):
+ return data
+ elif is_dont_need_decode_for_gif(data):
+ return data
+ elif is_dont_need_decode_for_jpg(data):
+ return data
+ else:
+ key_bytes = key.encode('utf-8')
+ result = bytearray(data)
+ for i in range(length):
+ result[i] ^= key_bytes[i % len(key_bytes)]
+ return bytes(result)
diff --git a/PyramidStore/plugin/adult/浴火社APP.py b/PyramidStore/plugin/adult/浴火社APP.py
new file mode 100644
index 0000000..4698b09
--- /dev/null
+++ b/PyramidStore/plugin/adult/浴火社APP.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import threading
+import time
+from base64 import b64decode, b64encode
+import requests
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # Device id and auth token are needed by every API call below.
        self.did = self.getdid()
        self.token=self.gettoken()
        domain=self.domain()
        # Fastest preview-image host, plus the original video hosts/names
        # used by detailContent() to build play URLs.
        self.phost=self.host_late(domain['domain_preview'])
        self.bhost=domain['domain_original']
        self.names=domain['name_original']
        pass
+
    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin and default request headers for the app backend.
    host = 'https://lulu-api-92mizw.jcdwn.com'

    headers = {
        'User-Agent': 'okhttp/4.11.0',
        'referer': 'https://app.nova-traffic-1688.com',
    }
+
    def homeContent(self, filter):
        """Build the category tree and per-category filter definitions.

        Categories: two fixed ones (makers/actor), one 'discover' tag
        category, and one per producer. Side effect: stores the default
        region id in self.aid for getact().
        """
        BASE_CATEGORIES = [
            {'type_name': '片商', 'type_id': 'makers'},
            {'type_name': '演员', 'type_id': 'actor'}
        ]

        SORT_OPTIONS = {
            'key': 'sortby',
            'name': 'sortby',
            'value': [
                {'n': '最新', 'v': 'on_shelf_at'},
                {'n': '最热', 'v': 'hot'}
            ]
        }

        # Lookup tables used to assemble the filter widgets.
        tags = self.getdata('/api/v1/video/tag?current=1&pageSize=100&level=1')
        producers = self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
        regions = self.getdata('/api/v1/video/region?current=1&pageSize=100')
        result = {'class': [], 'filters': {}}
        result['class'].extend(BASE_CATEGORIES)
        for category in BASE_CATEGORIES:
            result['filters'][category['type_id']] = [SORT_OPTIONS]
        if tags.get('data'):
            # First level-1 tag becomes the '发现' (discover) category; the
            # remaining tags become its sub-tag filter values.
            main_tag = tags['data'][0]
            result['class'].append({
                'type_name': '发现',
                'type_id': f'{main_tag["id"]}_tag'
            })
            tag_values = [
                {'n': tag['name'], 'v': f"{tag['id']}_tag"}
                for tag in tags['data'][1:]
                if tag.get('id')
            ]
            result['filters'][f'{main_tag["id"]}_tag'] = [
                {'key': 'tagtype', 'name': 'tagtype', 'value': tag_values},
                SORT_OPTIONS
            ]

        region_filter = {
            'key': 'region_ids',
            'name': 'region_ids',
            'value': [
                {'n': region['name'], 'v': region['id']}
                for region in regions['data'][1:]
                if region.get('id')
            ]
        }
        # Default region id, consumed by getact().
        self.aid=regions['data'][0]['id']
        # 'actor' filter list already exists from the BASE_CATEGORIES loop.
        result['filters']['actor'].append({
            'key': 'region_id',
            'name': 'region_id',
            'value': region_filter['value'][:2]
        })
        complex_sort = {
            'key': 'sortby',
            'name': 'sortby',
            'value': [
                {'n': '综合', 'v': 'complex'},
                *SORT_OPTIONS['value']
            ]
        }
        producer_filters = [region_filter, complex_sort]
        for producer in producers['data']:
            result['class'].append({
                'type_name': producer['name'],
                'type_id': f'{producer["id"]}_sx'
            })
            result['filters'][f'{producer["id"]}_sx'] = producer_filters
        return result
+
+ def homeVideoContent(self):
+ data=self.getdata('/api/v1/video?current=1&pageSize=60®ion_ids=&sortby=complex')
+ return {'list':self.getlist(data)}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ if 'act' in tid:
+ data=self.getact(tid, pg, filter, extend)
+ elif 'tag' in tid:
+ data=self.gettag(tid, pg, filter, extend)
+ elif 'sx' in tid:
+ data=self.getsx(tid, pg, filter, extend)
+ elif 'make' in tid:
+ data=self.getmake(tid, pg, filter, extend)
+ result = {}
+ result['list'] = data
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        """Fetch one video's detail and build a play URL per original-domain mirror."""
        v=self.getdata(f'/api/v1/video?current=1&pageSize=1&id={ids[0]}&detail=1')
        v=v['data'][0]
        vod = {
            'vod_name': v.get('title'),
            'type_name': '/'.join(v.get('tag_names',[])),
            'vod_play_from': '浴火社',
            'vod_play_url': ''
        }
        p=[]
        # Each play id carries '<mirror+stream path>@@@<video id>';
        # localProxy() splits on '@@@' to fetch the key later.
        for i,j in enumerate(self.bhost):
            p.append(f'{self.names[i]}${j}{v.get("highres_url") or v.get("preview_url")}@@@{v["id"]}')
        vod['vod_play_url'] = '#'.join(p)
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.getdata(f'/api/v1/video?current={pg}&pageSize=30&title={key}')
+ return {'list':self.getlist(data),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ url=f'{self.getProxyUrl()}&url={self.e64(id)}&type=m3u8'
+ return {'parse': 0, 'url': url, 'header': self.headers}
+
    def localProxy(self, param):
        """Local proxy endpoints: base64 image decode, m3u8 rewrite, key fetch."""
        if param.get('type')=='image':
            # Preview images arrive base64-encoded; decode before returning.
            data=self.fetch(param.get('url'), headers=self.headers).text
            content=b64decode(data.encode('utf-8'))
            return [200, 'image/png', content]
        if param.get('type')=='m3u8':
            # Play id = '<stream url>@@@<video id>' (see detailContent()).
            ids=self.d64(param.get('url')).split('@@@')
            data=self.fetch(ids[0], headers=self.headers).text
            lines = data.strip().split('\n')
            for index, string in enumerate(lines):
                if 'URI=' in string:
                    # Point the encryption-key URI back at this proxy (mkey).
                    replacement = f'URI="{self.getProxyUrl()}&id={ids[1]}&type=mkey"'
                    lines[index]=re.sub(r'URI="[^"]+"', replacement, string)
                    continue
                if '#EXT' not in string and 'http' not in string:
                    # Make relative segment paths absolute against the playlist URL.
                    last_slash_index = ids[0].rfind('/')
                    lpath = ids[0][:last_slash_index + 1]
                    lines[index] = f'{lpath}{string}'
            data = '\n'.join(lines)
            return [200, 'audio/x-mpegurl', data]
        if param.get('type')=='mkey':
            # Fetch the per-video AES key from the API with the auth token.
            id=param.get('id')
            headers = {
                'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
                'authdog': self.token
            }
            response = self.fetch(f'{self.host}/api/v1/video/key/{id}', headers=headers)
            type=response.headers.get('Content-Type')
            return [200, type, response.content]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def getdid(self):
+ did = self.md5(str(int(time.time() * 1000)))
+ try:
+ if self.getCache('did'):
+ return self.getCache('did')
+ else:
+ self.setCache('did', did)
+ return did
+ except Exception as e:
+ self.setCache('did', did)
+ return did
+
+ def host_late(self, url_list):
+ if isinstance(url_list, str):
+ urls = [u.strip() for u in url_list.split(',')]
+ else:
+ urls = url_list
+ if len(urls) <= 1:
+ return urls[0] if urls else ''
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ response = requests.head(url, timeout=1.0, allow_redirects=False)
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ except Exception as e:
+ results[url] = float('inf')
+
+ for url in urls:
+ t = threading.Thread(target=test_host, args=(url,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+ return min(results.items(), key=lambda x: x[1])[0]
+
    def domain(self):
        """Fetch the encrypted domain list from the API and return it decrypted."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
        }
        response = self.fetch(f'{self.host}/api/v1/system/domain', headers=headers)
        # Response body is AES-CBC encrypted JSON (see aes()).
        return self.aes(response.content)
+
    def aes(self, word):
        """AES-CBC decrypt an API response body and parse it as JSON.

        Key and IV are fixed app-wide constants shipped base64-encoded.
        """
        key = b64decode("amtvaWc5ZnJ2Ym5taml1eQ==")
        iv = b64decode("AAEFAwQFCQcICQoLDA0ODw==")
        cipher = AES.new(key, AES.MODE_CBC, iv)
        decrypted = unpad(cipher.decrypt(word), AES.block_size)
        return json.loads(decrypted.decode('utf-8'))
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
    def gettoken(self):
        """Register this device with the API and return 'Bearer <token>'-style auth."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
            # NOTE(review): header name 'cookei' looks like a typo for
            # 'cookie' but is presumably what the server expects -- confirm
            # before changing.
            'cookei': self.md5(f'{self.did}+android'),
            'siteid': '11',
            'siteauthority': 'lls888.tv'
        }

        # Fixed device-registration payload; only device_id varies.
        json_data = {
            'app_id': 'jukjoe.zqgpi.hfzvde.sdot',
            'phone_device': 'Redmi M2012K10C',
            'device_id': self.did,
            'device_type': 'android',
            'invite_code': 'oi1o',
            'is_first': 1,
            'os_version': '11',
            'version': '8.59',
        }
        response = self.post(f'{self.host}/api/v1/member/device', headers=headers, json=json_data)
        # Response is AES-encrypted JSON carrying token_type + access_token.
        tdata = self.aes(response.content)
        return f'{tdata["token_type"]} {tdata["access_token"]}'
+
+ def getdata(self, path):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
+ 'authdog': self.token
+ }
+ response = self.fetch(f'{self.host}{path}', headers=headers)
+ return self.aes(response.content)
+
+ def getimg(self, path):
+ if not path.startswith('/'):
+ path = f'/{path}'
+ return f'{self.getProxyUrl()}&url={self.phost}{path}&type=image'
+
+ def getlist(self,data):
+ videos = []
+ for i in data['data']:
+ videos.append({
+ 'vod_id': i['id'],
+ 'vod_name': i['title'],
+ 'vod_pic': self.getimg(i.get('coverphoto_h' or i.get('coverphoto_v'))),
+ 'style': {"type": "rect", "ratio": 1.33}})
+ return videos
+
+ def geticon(self, data, st='',style=None):
+ if style is None:style = {"type": "oval"}
+ videos = []
+ for i in data['data']:
+ videos.append({
+ 'vod_id': f'{i["id"]}{st}',
+ 'vod_name': i['name'],
+ 'vod_pic': self.getimg(i.get('icon_path')),
+ 'vod_tag': 'folder',
+ 'style': style})
+ return videos
+
+ def getact(self, tid, pg, filter, extend):
+ if tid == 'actor' and pg=='1':
+ data = self.getdata(f'/api/v1/video/actor?current=1&pageSize=999®ion_id={extend.get("region_id",self.aid)}&discover_page={pg}')
+ return self.geticon(data, '_act')
+ elif '_act' in tid:
+ data = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&actor_ids={tid.split("_")[0]}&sortby={extend.get("sortby","on_shelf_at")}')
+ return self.getlist(data)
+
+ def gettag(self, tid, pg, filter, extend):
+ if '_tag' in tid:
+ tid=extend.get('tagtype',tid)
+ data=self.getdata(f'/api/v1/video/tag?current={pg}&pageSize=100&level=2&parent_id={tid.split("_")[0]}')
+ return self.geticon(data, '_stag',{"type": "rect", "ratio": 1.33})
+ elif '_stag' in tid:
+ data = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&tag_ids={tid.split("_")[0]}&sortby={extend.get("sortby","on_shelf_at")}')
+ return self.getlist(data)
+
+ def getsx(self, tid, pg, filter, extend):
+ data=self.getdata(f'/api/v1/video?current={pg}&pageSize=20&producer_ids={tid.split("_")[0]}®ion_ids={extend.get("region_ids","")}&sortby={extend.get("sortby","complex")}')
+ return self.getlist(data)
+
+ def getmake(self, tid, pg, filter, extend):
+ if pg=='1':
+ data=self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
+ return self.geticon(data, '_sx',{"type": "rect", "ratio": 1.33})
+
diff --git a/PyramidStore/plugin/adult/花都.py b/PyramidStore/plugin/adult/花都.py
new file mode 100644
index 0000000..b1209f3
--- /dev/null
+++ b/PyramidStore/plugin/adult/花都.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import threading
+import time
+from base64 import b64encode, b64decode
+from urllib.parse import urlparse
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        '''
        If the site is persistently unreachable, visit the nav page
        https://a.hdys.top manually and hard-code the mirror:
        self.host = 'https://xxx.xxx.xxx'
        '''
        self.session = requests.Session()
        # Browser-like headers; 'referer' is filled in once the host is known.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua-platform': '"Android"',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
            'dnt': '1',
            'sec-ch-ua-mobile': '?1',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-mode': 'no-cors',
            'sec-fetch-dest': 'script',
            'accept-language': 'zh-CN,zh;q=0.9',
            'priority': 'u=2',
        }
        # extend may carry a requests-style proxies mapping as JSON.
        try:self.proxies = json.loads(extend)
        except:self.proxies = {}
        # NOTE(review): 'hsot' is a typo for 'host' but is used consistently
        # throughout this class; renaming would touch every method.
        self.hsot=self.gethost()
        # self.hsot='https://hd.hdys2.com'
        self.headers.update({'referer': f"{self.hsot}/"})
        self.session.proxies.update(self.proxies)
        self.session.headers.update(self.headers)
        pass
+
    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Headers used when fetching streams/segments (cross-site player origin).
    pheader={
        'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"Android"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
        'dnt': '1',
        'sec-ch-ua-mobile': '?1',
        'origin': 'https://jx.8852.top',
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'accept-language': 'zh-CN,zh;q=0.9',
        'priority': 'u=1, i',
    }
+
    def homeContent(self, filter):
        """Scrape the home page for the category menu and the front-page list."""
        data=self.getpq(self.session.get(self.hsot))
        cdata=data('.stui-header__menu.type-slide li')
        ldata=data('.stui-vodlist.clearfix li')
        result = {}
        classes = []
        for k in cdata.items():
            i=k('a').attr('href')
            # Only menu links of the form /vodtype/<id>... carry a category id.
            if i and 'type' in i:
                classes.append({
                    'type_name': k.text(),
                    'type_id': re.search(r'\d+', i).group(0)
                })
        result['class'] = classes
        result['list'] = self.getlist(ldata)
        return result
+
+ def homeVideoContent(self):
+ return {'list':''}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.getpq(self.session.get(f"{self.hsot}/vodshow/{tid}--------{pg}---.html"))
+ result = {}
+ result['list'] = self.getlist(data('.stui-vodlist.clearfix li'))
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        """Scrape a detail page; the single play entry points at the play-page URL."""
        data=self.getpq(self.session.get(f"{self.hsot}{ids[0]}"))
        v=data('.stui-vodlist__box a')
        vod = {
            'vod_play_from': '花都影视',
            # '<title>$<play page href>' -- resolved later by playerContent().
            'vod_play_url': f"{v('img').attr('alt')}${v.attr('href')}"
        }
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.getpq(self.session.get(f"{self.hsot}/vodsearch/{key}----------{pg}---.html"))
+ return {'list':self.getlist(data('.stui-vodlist.clearfix li')),'page':pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve the real stream URL; fall back to webview parsing (parse=1)."""
        try:
            data=self.getpq(self.session.get(f"{self.hsot}{id}"))
            # The player config sits in the first <script> as 'var player_x = {...}'.
            jstr=data('.stui-player.col-pd script').eq(0).text()
            jsdata=json.loads(jstr.split("=", maxsplit=1)[-1])
            p,url=0,jsdata['url']
            # m3u8 streams go through the local proxy so segments get rewritten.
            if '.m3u8' in url:url=self.proxy(url,'m3u8')
        except Exception as e:
            print(f"{str(e)}")
            # Scraping failed -- hand the page itself to the player to parse.
            p,url=1,f"{self.hsot}{id}"
        return {'parse': p, 'url': url, 'header': self.pheader}
+
    def liveContent(self, url):
        # Live channels are not supported by this source.
        pass
+
+ def localProxy(self, param):
+ url = self.d64(param['url'])
+ if param.get('type') == 'm3u8':
+ return self.m3Proxy(url)
+ else:
+ return self.tsProxy(url,param['type'])
+
    def gethost(self):
        """Fetch the mirror list from the nav site and return the fastest one."""
        params = {
            'v': '1',
        }
        self.headers.update({'referer': 'https://a.hdys.top/'})
        response = self.session.get('https://a.hdys.top/assets/js/config.js',proxies=self.proxies, params=params, headers=self.headers)
        # config.js is a ';'-separated JS snippet; the last four entries are
        # presumably not host definitions -- TODO confirm the file format.
        return self.host_late(response.text.split(';')[:-4])
+
+ def getlist(self,data):
+ videos=[]
+ for i in data.items():
+ videos.append({
+ 'vod_id': i('a').attr('href'),
+ 'vod_name': i('img').attr('alt'),
+ 'vod_pic': self.proxy(i('img').attr('data-original')),
+ 'vod_year': i('.pic-tag-t').text(),
+ 'vod_remarks': i('.pic-tag-b').text()
+ })
+ return videos
+
+ def getpq(self, data):
+ try:
+ return pq(data.text)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(data.text.encode('utf-8'))
+
    def host_late(self, url_list):
        """Probe candidate mirrors concurrently and return the fastest one."""
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list

        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}
        threads = []

        def test_host(url):
            try:
                # Each entry is a JS snippet like var x = "https://..."; pull
                # the quoted URL out before probing.
                url=re.findall(r'"([^"]*)"', url)[0]
                start_time = time.time()
                self.headers.update({'referer': f'{url}/'})
                response = requests.head(url,proxies=self.proxies,headers=self.headers,timeout=1.0, allow_redirects=False)
                delay = (time.time() - start_time) * 1000
                results[url] = delay
            except Exception as e:
                # Failed probes get infinite latency so min() avoids them.
                results[url] = float('inf')

        for url in urls:
            t = threading.Thread(target=test_host, args=(url,))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        return min(results.items(), key=lambda x: x[1])[0]
+
+ def m3Proxy(self, url):
+ ydata = requests.get(url, headers=self.pheader, proxies=self.proxies, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = requests.get(url, headers=self.pheader, proxies=self.proxies).content.decode('utf-8')
+ lines = data.strip().split('\n')
+ last_r = url[:url.rfind('/')]
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ for index, string in enumerate(lines):
+ if '#EXT' not in string:
+ if 'http' not in string:
+ domain=last_r if string.count('/') < 2 else durl
+ string = domain + ('' if string.startswith('/') else '/') + string
+ lines[index] = self.proxy(string, string.split('.')[-1].split('?')[0])
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def tsProxy(self, url,type):
+ h=self.pheader.copy()
+ if type=='img':h=self.headers.copy()
+ data = requests.get(url, headers=h, proxies=self.proxies, stream=True)
+ return [200, data.headers['Content-Type'], data.content]
+
+ def proxy(self, data, type='img'):
+ if data and len(self.proxies):return f"{self.getProxyUrl()}&url={self.e64(data)}&type={type}"
+ else:return data
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
diff --git a/PyramidStore/plugin/adult/香蕉APP.py b/PyramidStore/plugin/adult/香蕉APP.py
new file mode 100644
index 0000000..277fc08
--- /dev/null
+++ b/PyramidStore/plugin/adult/香蕉APP.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import random
+import string
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # Resolve a working random subdomain plus authenticated headers once.
        self.host,self.headers = self.getat()
        pass
+
    # Unused Spider interface hooks.
    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass
+
    def homeContent(self, filter):
        """Build categories and shared filter widgets from the listing endpoint."""
        data=self.fetch(f'{self.host}/vod/listing-0-0-0-0-0-0-0-0-0-0',headers=self.headers).json()
        result = {}
        classes = [{
            'type_name': '全部',
            'type_id': '0'
        }]
        filters = {}
        ft=[]
        # Every filter group the API exposes; each row's first two fields are
        # taken as (value, label).
        filter_keys = ['orders', 'areas', 'years', 'definitions', 'durations', 'mosaics', 'langvoices']
        for key in filter_keys:
            if key in data['data']:
                filter_item = {
                    'key': key,
                    'name': key,
                    'value': []
                }
                for item in data['data'][key]:
                    first_two = dict(list(item.items())[:2])
                    filter_item['value'].append({
                        'v': list(first_two.values())[0],
                        'n': list(first_two.values())[1]
                    })
                ft.append(filter_item)
        filters['0']=ft
        # The same filter set applies to every category.
        for k in data['data']['categories']:
            classes.append({
                'type_name': k['catename'],
                'type_id': k['cateid']
            })
            filters[k['cateid']]=ft

        result['class'] = classes
        result['filters'] =filters
        result['list'] = self.getlist(data['data']['vodrows'])
        return result
+
    def homeVideoContent(self):
        # No dedicated home feed; homeContent() already returns a list.
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.fetch(f'{self.host}/vod/listing-{tid}-{extend.get("areas","0")}-{extend.get("years","0")}-1-{extend.get("definitions","0")}-{extend.get("durations","0")}-{extend.get("mosaics","0")}-{extend.get("langvoices","0")}-{extend.get("orders","0")}-{pg}',headers=self.headers).json()
+ result = {}
+ result['list'] = self.getlist(data['data']['vodrows'])
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data=self.fetch(f'{self.host}/vod/reqplay/{ids[0]}',headers=self.headers).json()
+ vod = {
+ 'vod_play_from': data['errmsg'],
+ 'vod_play_url': '#'.join([f"{i['hdtype']}${i['httpurl']}" for i in data['data']['httpurls']]),
+ }
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.fetch(f'{self.host}/search?page={pg}&wd={key}',headers=self.headers).json()
+ return {'list':self.getlist(data['data']['vodrows']),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'parse': 0, 'url': id, 'header': {'User-Agent':'ExoPlayer'}}
+
    def localProxy(self, param):
        # No proxied resources for this source.
        pass
+
+ def getlist(self,data):
+ vlist=[]
+ for i in data:
+ if i['isvip'] !='1':
+ vlist.append({
+ 'vod_id': i['vodid'],
+ 'vod_name': i['title'],
+ 'vod_pic': i['coverpic'],
+ 'vod_year': i.get('duration'),
+ 'vod_remarks': i.get('catename'),
+ 'style': {"type": "rect", "ratio": 1.33}
+ })
+ return vlist
+
    def getat(self):
        """Pick a random API subdomain, call /init, and return (host, auth headers)."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            # Fresh per-install identifier expected by the API.
            'x-auth-uuid': self.random_str(32),
            'x-system': 'Android',
            'x-version': '5.0.5',
            'x-channel': 'xj2',
            'x-requested-with': 'com.uyvzkv.pnjzdv',
            'sec-fetch-site': 'cross-site',
            'sec-fetch-mode': 'cors',
            'sec-fetch-dest': 'empty',
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
        }
        # Any random subdomain of bjhpz.com resolves to the API -- presumably
        # wildcard DNS used for blocking evasion; TODO confirm.
        host=f'https://{self.random_str(6)}.bjhpz.com'
        data=self.fetch(f'{host}/init',headers=headers).json()
        # The /init call hands back the auth cookie used by every later request.
        headers.update({'x-cookie-auth': data['data']['globalData'].get('xxx_api_auth')})
        return host,headers
+
+ def random_str(self,length=16):
+ chars = string.ascii_lowercase + string.digits
+ return ''.join(random.choice(chars) for _ in range(length))
+
diff --git a/PyramidStore/plugin/adult/黑料不打样.py b/PyramidStore/plugin/adult/黑料不打样.py
new file mode 100644
index 0000000..15fc547
--- /dev/null
+++ b/PyramidStore/plugin/adult/黑料不打样.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+import json,re,sys,base64,requests
+from Crypto.Cipher import AES
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    # CSS selectors tried in order when scraping listing pages.
    SELECTORS=['.video-item','.video-list .item','.list-item','.post-item']
    def getName(self):return"黑料不打烊"
    def init(self,extend=""):pass
    def homeContent(self,filter):
        # Static category map: display name -> URL slug on heiliao.com.
        cateManual={"最新黑料":"hlcg","今日热瓜":"jrrs","每日TOP10":"mrrb","周报精选":"zbjx","月榜热瓜":"ybrg","反差女友":"fczq","校园黑料":"xycg","网红黑料":"whhl","明星丑闻":"mxcw","原创社区":"ycsq","推特社区":"ttsq","社会新闻":"shxw","官场爆料":"gchl","影视短剧":"ysdj","全球奇闻":"qqqw","黑料课堂":"hlkt","每日大赛":"mrds","激情小说":"jqxs","桃图杂志":"ttzz","深夜综艺":"syzy","独家爆料":"djbl"}
        return{'class':[{'type_name':k,'type_id':v}for k,v in cateManual.items()]}
    def homeVideoContent(self):return{}
    def categoryContent(self,tid,pg,filter,extend):
        # Page 1 uses the bare slug; later pages use /page/{pg}/.
        url=f'https://heiliao.com/{tid}/'if int(pg)==1 else f'https://heiliao.com/{tid}/page/{pg}/'
        videos=self.get_list(url)
        return{'list':videos,'page':pg,'pagecount':9999,'limit':90,'total':999999}
    def fetch_and_decrypt_image(self,url):
        # Download an encrypted cover image and AES-CBC decrypt it with the
        # site-wide fixed key/IV; returns b'' on any failure.
        # NOTE(review): verify=False disables TLS validation -- kept as-is.
        try:
            if url.startswith('//'):url='https:'+url
            elif url.startswith('/'):url='https://heiliao.com'+url
            r=requests.get(url,headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36','Referer':'https://heiliao.com/'},timeout=15,verify=False)
            if r.status_code!=200:return b''
            return AES.new(b'f5d965df75336270',AES.MODE_CBC,b'97b60394abc2fbe1').decrypt(r.content)
        except: return b''
    def _extract_img_from_onload(self,node):
        # Pull the encrypted image URL out of an onload="loadImg(this,'...')"
        # (or loadShareImg) attribute; '' when absent or unparsable.
        try:
            m=re.search(r"load(?:Share)?Img\s*\([^,]+,\s*['\"]([^'\"]+)['\"]",(node.attr('onload')or''))
            return m.group(1)if m else''
        except:return''
    def _should_decrypt(self,url:str)->bool:
        # Hosts/paths known to serve AES-encrypted images.
        u=(url or'').lower();return any(x in u for x in['pic.gylhaa.cn','new.slfpld.cn','/upload_01/','/upload/'])
+ def _abs(self,u:str)->str:
+ if not u:return''
+ if u.startswith('//'):return'https:'+u
+ if u.startswith('/'):return'https://heiliao.com'+u
+ return u
+ def e64(self,s:str)->str:
+ try:return base64.b64encode((s or'').encode()).decode()
+ except:return''
+ def d64(self,s:str)->str:
+ try:return base64.b64decode((s or'').encode()).decode()
+ except:return''
    def _img(self,img_node):
        # Choose the best image source (src/data-src, else the encrypted
        # onload URL) and route it through the local proxy when the host is
        # known to serve encrypted images.
        u=''if img_node is None else(img_node.attr('src')or img_node.attr('data-src')or'')
        enc=''if img_node is None else self._extract_img_from_onload(img_node)
        t=enc or u
        return f"{self.getProxyUrl()}&url={self.e64(t)}&type=hlimg"if t and(enc or self._should_decrypt(t))else self._abs(t)
    def _parse_items(self,root):
        # Try each listing selector in turn and keep the first one that
        # yields any titled, linked items.
        vids=[]
        for sel in self.SELECTORS:
            for it in root(sel).items():
                title=it.find('.title, h3, h4, .video-title').text()
                if not title:continue
                link=it.find('a').attr('href')
                if not link:continue
                vids.append({'vod_id':self._abs(link),'vod_name':title,'vod_pic':self._img(it.find('img')),'vod_remarks':it.find('.date, .time, .remarks, .duration').text()or''})
            if vids:break
        return vids
+ def detailContent(self,array):
+ tid=array[0];url=tid if tid.startswith('http')else f'https://heiliao.com{tid}'
+ rsp=self.fetch(url)
+ if not rsp:return{'list':[]}
+ rsp.encoding='utf-8';html_text=rsp.text
+ try:root_text=pq(html_text)
+ except:root_text=None
+ try:root_content=pq(rsp.content)
+ except:root_content=None
+ title=(root_text('title').text()if root_text else'')or''
+ if' - 黑料网'in title:title=title.replace(' - 黑料网','')
+ pic=''
+ if root_text:
+ og=root_text('meta[property="og:image"]').attr('content')
+ if og and(og.endswith('.png')or og.endswith('.jpg')or og.endswith('.jpeg')):pic=og
+ else:pic=self._img(root_text('.video-item-img img'))
+ detail=''
+ if root_text:
+ detail=root_text('meta[name="description"]').attr('content')or''
+ if not detail:detail=root_text('.content').text()[:200]
+ play_from,play_url=[],[]
+ if root_content:
+ for i,p in enumerate(root_content('.dplayer').items()):
+ c=p.attr('config')
+ if not c:continue
+ try:s=(c.replace('"','"').replace('"','"').replace('&','&').replace('&','&').replace('<','<').replace('<','<').replace('>','>').replace('>','>'));u=(json.loads(s).get('video',{})or{}).get('url','')
+ except:m=re.search(r'"url"\s*:\s*"([^"]+)"',c);u=m.group(1)if m else''
+ if u:
+ u=u.replace('\\/','/');u=self._abs(u)
+ play_from.append(f'视频{i+1}');play_url.append(u)
+ if not play_url:
+ for pat in[r'https://hls\.[^"\']+\.m3u8[^"\']*',r'https://[^"\']+\.m3u8\?auth_key=[^"\']+',r'//hls\.[^"\']+\.m3u8[^"\']*']:
+ for u in re.findall(pat,html_text):
+ u=self._abs(u);play_from.append(f'视频{len(play_from)+1}');play_url.append(u)
+ if len(play_url)>=3:break
+ if play_url:break
+ if not play_url:
+ js_patterns=[r'video[\s\S]{0,500}?url[\s"\'`:=]+([^"\'`\s]+)',r'videoUrl[\s"\'`:=]+([^"\'`\s]+)',r'src[\s"\'`:=]+([^"\'`\s]+\.m3u8[^"\'`\s]*)']
+ for pattern in js_patterns:
+ js_urls=re.findall(pattern,html_text)
+ for js_url in js_urls:
+ if'.m3u8'in js_url:
+ if js_url.startswith('//'):js_url='https:'+js_url
+ elif js_url.startswith('/'):js_url='https://heiliao.com'+js_url
+ elif not js_url.startswith('http'):js_url='https://'+js_url
+ play_from.append(f'视频{len(play_from)+1}');play_url.append(js_url)
+ if len(play_url)>=3:break
+ if play_url:break
+ if not play_url:
+ play_from.append('示例视频');play_url.append("https://hls.obmoti.cn/videos5/b9699667fbbffcd464f8874395b91c81/b9699667fbbffcd464f8874395b91c81.m3u8?auth_key=1760372539-68ed273b94e7a-0-3a53bc0df110c5f149b7d374122ef1ed&v=2")
+ return{'list':[{'vod_id':tid,'vod_name':title,'vod_pic':pic,'vod_content':detail,'vod_play_from':'$$$'.join(play_from),'vod_play_url':'$$$'.join(play_url)}]}
    def searchContent(self,key,quick,pg="1"):
        # Site search ignores pagination here; one page of results returned.
        rsp=self.fetch(f'https://heiliao.com/index/search?word={key}')
        if not rsp:return{'list':[]}
        return{'list':self._parse_items(pq(rsp.text))}
    def playerContent(self,flag,id,vipFlags):
        # The id is a direct stream URL; only browser-like headers are needed.
        return{"parse":0,"playUrl":"","url":id,"header":{"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36","Referer":"https://heiliao.com/"}}
    def get_list(self,url):
        # Fetch a listing page and parse it; empty list when the fetch fails.
        rsp=self.fetch(url)
        return[]if not rsp else self._parse_items(pq(rsp.text))
    def fetch(self,url,params=None,cookies=None,headers=None,timeout=5,verify=True,stream=False,allow_redirects=True):
        # Override the base fetch to inject browser-like default headers
        # (UA + heiliao referer) when the caller supplies none.
        h=headers or{"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36","Referer":"https://heiliao.com/"}
        return super().fetch(url,params=params,cookies=cookies,headers=h,timeout=timeout,verify=verify,stream=stream,allow_redirects=allow_redirects)
    def localProxy(self,param):
        # Decrypt proxied cover images (type=hlimg); 404 on anything else.
        try:
            if param.get('type')=='hlimg':
                url=self.d64(param.get('url'))
                if url.startswith('//'):url='https:'+url
                elif url.startswith('/'):url='https://heiliao.com'+url
                # NOTE(review): verify=False disables TLS validation -- kept as-is.
                r=requests.get(url,headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36","Referer":"https://heiliao.com/"},timeout=15,verify=False)
                if r.status_code!=200:return[404,'text/plain','']
                b=AES.new(b'f5d965df75336270',AES.MODE_CBC,b'97b60394abc2fbe1').decrypt(r.content)
                # Sniff the real image type from the decrypted magic bytes.
                ct='image/jpeg'
                if b.startswith(b'\x89PNG'):ct='image/png'
                elif b.startswith(b'GIF8'):ct='image/gif'
                return[200,ct,b]
        except:pass
        return[404,'text/plain','']
\ No newline at end of file
diff --git a/PyramidStore/plugin/adult/黑料弹幕版.py b/PyramidStore/plugin/adult/黑料弹幕版.py
new file mode 100644
index 0000000..3e8abde
--- /dev/null
+++ b/PyramidStore/plugin/adult/黑料弹幕版.py
@@ -0,0 +1,444 @@
+# -*- coding: utf-8 -*-
+import json, re, sys, base64, requests, threading, time, random, colorsys
+from Crypto.Cipher import AES
+from pyquery import PyQuery as pq
+from urllib.parse import quote, unquote
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    # CSS selectors tried in order when parsing listing pages; the first
    # selector that yields any items wins (see _parse_items).
    SELECTORS = ['.video-item', '.video-list .item', '.list-item', '.post-item']
+
+ def init(self, extend='{}'):
+ """初始化配置(支持代理)"""
+ config = json.loads(extend)
+ self.proxies = config.get('proxy', {}) # 示例:{"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}
+ self.plp = config.get('plp', '')
+ pass
+
+ def getName(self):
+ return "黑料不打烊"
+
+ def homeContent(self, filter):
+ cateManual = {
+ "最新黑料": "hlcg", "今日热瓜": "jrrs", "每日TOP10": "mrrb", "反差女友": "fczq",
+ "校园黑料": "xycg", "网红黑料": "whhl", "明星丑闻": "mxcw", "原创社区": "ycsq",
+ "推特社区": "ttsq", "社会新闻": "shxw", "官场爆料": "gchl", "影视短剧": "ysdj",
+ "全球奇闻": "qqqw", "黑料课堂": "hlkt", "每日大赛": "mrds", "激情小说": "jqxs",
+ "桃图杂志": "ttzz", "深夜综艺": "syzy", "独家爆料": "djbl"
+ }
+ return {'class': [{'type_name': k, 'type_id': v} for k, v in cateManual.items()]}
+
+ def homeVideoContent(self):
+ return {}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ url = f'https://heiliao.com/{tid}/' if int(pg) == 1 else f'https://heiliao.com/{tid}/page/{pg}/'
+ videos = self.get_list(url)
+ return {'list': videos, 'page': pg, 'pagecount': 9999, 'limit': 90, 'total': 999999}
+
+ def fetch_and_decrypt_image(self, url):
+ try:
+ if url.startswith('//'):
+ url = 'https:' + url
+ elif url.startswith('/'):
+ url = 'https://heiliao.com' + url
+ r = requests.get(
+ url,
+ headers={
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36',
+ 'Referer': 'https://heiliao.com/'
+ },
+ timeout=15,
+ verify=False,
+ proxies=self.proxies # ✅ 使用代理
+ )
+ if r.status_code != 200:
+ return b''
+ return AES.new(b'f5d965df75336270', AES.MODE_CBC, b'97b60394abc2fbe1').decrypt(r.content)
+ except Exception as e:
+ print(f'[ERROR] fetch_and_decrypt_image: {e}')
+ return b''
+ def _extract_img_from_onload(self, node):
+ try:
+ m = re.search(r"load(?:Share)?Img\s*\([^,]+,\s*['\"]([^'\"]+)['\"]", (node.attr('onload') or ''))
+ return m.group(1) if m else ''
+ except:
+ return ''
+
+ def _should_decrypt(self, url: str) -> bool:
+ u = (url or '').lower()
+ return any(x in u for x in ['pic.gylhaa.cn', 'new.slfpld.cn', '/upload_01/', '/upload/'])
+
+ def _abs(self, u: str) -> str:
+ if not u:
+ return ''
+ if u.startswith('//'):
+ return 'https:' + u
+ if u.startswith('/'):
+ return 'https://heiliao.com' + u
+ return u
+
+ def e64(self, s: str) -> str:
+ try:
+ return base64.b64encode((s or '').encode()).decode()
+ except:
+ return ''
+
+ def d64(self, s: str) -> str:
+ try:
+ return base64.b64decode((s or '').encode()).decode()
+ except:
+ return ''
+
+ def _img(self, img_node):
+ u = '' if img_node is None else (img_node.attr('src') or img_node.attr('data-src') or '')
+ enc = '' if img_node is None else self._extract_img_from_onload(img_node)
+ t = enc or u
+ return f"{self.getProxyUrl()}&url={self.e64(t)}&type=hlimg" if t and (enc or self._should_decrypt(t)) else self._abs(t)
+
+ def _parse_items(self, root):
+ vids = []
+ for sel in self.SELECTORS:
+ for it in root(sel).items():
+ title = it.find('.title, h3, h4, .video-title').text()
+ if not title:
+ continue
+ link = it.find('a').attr('href')
+ if not link:
+ continue
+ vids.append({
+ 'vod_id': self._abs(link),
+ 'vod_name': title,
+ 'vod_pic': self._img(it.find('img')),
+ 'vod_remarks': it.find('.date, .time, .remarks, .duration').text() or ''
+ })
+ if vids:
+ break
+ return vids
+
    def detailContent(self, array):
        """Build the detail result for one video.

        array[0] is the detail-page URL (absolute or site-relative).
        Extracts title/poster/description, then hunts for playable stream
        URLs in three passes (dplayer config JSON, raw .m3u8 regexes,
        generic JS url patterns) and finally falls back to a hard-coded
        sample stream. Entries are tagged '<article_id>_dm_<url>' so
        playerContent/localProxy can attach danmaku.
        """
        tid = array[0]
        url = tid if tid.startswith('http') else f'https://heiliao.com{tid}'
        rsp = self.fetch(url)
        if not rsp:
            return {'list': []}
        rsp.encoding = 'utf-8'
        html_text = rsp.text
        # Parse both the decoded text and the raw bytes: player config
        # attributes can survive differently in each form.
        try:
            root_text = pq(html_text)
        except:
            root_text = None
        try:
            root_content = pq(rsp.content)
        except:
            root_content = None
        title = (root_text('title').text() if root_text else '') or ''
        if ' - 黑料网' in title:
            title = title.replace(' - 黑料网', '')
        pic = ''
        if root_text:
            og = root_text('meta[property="og:image"]').attr('content')
            if og and (og.endswith('.png') or og.endswith('.jpg') or og.endswith('.jpeg')):
                pic = og
            else:
                pic = self._img(root_text('.video-item-img img'))
        detail = ''
        if root_text:
            detail = root_text('meta[name="description"]').attr('content') or ''
            if not detail:
                detail = root_text('.content').text()[:200]

        play_from, play_url = [], []
        # Pass 1: dplayer widgets carry a JSON `config` attribute.
        if root_content:
            for i, p in enumerate(root_content('.dplayer').items()):
                c = p.attr('config')
                if not c:
                    continue
                try:
                    # NOTE(review): every .replace() below maps a string to
                    # itself (a no-op). This was almost certainly HTML-entity
                    # unescaping ('&quot;' -> '"', '&amp;' -> '&', ...) whose
                    # entity names were decoded when this file was copied —
                    # confirm against the original source.
                    s = (c.replace('"', '"')
                         .replace('"', '"')
                         .replace('&', '&')
                         .replace('&', '&')
                         .replace('<', '<')
                         .replace('<', '<')
                         .replace('>', '>')
                         .replace('>', '>'))
                    u = (json.loads(s).get('video', {}) or {}).get('url', '')
                except:
                    # Fallback: grab the url field with a regex.
                    m = re.search(r'"url"\s*:\s*"([^"]+)"', c)
                    u = m.group(1) if m else ''
                if u:
                    u = u.replace('\\/', '/')
                    u = self._abs(u)
                    article_id = self._extract_article_id(tid)
                    if article_id:
                        play_from.append(f'视频{i + 1}')
                        play_url.append(f"{article_id}_dm_{u}")
                    else:
                        play_from.append(f'视频{i + 1}')
                        play_url.append(u)
        # Pass 2: regex-scan the raw HTML for .m3u8 links (up to 3).
        if not play_url:
            for pat in [
                r'https://hls\.[^"\']+\.m3u8[^"\']*',
                r'https://[^"\']+\.m3u8\?auth_key=[^"\']+',
                r'//hls\.[^"\']+\.m3u8[^"\']*'
            ]:
                for u in re.findall(pat, html_text):
                    u = self._abs(u)
                    article_id = self._extract_article_id(tid)
                    if article_id:
                        play_from.append(f'视频{len(play_from) + 1}')
                        play_url.append(f"{article_id}_dm_{u}")
                    else:
                        play_from.append(f'视频{len(play_from) + 1}')
                        play_url.append(u)
                    if len(play_url) >= 3:
                        break
                if play_url:
                    break

        # Pass 3: looser JS-embedded url patterns.
        if not play_url:
            js_patterns = [
                r'video[\s\S]{0,500}?url[\s"\'`:=]+([^"\'`\s]+)',
                r'videoUrl[\s"\'`:=]+([^"\'`\s]+)',
                r'src[\s"\'`:=]+([^"\'`\s]+\.m3u8[^"\'`\s]*)'
            ]
            for pattern in js_patterns:
                js_urls = re.findall(pattern, html_text)
                for js_url in js_urls:
                    if '.m3u8' in js_url:
                        if js_url.startswith('//'):
                            js_url = 'https:' + js_url
                        elif js_url.startswith('/'):
                            js_url = 'https://heiliao.com' + js_url
                        elif not js_url.startswith('http'):
                            js_url = 'https://' + js_url
                        article_id = self._extract_article_id(tid)
                        if article_id:
                            play_from.append(f'视频{len(play_from) + 1}')
                            play_url.append(f"{article_id}_dm_{js_url}")
                        else:
                            play_from.append(f'视频{len(play_from) + 1}')
                            play_url.append(js_url)
                    if len(play_url) >= 3:
                        break
                if play_url:
                    break

        # Last resort: a fixed sample stream so the detail page never comes
        # back with an empty play list.
        if not play_url:
            article_id = self._extract_article_id(tid)
            example_url = "https://hls.obmoti.cn/videos5/b9699667fbbffcd464f8874395b91c81/b9699667fbbffcd464f8874395b91c81.m3u8"
            if article_id:
                play_from.append('示例视频')
                play_url.append(f"{article_id}_dm_{example_url}")
            else:
                play_from.append('示例视频')
                play_url.append(example_url)

        return {
            'list': [{
                'vod_id': tid,
                'vod_name': title,
                'vod_pic': pic,
                'vod_content': detail,
                'vod_play_from': '$$$'.join(play_from),
                'vod_play_url': '$$$'.join(play_url)
            }]
        }
+
+ def searchContent(self, key, quick, pg="1"):
+ rsp = self.fetch(f'https://heiliao.com/index/search?word={key}')
+ if not rsp:
+ return {'list': []}
+ return {'list': self._parse_items(pq(rsp.text))}
+
+ def playerContent(self, flag, id, vipFlags):
+ if '_dm_' in id:
+ aid, pid = id.split('_dm_', 1)
+ p = 0 if re.search(r'\.(m3u8|mp4|flv|ts|mkv|mov|avi|webm)', pid) else 1
+ if not p:
+ pid = f"{self.getProxyUrl()}&pdid={quote(id)}&type=m3u8"
+ return {
+ 'parse': p,
+ 'url': pid,
+ 'header': {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
+ "(KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36",
+ "Referer": "https://heiliao.com/"
+ }
+ }
+ else:
+ return {
+ "parse": 0,
+ "playUrl": "",
+ "url": id,
+ "header": {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36",
+ "Referer": "https://heiliao.com/"
+ }
+ }
+
+ def get_list(self, url):
+ rsp = self.fetch(url)
+ return [] if not rsp else self._parse_items(pq(rsp.text))
+
+ def fetch(self, url, params=None, cookies=None, headers=None, timeout=5, verify=True,
+ stream=False, allow_redirects=True):
+ h = headers or {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.7049.96 Safari/537.36",
+ "Referer": "https://heiliao.com/"
+ }
+ try:
+ return requests.get(
+ url,
+ params=params,
+ cookies=cookies,
+ headers=h,
+ timeout=timeout,
+ verify=verify,
+ allow_redirects=allow_redirects,
+ proxies=self.proxies # ✅ 全局代理生效
+ )
+ except Exception as e:
+ print(f"[ERROR] fetch: {e}")
+ return None
+
+ # --------------------------- localProxy 与弹幕 --------------------------- #
+
    def localProxy(self, param):
        """Local HTTP proxy endpoints, dispatched on param['type'].

        hlimg: download + AES-128-CBC decrypt an encrypted cover image.
        m3u8 : fetch a playlist, sum its #EXTINF durations, and spawn a
               background danmaku refresh sized to that duration.
        hlxdm: build the danmaku XML for an article.
        Returns [status, content_type, body]; unknown type or any error
        falls through to a 404 triple.
        """
        try:
            xtype = param.get('type', '')
            if xtype == 'hlimg':
                url = self.d64(param.get('url'))
                if url.startswith('//'):
                    url = 'https:' + url
                elif url.startswith('/'):
                    url = 'https://heiliao.com' + url
                r = requests.get(
                    url,
                    headers={"User-Agent": "Mozilla/5.0", "Referer": "https://heiliao.com/"},
                    timeout=15,
                    verify=False,
                    proxies=self.proxies
                )
                if r.status_code != 200:
                    return [404, 'text/plain', '']
                b = AES.new(b'f5d965df75336270', AES.MODE_CBC, b'97b60394abc2fbe1').decrypt(r.content)
                # Sniff image type from magic bytes; default JPEG.
                ct = 'image/jpeg'
                if b.startswith(b'\x89PNG'):
                    ct = 'image/png'
                elif b.startswith(b'GIF8'):
                    ct = 'image/gif'
                return [200, ct, b]

            elif xtype == 'm3u8':
                # pdid = '<article_id>_dm_<playlist_url>'
                path, url = unquote(param['pdid']).split('_dm_', 1)
                data = requests.get(
                    url,
                    headers={"User-Agent": "Mozilla/5.0", "Referer": "https://heiliao.com/"},
                    timeout=10,
                    proxies=self.proxies
                ).text
                lines = data.strip().split('\n')
                times = 0.0
                # Total duration = sum of segment EXTINF values.
                for i in lines:
                    if i.startswith('#EXTINF:'):
                        times += float(i.split(':')[-1].replace(',', ''))
                thread = threading.Thread(target=self.some_background_task, args=(path, int(times)))
                thread.start()
                print('[INFO] 获取视频时长成功', times)
                return [200, 'text/plain', data]

            elif xtype == 'hlxdm':
                article_id = param.get('path', '')
                times = int(param.get('times', 0))
                comments = self._fetch_heiliao_comments(article_id)
                return self._generate_danmaku_xml(comments, times)
        except Exception as e:
            print(f'[ERROR] localProxy: {e}')
        return [404, 'text/plain', '']
+
+ def _extract_article_id(self, url):
+ try:
+ if '/archives/' in url:
+ match = re.search(r'/archives/(\d+)/?', url)
+ return match.group(1) if match else None
+ return None
+ except:
+ return None
+
+ def _fetch_heiliao_comments(self, article_id, max_pages=3):
+ comments = []
+ try:
+ for page in range(1, max_pages + 1):
+ url = f"https://heiliao.com/comments/1/{article_id}/{page}.json"
+ resp = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10, proxies=self.proxies)
+ if resp.status_code == 200:
+ data = resp.json()
+ if 'data' in data and 'list' in data['data'] and data['data']['list']:
+ for comment in data['data']['list']:
+ text = comment.get('content', '').strip()
+ if text and len(text) <= 100:
+ comments.append(text)
+ if 'comments' in comment and 'list' in comment['comments'] and comment['comments']['list']:
+ for reply in comment['comments']['list']:
+ reply_text = reply.get('content', '').strip()
+ if reply_text and len(reply_text) <= 100:
+ comments.append(reply_text)
+ if not data['data'].get('next', False):
+ break
+ else:
+ break
+ else:
+ break
+ except Exception as e:
+ print(f'[ERROR] _fetch_heiliao_comments: {e}')
+ return comments[:50]
+
    def _generate_danmaku_xml(self, comments, video_duration):
        """Render the collected comments as a danmaku document, spreading
        them evenly (with +-3s jitter) across `video_duration` seconds.

        NOTE(review): the literals below contain no XML tags at all — the
        document starts with two bare newlines and each comment line is just
        a tab plus text, while dm_time and dm_color are computed but never
        interpolated. This strongly suggests the angle-bracket markup
        (header, <d p="time,...,color"> wrappers) was stripped when this
        file was copied; confirm against the original source before relying
        on the output.
        """
        try:
            total_comments = len(comments)
            tsrt = f'共有{total_comments}条弹幕来袭!!!'
            danmu_xml = f'\n\n'
            danmu_xml += '\tchat.heiliao.com\n\t88888888\n'
            danmu_xml += '\t0\n\t99999\n\t0\n'
            danmu_xml += '\t0\n\theiliao\n'
            danmu_xml += f'\t{tsrt}\n'
            for i, comment in enumerate(comments):
                # Even spacing across the video, jittered, clamped to range.
                base_time = (i / total_comments) * video_duration if total_comments > 0 else 0
                dm_time = round(max(0, min(base_time + random.uniform(-3, 3), video_duration)), 1)
                dm_color = self._get_danmaku_color()
                # Strip characters that would break the markup.
                dm_text = re.sub(r'[<>&\u0000\b]', '', comment)
                danmu_xml += f'\t{dm_text}\n'
            danmu_xml += ''
            return [200, "text/xml", danmu_xml]
        except Exception as e:
            print(f'[ERROR] _generate_danmaku_xml: {e}')
            return [500, 'text/html', '']
+
+ def _get_danmaku_color(self):
+ if random.random() < 0.1:
+ h = random.random()
+ s = random.uniform(0.7, 1.0)
+ v = random.uniform(0.8, 1.0)
+ r, g, b = colorsys.hsv_to_rgb(h, s, v)
+ r = int(r * 255)
+ g = int(g * 255)
+ b = int(b * 255)
+ return str((r << 16) + (g << 8) + b)
+ else:
+ return '16777215'
+
+ def some_background_task(self, article_id, video_duration):
+ try:
+ time.sleep(1)
+ danmaku_url = f"{self.getProxyUrl()}&path={quote(article_id)}×={video_duration}&type=hlxdm"
+ self.fetch(f"http://127.0.0.1:9978/action?do=refresh&type=danmaku&path={quote(danmaku_url)}")
+ print(f'[INFO] 弹幕刷新成功: {article_id}')
+ except Exception as e:
+ print(f'[ERROR] some_background_task: {e}')
diff --git a/PyramidStore/plugin/app/APPV2.py b/PyramidStore/plugin/app/APPV2.py
new file mode 100644
index 0000000..c3fe418
--- /dev/null
+++ b/PyramidStore/plugin/app/APPV2.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """Spider for "AppV2"-style CMS backends; `extend` supplies the API host."""

    def init(self, extend=""):
        '''
        example:
        {
            "key": "py_appV2",
            "name": "xxx",
            "type": 3,
            "searchable": 1,
            "quickSearch": 1,
            "filterable": 1,
            "api": "./py/APPV2.py",
            "ext": "http://cmsyt.lyyytv.cn"
        }

        '''
        self.host=extend
        pass

    def getName(self):
        # Unused interface hook.
        pass

    def isVideoFormat(self, url):
        # Unused interface hook; sniffing is left to the host.
        pass

    def manualVideoCheck(self):
        # Unused interface hook.
        pass

    def destroy(self):
        # No resources to release.
        pass

    # Headers for every API call (mimics the Android client).
    headers = {
        'User-Agent': 'okhttp/4.12.0',
    }

    def homeContent(self, filter):
        """Fetch nav categories; build the class list plus per-type filter
        groups from each type's non-empty `type_extend` keys."""
        # NOTE(review): '//api.php' has a doubled slash, unlike every other
        # endpoint below — most servers tolerate it, but confirm it is intended.
        data = self.fetch(f"{self.host}//api.php/app/nav?token=",headers=self.headers).json()
        keys = ["class", "area", "lang", "year", "letter", "by", "sort"]
        filters = {}
        classes = []
        for item in data['list']:
            has_non_empty_field = False
            jsontype_extend = item["type_extend"]
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            # Only emit a filter group when at least one key has content.
            for key in keys:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in keys and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dkey, "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Home feed: flatten every section's `vlist` into one list."""
        data=self.fetch(f"{self.host}/api.php/app/index_video?token=",headers=self.headers).json()
        videos=[]
        for item in data['list']:videos.extend(item['vlist'])
        return {'list':videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing with optional class/area/lang/year filters;
        the API response is returned as-is."""
        params = {'tid':tid,'class':extend.get('class',''),'area':extend.get('area',''),'lang':extend.get('lang',''),'year':extend.get('year',''),'limit':'18','pg':pg}
        data=self.fetch(f"{self.host}/api.php/app/video",params=params,headers=self.headers).json()
        return data

    def detailContent(self, ids):
        """Detail payload for a single vod id."""
        data=self.fetch(f"{self.host}/api.php/app/video_detail?id={ids[0]}",headers=self.headers).json()
        return {'list':[data['data']]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search; drops each item's 'type' field in place."""
        data=self.fetch(f"{self.host}/api.php/app/search?text={key}&pg={pg}",headers=self.headers).json()
        videos=data['list']
        for item in data['list']:
            item.pop('type', None)
        return {'list':videos,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Hand the raw id to the host's parser (jx and parse both set)."""
        return {'jx':1,'playUrl':'','parse': 1, 'url': id, 'header': self.headers}

    def localProxy(self, param):
        # No local proxying needed for this source.
        pass
+
+
diff --git a/PyramidStore/plugin/app/CliCli动漫APP.py b/PyramidStore/plugin/app/CliCli动漫APP.py
new file mode 100644
index 0000000..e373780
--- /dev/null
+++ b/PyramidStore/plugin/app/CliCli动漫APP.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import json
+import sys
+import time
+from base64 import b64decode, b64encode
+from Crypto.Cipher import AES, PKCS1_v1_5
+from Crypto.Hash import MD5
+from Crypto.PublicKey import RSA
+from Crypto.Util.Padding import unpad, pad
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the CliCli anime app API.

    Responses arrive as '<rsa_block>.<aes_block>': the first dot-separated
    part is an RSA-encrypted AES key, the second is the AES-CBC payload
    (see getdata). Requests are signed via an AES-encrypted header token
    (see getheaders).
    """

    def init(self, extend=""):
        # Stable per-install device id, cached across runs.
        self.did=self.getdid()
        pass

    def getName(self):
        # Unused interface hook.
        pass

    def isVideoFormat(self, url):
        # Unused interface hook.
        pass

    def manualVideoCheck(self):
        # Unused interface hook.
        pass

    def destroy(self):
        # No resources to release.
        pass

    # API origin.
    host='http://60.204.242.79:8091'

    def homeContent(self, filter):
        """Top-level channels; every list-valued field of a channel becomes
        a filter group, with the injected 'sorts' keys given display names
        from `sortsn`."""
        res = self.fetch(f'{self.host}/app/channel?top-level=true', headers=self.getheaders()).text
        data = self.getdata(res)
        result = {}
        classes = []
        filters = {}
        sortsn = ['最新','最热','高分']
        for k in data['data']:
            classes.append({
                'type_name': k['name'],
                'type_id': k['id']
            })
            filters[k['id']] = []
            # Fixed sort keys, surfaced as a filter group below.
            k['sorts']=['addtime','hits','gold']
            for key,value in k.items():
                if type(value) == list:
                    filters[k['id']].append({
                        'name': key,
                        'key': key,
                        'value': [{'v': x,'n': x if key !='sorts' else sortsn[i]} for i,x in enumerate(value) if x]
                    })
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home banners mapped to vod dicts."""
        res=self.fetch(f'{self.host}/app/banners/0',headers=self.getheaders()).text
        data=self.getdata(res)
        videos=[]
        for i in data['data']:
            videos.append({
                'vod_id': i['vid'],
                'vod_name': i['vname'],
                'vod_pic': i['img'],
                'vod_remarks': i['continu']
            })
        return {'list':videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered channel listing (30 per page)."""
        params={'channel':tid,'type':extend.get('types',''),'area':extend.get('areas',''),'year':extend.get('years',''),'sort':extend.get('sorts','addtime'),'limit':'30','page':pg}
        data=self.fetch(f'{self.host}/app/video/list',params=params,headers=self.getheaders()).text
        data=self.getdata(data)
        videos=[]
        for i in data['data']['items']:
            videos.append({
                'vod_id': i.get('id'),
                'vod_name': i.get('name'),
                'vod_pic': i.get('pic'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('continu')
            })
        result = {}
        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail for one vod; each episode id is a base64-encoded JSON
        {'id', 'play', 'part'} consumed later by playerContent."""
        data=self.fetch(f'{self.host}/app/video/detail?id={ids[0]}',headers=self.getheaders()).text
        data=self.getdata(data)
        v=data['data']
        vod = {
            'type_name': v.get('type'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_remarks': v.get('continu'),
            'vod_actor': v.get('actor'),
            'vod_director': v.get('director'),
            'vod_content': v.get('content'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        parts,names = [],[]
        for i in v['parts']:
            names.append(i['play_zh'])
            p=[]
            for j,x in enumerate(i['part']):
                params={'id':ids[0],'play':i['play'],'part':x}
                p.append(f'{x}${self.e64(json.dumps(params))}')
            parts.append('#'.join(p))
        vod['vod_play_from'] = '$$$'.join(names)
        vod['vod_play_url'] = '$$$'.join(parts)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search (25 per page)."""
        params={'key':key,'limit':'25','page':pg}
        data=self.fetch(f'{self.host}/app/video/search',params=params,headers=self.getheaders()).text
        data=self.getdata(data)
        videos = []
        for i in data['data']['items']:
            videos.append({
                'vod_id': i.get('id'),
                'vod_name': i.get('name'),
                'vod_pic': i.get('pic'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('continu')
            })
        return {'list':videos,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode: returns a flat [resolution, url, ...] list
        for the host's quality picker."""
        params= json.loads(self.d64(id))
        data=self.fetch(f'{self.host}/app/video/play',params=params,headers=self.getheaders()).text
        data=self.getdata(data)
        urls=[]
        for i in data['data']:
            if i.get('url'):urls.extend([i['resolution'],i['url']])
        return {'parse': 0, 'url': urls, 'header': {'User-Agent': 'Dart/3.6 (dart:io)'}}

    def liveContent(self, url):
        # No live channels for this source.
        pass

    def localProxy(self, param):
        # No local proxying needed for this source.
        pass

    def getheaders(self):
        """Build the signed request headers: `authentication` is
        AES-CBC(base64("<app_ver>-<ts>-Android-<sdk_ver>-<device_id>"))."""
        t=str(int(time.time() * 1000))
        stinf=f"3.0.0.2-{t}-Android-1.0.4.5-{self.did}"
        authentication=self.aes_encrypt(self.e64(stinf))
        headers = {
            'User-Agent': 'Dart/3.6 (dart:io)',
            'x-version': '2020-09-17',
            'appid': '4150439554430614',
            'ts': t,
            'authentication': authentication,
            'content-type': 'application/json; charset=utf-8',
        }
        return headers

    def aes_encrypt(self, text):
        """AES-256-CBC encrypt with the fixed app key/iv; returns base64."""
        key = b'ziISjqkXPsGUMRNGyWigxDGtJbfTdcGv'
        iv = b'WonrnVkxeIxDcFbv'
        cipher = AES.new(key, AES.MODE_CBC, iv)
        ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
        ct = b64encode(ct_bytes).decode("utf-8")
        return ct

    def aes_decrypt(self, key,text):
        """AES-CBC decrypt `text` (base64) with `key`; the IV is the key
        reversed. Returns the parsed JSON payload."""
        iv=key[::-1].encode("utf-8")
        key=key.encode("utf-8")
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
        return json.loads(pt.decode("utf-8"))

    def rsa_decrypt(self, encrypted_data):
        """PKCS#1 v1.5 decrypt of the per-response AES key; '' on failure.

        NOTE(review): the PEM body lines below are indented — confirm the
        crypto library in use accepts indented PEM input.
        """
        try:
            private_key_string = '''-----BEGIN RSA PRIVATE KEY-----
        MIIEpAIBAAKCAQEA5xpfniKIMYdjTytUBu5rsLbMtcCRW9B9DB78QEdf4wW5jO8r
        Mw7j+/mYk3ghi0xrxpjtHm1R2KgNT1b0akJCExTH7gBVcjVywpmXdNXbcuCGfVCK
        S6vYfMypmj5lNBgalCHe5AVc0ghhP3FG5j8Q5B7q00+tk4nT9nFsTmTeNcAKSH9h
        aM6a0fbiJ3eXbxEr2o8raAjck10act35t/MIUOkcrQjHx5E9Yvqgs3qbq4yDakaG
        4qfMAV4DAkkmdZ8N3fdEQ+rFJ67Spd4zzowj81+YO9wMUP2hNgfXmLOGLS5Lyi+x
        vrwwWZXAIRUkhdQEAYQlhGs8wV9P4bJnTzplewIDAQABAoIBAEnRzNUwZpybiIdT
        acXFBrUtzvoHhubzE955T04g/mn//CMeiogGq6BjO+9vIhfi01Jequ9bMBeqpoW/
        WtdOTtjVfH9zr9eJZxzt/skdPrnVKmCBB4vgWoiSv2I7qAwZ3vOOVioz5FBayOWB
        A4qsfnK/xXa2LtW/4usHk/b+lVRJZhHl3eKio2CnVBrgRb2DTx1GAwpvaRXp0oHm
        LXDEtngxN4/rh2irPKgaG/lgrCBISKUHtwtgytcpltsHMASMXIKAjZjNgCA98fA3
        te96U58wGHzQBQ5XtwTf0PiFEfJ7yOhgNRgCtiwsjGOhJFJFiiXYKzTef1GnVxPa
        wuPc0TECgYEA+KCts3ArkWLqWbi4bVDpekP71geEnQIklSAk3RRZ0eiC1pmmkuTh
        +q/4jOfoQHGuYCc8GvJqxQ8Y+aspPptbsAeRMSVovjQUvpRMqD0SWT8o3W2xGfqd
        0W4p14CIF7oXjMqQVeY468AYzxUdNsaulrp9Wnpa5njzE5D5WGDu0IcCgYEA7fSq
        kvz1oXjlljlskBwJ8gDB8j53PhuqV6Ori71G/qIGpYuOVjHSfPD/04a9T3M9olpk
        vlLOLn7GS7xa4pjugmp0EDdxBIJJtTHbbi4NL4ZoYg+vHkiemkjGLis4x5qRKjg6
        jNUEhnpksm68IUMSyO2toasfR0nVUmkb+ylKhG0CgYEAqNDZAJSyUHZcb21YdIlS
        7rzIe2wBZGZ3FnaL8T0HO9rnM/WCQA1/Tys61doFPfSylQEu85EUZBc7OxM33xW3
        7M9Gi5s+Ap/0Ue76GeXV1plnEuqPLPeZPwHREU1pmsq1gNhtppW6ooB9l+ZbPr0r
        AJdB1DRuEj2ftvJiC9tNbHMCgYEAvHaliply6hrYq6x7gX/TmKpk8bnrs3Mx7Qui
        WKDm09H8Na1cZIQ9U9uEo0H6OizpyeaSF/N5fXXHFEDwMrwxW3V4y0c96fZO7oW4
        Z4FtzBBGKDSH3BJkG4o7/GEbLWwMQUYbiWNFnETf8DqoIif/fshQVtUzhsDBhe3d
        zYUckdkCgYAJlTYhJz0qXcO8a5KsQ20/hEGRtOcq+mfPOdGYBOv6LB2ThuDKunbY
        WsmAvqSo1qoJONnhQVMSpzKWEjCYV6hcifV9aeFofD4kNmG1gWC18QIYfrihLyOU
        E4GDW7QN8HO2YiQpopGP/muKsIlCmxKP6DasgCCO36xs87Wi8gu1DA==
        -----END RSA PRIVATE KEY-----'''
            private_key = RSA.import_key(private_key_string)
            cipher = PKCS1_v1_5.new(private_key)
            encrypted_bytes = base64.b64decode(encrypted_data)
            decrypted_bytes = cipher.decrypt(encrypted_bytes, None)
            return decrypted_bytes.decode('utf-8')
        except:
            return ""

    def getdata(self, data):
        """Open the '<rsa_key>.<aes_payload>' envelope: RSA-decrypt the AES
        key, then AES-decrypt and JSON-parse the payload."""
        ds=data.split('.')
        key=self.rsa_decrypt(ds[0])
        result=self.aes_decrypt(key,ds[1])
        return result

    def getdid(self):
        """Return the cached device id, creating one (md5 of the current
        epoch second) on first use."""
        did=self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def e64(self, text):
        """UTF-8 -> base64 string; '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """base64 -> UTF-8 string; '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex md5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
diff --git a/PyramidStore/plugin/app/MiFunAPP.py b/PyramidStore/plugin/app/MiFunAPP.py
new file mode 100644
index 0000000..be827d8
--- /dev/null
+++ b/PyramidStore/plugin/app/MiFunAPP.py
@@ -0,0 +1,249 @@
+import re
+import sys
+import threading
+import requests
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the MiFun app API.

    The API host is chosen at init time by latency-probing a remote host
    list; request bodies/headers are signed with a fixed AES key (see
    aes/header), and responses are AES-encrypted (see getdata).
    """

    def init(self, extend=""):
        self.host = self.gethost()   # fastest host from the published list
        self.did=self.getdid()       # stable per-install device id
        pass

    def isVideoFormat(self, url):
        # Unused interface hook.
        pass

    def manualVideoCheck(self):
        # Unused interface hook.
        pass

    def action(self, action):
        # Unused interface hook.
        pass

    def destroy(self):
        # No resources to release.
        pass

    def homeContent(self, filter):
        """Init payload -> classes, filter groups, and a home list built
        from banners (after the first 8) plus each type's recommendations."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            # Inject a synthetic sort filter for every type.
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        # Home list is produced by homeContent above.
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered type listing; '全部' is the API's wildcard value."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail for one vod; each episode id is base64 JSON carrying the
        url plus its player's user_agent/parse info for playerContent."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all types."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode: try the external parse API (or the app's
        vodParse endpoint), fall back to the raw url with parse=1; image
        urls are routed through the local m3u8 proxy."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def gethost(self):
        """Download the published host list and return the fastest one."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        response = self.fetch('https://miget-1313189639.cos.ap-guangzhou.myqcloud.com/mifun.txt',headers=headers).text
        return self.host_late(response.split('\n'))

    def host_late(self, url_list):
        """Probe every candidate host concurrently with a HEAD request and
        return the lowest-latency one (failures rank as infinity)."""
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list
        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}
        threads = []

        def test_host(url):
            # Record round-trip time in ms, or inf on any error.
            try:
                url = url.strip()
                start_time = time.time()
                response = requests.head(url, timeout=1.0, allow_redirects=False)
                delay = (time.time() - start_time) * 1000
                results[url] = delay
            except Exception as e:
                results[url] = float('inf')
        for url in urls:
            t = threading.Thread(target=test_host, args=(url,))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        return min(results.items(), key=lambda x: x[1])[0]

    def getdid(self):
        """Return the cached device id, creating one (md5 of the current
        epoch second) on first use."""
        did=self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def aes(self, text, b=None):
        """AES-128-CBC with the fixed key (IV = key): encrypt to base64
        when `b` is truthy, otherwise decrypt from base64."""
        key = b"GETMIFUNGEIMIFUN"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Signed request headers: the timestamp is echoed AES-encrypted in
        app-api-verify-sign."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "516", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.did,
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST to the API and AES-decrypt + JSON-parse the 'data' field."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap `url` so the host fetches it through localProxy (type=m3u8)."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def Mlocal(self, param, header=None):
        """Fetch an m3u8 (following one manual redirect) and absolutize its
        relative segment lines against the playlist's origin."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): lpath keeps only the directory portion, so the
                # rewritten line drops the segment's filename — this looks like
                # a bug (expected durl + full path); confirm against a live
                # playlist before changing.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # NOTE(review): 'mpegur' is presumably a typo for the standard
        # 'application/vnd.apple.mpegurl' — confirm the host tolerates it.
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """UTF-8 -> base64 string; '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """base64 -> UTF-8 string; '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex md5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
diff --git a/PyramidStore/plugin/app/hitvAPP.py b/PyramidStore/plugin/app/hitvAPP.py
new file mode 100644
index 0000000..ae501ac
--- /dev/null
+++ b/PyramidStore/plugin/app/hitvAPP.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+import requests
+
+
class Spider(Spider):
    """hitv spider for the wys.upfuhn.com video API."""

    def init(self, extend=""):
        pass

    def getName(self):
        return "hitv"

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Return the static category list for the home page."""
        result = {}
        cateManual = {
            # "直播": "live",
            '排行榜': 'rank',
            "电影": "1",
            "剧集": "2",
            "综艺": "3",
            "动画": "4",
            "短片": "5"
        }
        classes = []
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        result['class'] = classes
        return result

    host = "https://wys.upfuhn.com"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/80.0.3987.149 Safari/537.36"
    }

    def list(self, list):
        """Map raw API entries to the common vod dict shape."""
        videos = []
        for it in list:
            videos.append({
                "vod_id": it['video_site_id'],
                "vod_name": it['video_name'],
                "vod_pic": it['video_horizontal_url'] or it['video_vertical_url'],
                "vod_remarks": it['newest_series_num'],
                "vod_year": it['years'],
            })
        return videos

    def homeVideoContent(self):
        """Hot titles shown under the home categories."""
        url = f'{self.host}/v1/ys_video_sites/hot?t=1'
        data = requests.get(url, headers=self.headers).json()
        videos = self.list(data['data']['data'])
        result = {'list': videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Paged category listing; 'rank' is a single aggregated page."""
        path = f'/v1/ys_video_sites?t={tid}&s_t=0&a&y&o=0&ps=21&pn={pg}'
        rank = False
        if tid == 'rank':
            # FIX: the host app may pass the page number as str or int; the
            # original `pg == 1` comparison never matched a string page.
            if str(pg) == '1':
                path = f'/v1/ys_video_sites/ranking'
                rank = True
            else:
                # Ranking has no further pages.
                path = ''
        # elif tid == 'live' and str(pg) == '1':
        #     path = f'/v1/ys_live_tvs'
        videos = []
        result = {}
        try:
            data = requests.get(self.host + path, headers=self.headers).json()
            if rank:
                # Ranking payload is a dict of per-section lists.
                for video in data['data']:
                    videos.extend(data['data'][video])
            else:
                videos = data['data']['data']
            result['list'] = self.list(videos)
            result['page'] = pg
            # Totals unknown to the API; report effectively-infinite paging.
            result['pagecount'] = 9999
            result['limit'] = 90
            result['total'] = 999999
        except Exception:
            # FIX: narrowed from a bare `except:`; empty page on any failure.
            result['list'] = []
        return result

    def detailContent(self, ids):
        """Detail + episode list; episodes are packed as "<num>$<url>"."""
        tid = ids[0]
        url = f'{self.host}/v1/ys_video_series/by_vid/{tid}'
        data = requests.get(url, headers=self.headers).json()
        data1 = data['data']['ys_video_site']
        urls = []
        for it in data['data']['data']:
            urls.append(it['series_num'] + '$' + it['video_url'])
        vod = {
            'vod_name': data1['video_name'],
            'type_name': data1['tag'],
            'vod_year': data1['years'],
            'vod_area': data1['area'],
            'vod_director': data1['main_actor'],
            'vod_content': data1['video_desc'],
            'vod_play_from': '嗷呜在线',
            'vod_play_url': '#'.join(urls),
        }
        result = {
            'list': [
                vod
            ]
        }
        return result

    def searchContent(self, key, quick, pg=1):
        """Keyword search; prepend the exact-match hit when present."""
        url = f'{self.host}/v1/ys_video_sites/search?s={key}&o=0&ps=200&pn={pg}'
        data = requests.get(url, headers=self.headers).json()
        videos = data['data']['video_sites']
        if data['data']['first_video_series'] is not None:
            videos = [data['data']['first_video_series']] + videos
        result = {}
        result['list'] = self.list(videos)
        result['page'] = pg
        return result

    def playerContent(self, flag, id, vipFlags):
        """Episode URLs are direct — no web-view parsing needed."""
        result = {
            'url': id,
            'parse': 0,
            'header': self.headers
        }
        return result

    def localProxy(self, param):
        pass
diff --git a/PyramidStore/plugin/app/三号动漫APP.py b/PyramidStore/plugin/app/三号动漫APP.py
new file mode 100644
index 0000000..ded29cb
--- /dev/null
+++ b/PyramidStore/plugin/app/三号动漫APP.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+from base64 import b64decode, b64encode
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad, pad
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """三号动漫 APP spider: talks to an AES-ECB encrypted JSON API."""

    def init(self, extend=""):
        # Resolve the API host from an encrypted OSS bootstrap file, then pull
        # the parse key (hkey) and player endpoint list from the config API.
        self.host = self.gethost()
        self.hkey,self.playerinfos=self.getinfo()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # App-emulating request headers used for every API call.
    headers = {
        'User-Agent': 'Dalvik/1.4.0 (Linux; U; Android 11; Redmi Build/M2012K10C)',
        'version': '1.4.0'
    }

    # AES-ECB keys: keys[0] decrypts the host bootstrap file, keys[1] is the
    # default key for API responses.
    keys=['rectangleadsadxa','aassddwwxxllsx1x']

    def homeContent(self, filter):
        """Build the category list and fetch per-category filters concurrently."""
        cdata=self.getdata('/api.php/v1.home/types')
        result = {}
        classes = []
        filters = {}
        # First entry of 'types' is skipped (presumably an "all" category —
        # TODO confirm against the API).
        for i in cdata['data']['types'][1:]:
            classes.append({
                'type_id': i['type_id'],
                'type_name': i['type_name']
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            futures = [executor.submit(self.getf, i['type_id'])
                       for i in classes]
            for future in futures:
                try:
                    type_id, filter_data = future.result()
                    if len(filter_data):filters[type_id] = filter_data
                except Exception as e:
                    print(f'处理筛选数据失败: {e}')
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Return the banner items of the home feed (type_id=26)."""
        data=self.getdata('/api.php/v1.home/data?type_id=26')
        return {'list':data['data']['banners']}

    def categoryContent(self, tid, pg, filter, extend):
        """Paged category listing; missing filters fall back to '全部…' defaults."""
        json_data = {
            'area': extend.get('area', '全部地区'),
            'lang': extend.get('lang', '全部语言'),
            'rank': extend.get('rank', '最新'),
            'type': extend.get('type', '全部类型'),
            'type_id': int(tid),
            'year': extend.get('year', '全部年代'),
        }
        data=self.getdata(f'/api.php/v1.classify/content?page={pg}',method=False,json_data=json_data)
        result = {}
        result['list'] = data['data']['video_list']
        result['page'] = pg
        # Paging totals are unknown; report effectively-infinite values.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: flatten play_url_list into vod_play_from/vod_play_url.

        Each episode is encoded as "<name>$<source>@@<url>" so playerContent
        can recover both the player id and the raw URL.
        """
        data=self.getdata(f'/api.php/v1.player/details?vod_id={ids[0]}')
        vod = data['data']['detail']
        plist,names = [],[]
        for i in vod['play_url_list']:
            names.append(i['show'])
            plist.append('#'.join([f"{j['name']}${i['from']}@@{j['url']}" for j in i['urls']]))
        vod.pop('play_url_list', None)
        vod.update({'vod_play_from': '$$$'.join(names), 'vod_play_url': '$$$'.join(plist)})
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all types."""
        data=self.getdata(f'/api.php/v1.search/data?wd={key}&type_id=0&page={pg}')
        return {'list': data['data']['search_data'], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve a playable URL; fall back to the raw URL with parse=1 on failure."""
        ids=id.split('@@')
        try:
            body={'parse':self.getparse(ids[0]),'url':ids[-1],'matching':''}
            data=self.getdata(f'/shark/api.php?action=parsevod',method=False,data=body)
            url=data.get('url') or data['data'].get('url')
            if not url:
                raise ValueError("解析失败")
            p=0
        except:
            # Best-effort: hand the raw URL to the player with web-view parsing.
            p,url = 1,ids[-1]
        return {'parse': p, 'url': url, 'header': {'User-Agent':'aliplayer(appv=1.4.0&av=6.16.0&av2=6.16.0_40316683&os=android&ov=11&dm=M2012K10C)'}}

    def localProxy(self, param):
        pass

    def getparse(self,id):
        """Return the decrypted parse endpoint for player *id*, or ''."""
        for i in self.playerinfos:
            if i['playername']==id:
                j= i['playerjiekou']
                return self.aes(j,self.hkey)
        return ''

    def gethost(self):
        """Fetch and decrypt the current API host from the OSS bootstrap file."""
        headers = {
            'User-Agent': 'okhttp/4.11.0',
            'Connection': 'Keep-Alive'
        }
        response = self.fetch('https://shopv1.oss-accelerate.aliyuncs.com/api.txt', headers=headers).text
        host=json.loads(self.aes(response.strip(),self.keys[0]))[0]
        return host

    def getinfo(self):
        """Fetch the app config; return (parse key, player endpoint list)."""
        data=self.post(f'{self.host}/shark/api.php?action=configs',headers=self.headers,data={'username':'','token':''}).text
        datas=json.loads(self.aes(data))
        hkey = datas['config']['hulue'].split('&')[0]
        playerinfos = datas['playerinfos']
        return hkey,playerinfos

    def getdata(self,parh,method=True,data=None,json_data=None):
        """GET (method=True) or POST the path and decrypt the JSON response."""
        url = f'{self.host}{parh}'
        if method:
            response = self.fetch(url, headers=self.headers).text
        else:
            response = self.post(url, headers=self.headers, data=data, json=json_data).text
        return json.loads(self.aes(response))

    def getf(self, type_id):
        """Fetch filter definitions for one category; return (type_id, []) on failure."""
        try:
            fdata = self.getdata(f'/api.php/v1.classify/types?type_id={type_id}')
            filter_list = []
            for key, value in fdata['data'].items():
                if len(value):
                    filter_list.append({
                        'key': key.split('_')[0],
                        'name': key.split('_')[0],
                        'value': [{'n': j['type_name'], 'v': j['type_name']} for j in value if j.get('type_name')]
                    })
            return type_id, filter_list
        except Exception as e:
            print(f'获取type_id={type_id}的筛选数据失败: {e}')
            return type_id, []

    def aes(self, word,key=None, b=True):
        """AES-ECB helper: decrypt base64 input when b=True, else encrypt to base64."""
        if not key:key=self.keys[1]
        cipher = AES.new(key.encode(), AES.MODE_ECB)
        # Strip a possible BOM / non-ASCII garbage before base64 handling.
        word = word.encode('utf-8-sig').decode('ascii', errors='ignore')
        if b:
            word = b64decode(word)
            decrypted = cipher.decrypt(word)
            word = unpad(decrypted, AES.block_size).decode()
        else:
            padded = pad(word.encode(), AES.block_size)
            encrypted = cipher.encrypt(padded)
            word = b64encode(encrypted).decode()
        return word
+
diff --git a/PyramidStore/plugin/app/云端APP.py b/PyramidStore/plugin/app/云端APP.py
new file mode 100644
index 0000000..596a860
--- /dev/null
+++ b/PyramidStore/plugin/app/云端APP.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+import threading
+import requests
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """云端 APP spider: AES-CBC encrypted maccms-style app API."""

    def init(self, extend=""):
        # Pick the fastest of the advertised dynamic domains as the API host.
        self.host = self.gethost()
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build classes + per-class filters from initV119 and seed the home list."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Maps filter keys found in type_extend to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # NOTE(review): the first 8 banners are skipped here, and one more at
        # the end — presumably ads/placeholders; confirm against the app UI.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered, paged category listing."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Totals unknown; report effectively-infinite paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: each episode is packed as "<name>$<base64 json>" so
        playerContent can recover url / user-agent / parse endpoint."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Decode the episode blob and try server-side parsing; fall back to
        the raw URL with parse=1 (web-view parsing) on any failure."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                # Direct JSON parse API.
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                # App-side parse endpoint expects the AES-encrypted target URL.
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
                if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Disguised stream (image extension) — route through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        # Delegate m3u8 proxying to the shared Mlocal handler.
        return self.Mlocal(param)

    def gethost(self):
        """Fetch the dynamic-domain list and return the fastest responder."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        response = self.fetch('https://ydysdynamicdomainname.68.gy:10678/c9m2js298x82h6/l9m8bx23j2o2p9q/dynamicdomainname.txt',
                              headers=headers).text
        return self.host_late(response.split('\n'))

    def host_late(self, url_list):
        """Probe candidate hosts in parallel; return the lowest-latency one."""
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list

        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}
        threads = []

        def test_host(url):
            # Latency in ms; unreachable hosts sort last via +inf.
            try:
                start_time = time.time()
                response = requests.head(url,timeout=1.0, allow_redirects=False)
                delay = (time.time() - start_time) * 1000
                results[url] = delay
            except Exception as e:
                results[url] = float('inf')

        for url in urls:
            t = threading.Thread(target=test_host, args=(url,))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        return min(results.items(), key=lambda x: x[1])[0]

    def aes(self, text, b=None):
        """AES-CBC (key doubles as IV): encrypt→base64 when *b* is truthy,
        otherwise decrypt base64 input back to text."""
        key = b"k9o3p2c8b7m3z0o8"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request signed headers: timestamp + AES(timestamp) signature."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "140", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST and AES-decrypt the 'data' field of the JSON response."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap *url* for the local proxy as an m3u8 request."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def Mlocal(self, param, header=None):
        """Fetch an m3u8 (following one redirect) and absolutise segment lines."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): keeps only the directory part of the segment
                # line, dropping the filename — confirm against real playlists.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # "mpegur" typo kept as-is (application/vnd.apple.mpegurl).
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """UTF-8 → base64 string; '' on any failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """base64 string → UTF-8 text; '' on any failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of *text* (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
diff --git a/PyramidStore/plugin/app/云速影视APP.py b/PyramidStore/plugin/app/云速影视APP.py
new file mode 100644
index 0000000..a87909f
--- /dev/null
+++ b/PyramidStore/plugin/app/云速影视APP.py
@@ -0,0 +1,219 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """云速影视 APP spider: AES-CBC encrypted maccms-style app API."""

    def init(self, extend=""):
        # Resolve the API host from a COS-hosted bootstrap file and load the
        # cached (or freshly generated) device id used for request signing.
        self.host = self.gethost()
        self.did=self.getdid()
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build classes + per-class filters from initV119 and seed the home list."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Maps filter keys found in type_extend to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # NOTE(review): the first 8 banners are skipped here, and one more at
        # the end — presumably ads/placeholders; confirm against the app UI.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered, paged category listing."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Totals unknown; report effectively-infinite paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: each episode is packed as "<name>$<base64 json>" so
        playerContent can recover url / user-agent / parse endpoint."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Decode the episode blob and try server-side parsing; fall back to
        the raw URL with parse=1 (web-view parsing) on any failure."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                # Direct JSON parse API.
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                # App-side parse endpoint expects the AES-encrypted target URL.
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
                if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Disguised stream (image extension) — route through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        # Delegate m3u8 proxying to the shared Mlocal handler.
        return self.Mlocal(param)

    def gethost(self):
        """Fetch the current API host from the COS bootstrap file."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        response = self.fetch('https://jingyu-1312635929.cos.ap-nanjing.myqcloud.com/1.json',headers=headers).text
        return response.strip()

    def getdid(self):
        """Return a per-install device id, generating and caching it on first use."""
        did=self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def aes(self, text, b=None):
        """AES-CBC (key doubles as IV): encrypt→base64 when *b* is truthy,
        otherwise decrypt base64 input back to text."""
        key = b"4d83b87c4c5ea111"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request signed headers: timestamp + AES(timestamp) signature."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "300", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.did,
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST and AES-decrypt the 'data' field of the JSON response."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap *url* for the local proxy as an m3u8 request."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def Mlocal(self, param, header=None):
        """Fetch an m3u8 (following one redirect) and absolutise segment lines."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): keeps only the directory part of the segment
                # line, dropping the filename — confirm against real playlists.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # "mpegur" typo kept as-is (application/vnd.apple.mpegurl).
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """UTF-8 → base64 string; '' on any failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """base64 string → UTF-8 text; '' on any failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of *text* (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
diff --git a/PyramidStore/plugin/app/光速APP.py b/PyramidStore/plugin/app/光速APP.py
new file mode 100644
index 0000000..33b1204
--- /dev/null
+++ b/PyramidStore/plugin/app/光速APP.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append('..')
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
class Spider(Spider):
    """光速 APP spider: AES-CBC encrypted maccms-style app API."""

    def init(self, extend=""):
        # Resolve the current API host from a COS-hosted bootstrap file.
        self.host = self.gethost()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build classes + per-class filters from initV119 and seed the home list."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Maps filter keys found in type_extend to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # NOTE(review): the first 8 banners are skipped, and one more at the
        # end — presumably ads/placeholders; confirm against the app UI.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered, paged category listing."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Totals unknown; report effectively-infinite paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: each episode is packed as "<name>$<base64 json>" so
        playerContent can recover url / user-agent / parse endpoint."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                it['user_agent']=itt["player_info"].get("user_agent")
                it["parse"]=itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Decode the episode blob and try server-side parsing; fall back to
        the raw URL with parse=1 (web-view parsing) on any failure."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                # Direct JSON parse API.
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                # FIX: this file's aes() signature is (operation, text); the
                # original call self.aes(ids['url'], True) matched neither
                # branch, returned None, and quote(None) raised — the vodParse
                # branch could never succeed.
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes('encrypt', ids['url']))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
                if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Disguised stream (image extension) — route through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        # Delegate m3u8 proxying to the shared Mlocal handler.
        return self.Mlocal(param)

    def gethost(self):
        """Fetch the current API host from the COS bootstrap file."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        host = self.fetch('https://jingyu-1312635929.cos.ap-nanjing.myqcloud.com/1.json',
                          headers=headers).text.strip()
        return host

    # Player-emulating headers (currently unused by the methods above).
    phend = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)',
        'allowCrossProtocolRedirects': 'true'
    }

    def aes(self, operation, text):
        """AES-CBC with key==IV; *operation* is "encrypt" (→ base64) or "decrypt"."""
        key = "4d83b87c4c5ea111".encode("utf-8")
        iv = key
        if operation == "encrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        elif operation == "decrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request signed headers: timestamp + AES(timestamp) signature."""
        t = str(int(time.time()))
        header = {"Referer":self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "300", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes("encrypt", t),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST and AES-decrypt the 'data' field of the JSON response."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes("decrypt", vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap *url* for the local proxy as an m3u8 request."""
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Fetch an m3u8 (following one redirect) and absolutise segment lines."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): keeps only the directory part of the segment
                # line, dropping the filename — confirm against real playlists.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # "mpegur" typo kept as-is (application/vnd.apple.mpegurl).
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """UTF-8 → base64 string; '' on any failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self,encoded_text):
        """base64 string → UTF-8 text; '' on any failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of *text* (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
diff --git a/PyramidStore/plugin/app/剧多短剧APP.py b/PyramidStore/plugin/app/剧多短剧APP.py
new file mode 100644
index 0000000..a25f44f
--- /dev/null
+++ b/PyramidStore/plugin/app/剧多短剧APP.py
@@ -0,0 +1,313 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import binascii
+import json
+import random
+import sys
+import time
+import uuid
+from base64 import b64decode, b64encode
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad, pad
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the JuDuo short-drama APP (剧多短剧).

    Every API request is signed with an MD5 over an AES-CBC-encrypted body (or
    a time+nonce string for GETs); responses arrive as a JSON-quoted base64
    blob that aes(..., False) strips and decrypts.
    """

    def init(self, extend=""):
        # ut: "user token available" flag; flipped True once gettoken() succeeds,
        # after which getdata() adds ta-token/userId headers.
        self.ut = False
        # self.did, self.ntid =self.getdid()
        # Hard-coded device/tenant ids (randomized variant kept above for reference).
        self.did, self.ntid = 'e59eb2465f61b9ca','65a0de19b3a2ec93fa479ad6'
        self.token, self.uid = self.gettoken()
        # Picture CDN host, image suffix, and media playback host.
        self.phost, self.phz,self.mphost=self.getpic()
        # self.phost, self.phz,self.mphost = ('https://dbtp.tgydy.com','.log','https://dplay.nbzsmc.com')
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin (plain HTTP, IP-addressed).
    host='http://192.151.245.34:8089'

    def md5(self, text):
        """Hex MD5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def uuid(self):
        """Random UUID4 string, used as the per-request nonce source."""
        return str(uuid.uuid4())

    def getdid(self):
        """Generate a random device id (16 hex chars) and tenant id (24 hex chars)."""
        did = self.random_str(16)
        ntid = self.random_str(24)
        return did, ntid
        # try:
        #     if self.getCache('did'):
        #         return self.getCache('did'), self.getCache('ntid')
        #     else:
        #         self.setCache('did', did)
        #         self.setCache('ntid', ntid)
        #         return did, ntid
        # except Exception as e:
        #     self.setCache('did', did)
        #     self.setCache('ntid', ntid)
        #     return did, ntid

    def aes(self, text, bool=True):
        """AES-128-CBC helper.

        bool=True: encrypt `text` (str) and return base64.
        bool=False: decrypt base64 `text` and return the parsed JSON object.
        Key and IV are fixed, stored base64-encoded.
        """
        key = b64decode('c0k4N1RfKTY1U1cjJERFRA==')
        iv = b64decode('VzIjQWRDVkdZSGFzSEdEVA==')
        if bool:
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            cipher = AES.new(key, AES.MODE_CBC, iv)
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            ptt = json.loads(pt.decode("utf-8"))
            return ptt

    def random_str(self, length=24):
        """Random lowercase-hex string of the given length."""
        hex_chars = '0123456789abcdef'
        return ''.join(random.choice(hex_chars) for _ in range(length))

    def gettoken(self):
        """Anonymous login; returns (token, userId) and marks self.ut True."""
        params = {"deviceId": self.did, "deviceModel": "8848钛晶手机", "devicePlatform": "1", "tenantId": self.ntid}
        data = self.getdata('/supports/anonyLogin', params)
        self.ut = True
        return data['data']['token'], data['data']['userId']

    def getdata(self, path, params=None):
        """Signed request wrapper.

        With params: AES-encrypt the JSON body and POST it as {'ct': ...}.
        Without params: GET with a time+nonce signature instead.
        The response text is a JSON-quoted base64 string, hence the [1:-1]
        strip before decrypting. Returns the decrypted JSON object.
        """
        t = int(time.time() * 1000)
        n = self.md5(f'{self.uuid()}{t}')
        if params:
            ct = self.aes(json.dumps(params))
        else:
            ct = f'{t}{n}'
        # Signature: MD5 of payload plus a fixed app salt.
        s = self.md5(f'{ct}8j@78m.367HGDF')
        headers = {
            'User-Agent': 'okhttp-okgo/jeasonlzy',
            'Connection': 'Keep-Alive',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'tenantId': self.ntid,
            'n': n,
            # Header timestamp is in seconds, while `t` above is milliseconds.
            't': str(int(t / 1000)),
            's': s,
        }
        if self.ut:
            headers['ta-token'] = self.token
            headers['userId'] = self.uid
        if params:
            params = {'ct': ct}
            response = self.post(f'{self.host}{path}', headers=headers, json=params).text
        else:
            response = self.fetch(f'{self.host}{path}', headers=headers).text
        data = self.aes(response[1:-1], False)
        return data

    def getpic(self):
        """Fetch CDN configuration; falls back to known-good defaults on error.

        Returns (image_cdn_host, image_suffix, media_cdn_host).
        """
        try:
            at = int(time.time() * 1000)
            t = str(int(at / 1000))
            n = self.md5(f'{self.uuid()}{at}')
            headers = {
                'Host': '192.151.245.34:8089',
                'User-Agent': 'okhttp-okgo/jeasonlzy',
                'Connection': 'Keep-Alive',
                'Accept-Language': 'zh-CN,zh;q=0.8',
                'tenantId': self.ntid,
                'userId': self.uid,
                'ta-token': self.token,
                'n': n,
                't': t,
                # GET signature: seconds-timestamp + nonce + fixed salt.
                's': self.md5(f'{t}{n}8j@78m.367HGDF')
            }
            params = {
                'tenantId': self.ntid,
            }
            response = self.fetch(f'{self.host}/supports/configs', params=params, headers=headers).text
            data = self.aes(response[1:-1], False)
            config = {
                'image_cdn': '',
                'image_cdn_path': '',
                'cdn-domain': ''
            }
            for item in data.get('data', []):
                name = item.get('name')
                records = item.get('records', [])

                if name in config and records:
                    value = records[0].get('value', '')
                    if name == 'cdn-domain':
                        # Multiple domains are '#'-separated; take the first.
                        value = value.split('#')[0]
                    config[name] = value

            return config['image_cdn'], config['image_cdn_path'], config['cdn-domain']

        except Exception as e:
            print(f"Error in getpic: {e}")
            return 'https://dbtp.tgydy.com', '.log', 'https://dplay.nbzsmc.com'

    def getlist(self, data):
        """Map API movie records to the app's vod-list dict shape.

        vod_id encodes movieId and episode count as 'movieId@entryNum'; the
        thumbnail is routed through localProxy for decryption.
        """
        vod = []
        for i in data:
            vod.append({
                'vod_id': f'{i.get("movieId")}@{i.get("entryNum")}',
                'vod_name': i.get('title'),
                'vod_pic': f'{self.getProxyUrl()}&path={i.get("thumbnail")}',
                'vod_year': i.get('score'),
                'vod_remarks': f'{i.get("entryNum")}集'
            })
        return vod

    def homeContent(self, filter):
        """Build the fixed category tabs and their filter option lists."""
        data = self.getdata('/movies/classifies')
        result = {}
        # Tab label -> API path fragment used by categoryContent.
        cateManual = {
            "榜单": "ranking/getTodayHotRank",
            "专辑": "getTMovieFolderPage",
            "剧场": "getClassMoviePage2",
            "演员": "follow/getRecommendActorPage",
        }
        classes = []
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        filters = {}
        if data.get('data'):
            filters["getClassMoviePage2"] = [
                {
                    "key": "type",
                    "name": "分类",
                    "value": [
                        {"n": item["name"], "v": item["classifyId"]}
                        for item in data["data"]
                    ]
                }
            ]
        filters["ranking/getTodayHotRank"] = [
            {
                "key": "type",
                "name": "榜单",
                "value": [
                    {"n": "播放榜", "v": "getWeekHotPlayRank"},
                    {"n": "高赞榜", "v": "getWeekStarRank"},
                    {"n": "追剧榜", "v": "getSubTMoviePage"},
                    {"n": "高分榜", "v": "ranking/getScoreRank"}
                ]
            }
        ]
        filters["follow/getRecommendActorPage"] = [
            {
                "key": "type",
                "name": "性别",
                "value": [
                    {"n": "男", "v": "0"},
                    {"n": "女", "v": "1"}
                ]
            }
        ]
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home page: first 30 recommended movies."""
        params = {"pageNo": "1", "pageSize": "30", "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
        data = self.getdata('/news/getRecommendTMoviePage', params)
        vod = self.getlist(data['data']['records'])
        return {'list': vod}

    def categoryContent(self, tid, pg, filter, extend):
        """Dispatch a category/rank/actor/folder listing based on tid.

        NOTE(review): `params` is populated below but never merged into
        `parama` (the dict actually sent) — looks like dead code or a bug.
        NOTE(review): if tid matches none of the branches, `parama` is
        unbound and getdata() raises — confirm all tids hit a branch.
        """
        params = {}
        path = f'/news/{tid}'
        if tid == 'getClassMoviePage2':
            parama = {"pageNo": pg, "pageSize": "30", "orderFlag": "0", "haveActor": "-1", "classifyId": extend.get('type', '-1'), "tagId": ""}
        elif 'rank' in tid:
            # Rank tab: the filter value selects a different rank endpoint.
            path = f'/news/{extend.get("type") or tid}'
            parama = {"pageNo": pg, "pageSize": "30"}
        elif 'follow' in tid:
            parama = {"pageNo": pg, "pageSize": "20"}
            if extend.get('type'):
                path = f'/news/getActorPage'
                parama = {"pageNo": pg, "pageSize": "50", "sex": extend.get('type')}
        elif tid == 'getTMovieFolderPage':
            parama = {"pageNo": pg, "pageSize": "20"}
        elif '@' in tid:
            # Drill-down from an actor tile: tid is 'actorId@'.
            path = '/news/getActorTMoviePage'
            parama = {"id": tid.split('@')[0], "pageNo": pg, "pageSize": "30"}
        params['platform'] = '1'
        params['deviceId'] = self.did
        params['tenantId'] = self.ntid
        data = self.getdata(path, parama)
        vods = []
        if 'follow' in tid:
            # Actor tiles render as round "folder" entries.
            for i in data['data']['records']:
                vods.append({
                    'vod_id': f'{i.get("id")}@',
                    'vod_name': i.get('name'),
                    'vod_pic': f"{self.getProxyUrl()}&path={i.get('avatar')}",
                    'vod_tag': 'folder',
                    'vod_remarks': f'作品{i.get("movieNum")}',
                    'style': {"type": "oval"}
                })
        else:
            vdata = data['data']['records']
            if tid == 'getTMovieFolderPage':
                # Folders nest their movies one level down; flatten them.
                vdata = [j for i in data['data']['records'] for j in i['movieList']]
            vods = self.getlist(vdata)
        result = {}
        result['list'] = vods
        result['page'] = pg
        # Pagination totals are faked; the app just keeps paging until empty.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Episode list for one movie; ids[0] is 'movieId@entryNum'."""
        ids = ids[0].split('@')
        params = {"pageNo": "1", "pageSize": ids[1], "movieId": ids[0], "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
        data = self.getdata('/news/getEntryPage', params)
        # NOTE(review): debug print left in; consider removing.
        print(data)
        plist = [f'第{i.get("entryNum")}集${i.get("mp4PlayAddress") or i.get("playAddress")}' for i in data['data']['records']]
        vod = {
            'vod_play_from': '嗷呜爱看短剧',
            'vod_play_url': '#'.join(plist),
        }
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search, 20 results per page."""
        params = {"pageNo": pg, "pageSize": "20", "keyWord": key, "orderFlag": "0", "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
        data = self.getdata('/news/searchTMoviePage', params)
        vod = self.getlist(data['data']['records'])
        return {'list': vod, 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Direct play: id is a path on the media CDN host."""
        return {'parse': 0, 'url': f'{self.mphost}{id}', 'header': {'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'}}

    def localProxy(self, param):
        """Image proxy: fetch the AES-encrypted thumbnail and return it decrypted."""
        data = self.fetch(f'{self.phost}{param.get("path")}{self.phz}', headers={'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'})

        def decrypt(encrypted_text):
            # AES-128-CBC with urlsafe-base64 key/iv; returns None when the
            # payload is not valid padded ciphertext.
            try:
                key = base64.urlsafe_b64decode("iM41VipvCFtToAFFRExEXw==")
                iv = base64.urlsafe_b64decode("0AXRTXzmMSrlRSemWb4sVQ==")
                cipher = AES.new(key, AES.MODE_CBC, iv)
                decrypted_padded = cipher.decrypt(encrypted_text)
                decrypted_data = unpad(decrypted_padded, AES.block_size)
                return decrypted_data
            except (binascii.Error, ValueError):
                return None
        return [200, data.headers.get('Content-Type'), decrypt(data.content)]
+
diff --git a/PyramidStore/plugin/app/哇哇APP.py b/PyramidStore/plugin/app/哇哇APP.py
new file mode 100644
index 0000000..a49ed1f
--- /dev/null
+++ b/PyramidStore/plugin/app/哇哇APP.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import time
+import uuid
+from base64 import b64decode, b64encode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+from Crypto.Cipher import AES
+from Crypto.Hash import SHA256, MD5
+from Crypto.PublicKey import RSA
+from Crypto.Signature import pkcs1_15
+from Crypto.Util.Padding import unpad
+
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the WaWa (哇哇) APP.

    Bootstraps its API host/appKey/RSA private key from an AES-encrypted
    config file hosted on Gitee, then signs every API request with
    RSA-SHA256 over "appKey=...&time=...&uid=...".
    """

    def init(self, extend=""):
        # Remote bootstrap: (baseUrl, appKey, RSA private key) from Gitee.
        self.host, self.appKey, self.rsakey = self.userinfo()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Category tree + filter options from the zjv6.vod/types endpoint."""
        data = self.fetch(f"{self.host}/api.php/zjv6.vod/types", headers=self.getheader()).json()
        # Filter key -> display label.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序", }
        filters = {}
        classes = []
        json_data = data['data']['list']
        for item in json_data:
            has_non_empty_field = False
            jsontype_extend = item["type_extend"]
            # Inject sort options (translated to API sort keys via `sl` below).
            jsontype_extend['by'] = '按更新,按播放,按评分,按收藏'
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        sl = {'按更新': 'time', '按播放': 'hits', '按评分': 'score', '按收藏': 'store_num'}
                        value_array = [
                            {"n": value.strip(), "v": sl[value.strip()] if dkey == "by" else value.strip()}
                            for value in values
                            if value.strip() != ""
                        ]
                        filters[str(item["type_id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result = {"class": classes, "filters": filters}
        return result

    def homeVideoContent(self):
        """Home page: the first ranking block's video list."""
        data = self.fetch(f"{self.host}/api.php/zjv6.vod/vodPhbAll", headers=self.getheader()).json()
        return {'list': data['data']['list'][0]['vod_list']}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; pagination totals are faked."""
        params = {
            "type": tid,
            "class": extend.get('class', ''),
            "lang": extend.get('lang', ''),
            "area": extend.get('area', ''),
            "year": extend.get('year', ''),
            "by": extend.get('by', ''),
            "page": pg,
            "limit": "12"
        }
        data = self.fetch(f"{self.host}/api.php/zjv6.vod", headers=self.getheader(), params=params).json()
        result = {}
        result['list'] = data['data']['list']
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: fold vod_play_list into '$$$'-joined from/url strings.

        Each episode entry is serialized (with its parse list) as base64 JSON
        so playerContent can recover it from the play id.
        """
        data = self.fetch(f"{self.host}/api.php/zjv6.vod/detail?vod_id={ids[0]}&rel_limit=10",
                          headers=self.getheader()).json()
        vod = data['data']
        v, np = {'vod_play_from': [], 'vod_play_url': []}, {}
        for i in vod['vod_play_list']:
            n = i['player_info']['show']
            np[n] = []
            for j in i['urls']:
                j['parse'] = i['player_info']['parse2']
                nm = j.pop('name')
                np[n].append(f"{nm}${self.e64(json.dumps(j))}")
        for key, value in np.items():
            v['vod_play_from'].append(key)
            v['vod_play_url'].append('#'.join(value))
        v['vod_play_from'] = '$$$'.join(v['vod_play_from'])
        v['vod_play_url'] = '$$$'.join(v['vod_play_url'])
        vod.update(v)
        vod.pop('vod_play_list', None)
        vod.pop('type', None)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search, 20 results per page."""
        data = self.fetch(f"{self.host}/api.php/zjv6.vod?page={pg}&limit=20&wd={key}", headers=self.getheader()).json()
        return {'list': data['data']['list'], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Try every configured parse endpoint in parallel; fall back to parse=1."""
        ids = json.loads(self.d64(id))
        target_url = ids['url']
        try:
            parse_str = ids.get('parse', '')
            if parse_str:
                parse_urls = parse_str.split(',')
                result_url = self.try_all_parses(parse_urls, target_url)
                if result_url:
                    return {
                        'parse': 0,
                        'url': result_url,
                        'header': {'User-Agent': 'dart:io'}
                    }
            # No parser succeeded: hand the raw url to the client-side parser.
            return {
                'parse': 1,
                'url': target_url,
                'header': {'User-Agent': 'dart:io'}
            }

        except Exception as e:
            print(e)
            return {
                'parse': 1,
                'url': target_url,
                'header': {'User-Agent': 'dart:io'}
            }

    def liveContent(self, url):
        pass

    def localProxy(self, param):
        pass

    def userinfo(self):
        """Fetch and decrypt the remote config; returns (baseUrl, appKey, appSecret).

        This bootstrap request is MD5-signed with a fixed appKey (unlike the
        later RSA-signed API requests).
        """
        t = str(int(time.time() * 1000))
        uid = self.generate_uid()
        sign = self.md5(f"appKey=3bbf7348cf314874883a18d6b6fcf67a&uid={uid}&time={t}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
            'Connection': 'Keep-Alive',
            'appKey': '3bbf7348cf314874883a18d6b6fcf67a',
            'uid': uid,
            'time': t,
            'sign': sign,
        }

        params = {
            'access_token': '74d5879931b9774be10dee3d8c51008e',
        }

        response = self.fetch('https://gitee.com/api/v5/repos/aycapp/openapi/contents/wawaconf.txt', params=params,
                              headers=headers).json()
        data = json.loads(self.decrypt(response['content']))
        return data['baseUrl'], data['appKey'], data['appSecret']

    def e64(self, text):
        """Base64-encode a UTF-8 string; return '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def generate_uid(self):
        """Random hex uid used as a per-request identity."""
        return uuid.uuid4().hex

    def getheader(self):
        """Standard API headers with an RSA-SHA256 signature over appKey/time/uid."""
        t = str(int(time.time() * 1000))
        uid = self.generate_uid()
        sign = self.sign_message(f"appKey={self.appKey}&time={t}&uid={uid}")
        headers = {
            'User-Agent': 'okhttp/4.9.3',
            'Connection': 'Keep-Alive',
            'uid': uid,
            'time': t,
            'appKey': self.appKey,
            'sign': sign,
        }
        return headers

    def decrypt(self, encrypted_data):
        """Decrypt the Gitee config blob: base64 -> hex string -> AES-ECB."""
        key = b64decode('Crm4FXWkk5JItpYirFDpqg==')
        cipher = AES.new(key, AES.MODE_ECB)
        encrypted = bytes.fromhex(self.d64(encrypted_data))
        decrypted = cipher.decrypt(encrypted)
        unpadded = unpad(decrypted, AES.block_size)
        return unpadded.decode('utf-8')

    def sign_message(self, message):
        """PKCS#1 v1.5 RSA-SHA256 signature of `message`, base64-encoded."""
        private_key_str = f"-----BEGIN PRIVATE KEY-----\n{self.rsakey}\n-----END PRIVATE KEY-----"
        private_key = RSA.import_key(private_key_str)
        message_hash = SHA256.new(message.encode('utf-8'))
        signature = pkcs1_15.new(private_key).sign(message_hash)
        signature_b64 = b64encode(signature).decode('utf-8')
        return signature_b64

    def fetch_url(self, parse_url, target_url):
        """Query one parse endpoint; return the resolved url or None.

        All failures (timeout, non-200, bad JSON, missing url) collapse to None
        deliberately — try_all_parses races several endpoints best-effort.
        """
        try:
            response = self.fetch(f"{parse_url.replace('..', '.')}{target_url}",
                                  headers={"user-agent": "okhttp/4.1.0/luob.app"}, timeout=5)
            if response.status_code == 200:
                try:
                    data = response.json()
                    result_url = data.get('url') or data.get('data', {}).get('url')
                    if result_url:
                        return result_url
                except:
                    pass
            return None
        except:
            return None

    def try_all_parses(self, parse_urls, target_url):
        """Race all parse endpoints concurrently; first non-None result wins."""
        with ThreadPoolExecutor(max_workers=(len(parse_urls))) as executor:
            future_to_url = {
                executor.submit(self.fetch_url, parse_url.strip(), target_url): parse_url
                for parse_url in parse_urls if parse_url.strip()
            }

            for future in as_completed(future_to_url):
                try:
                    result = future.result()
                    if result:
                        return result
                except:
                    continue
        return None
+
diff --git a/PyramidStore/plugin/app/国外剧APP.py b/PyramidStore/plugin/app/国外剧APP.py
new file mode 100644
index 0000000..eae5fc7
--- /dev/null
+++ b/PyramidStore/plugin/app/国外剧APP.py
@@ -0,0 +1,216 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for guowaiju.com (国外剧 APP, "getappapi" backend).

    All API responses are AES-128-CBC encrypted with a fixed key that is also
    used as the IV; requests are authenticated via headers built in header().
    """

    def init(self, extend=""):
        self.host = 'https://guowaiju.com'
        # Device id is cached across runs (MD5 of first-seen timestamp).
        self.did = self.getdid()
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Home categories, filter options, and banner/recommend tiles."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Filter key -> display label.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # Skip the first banners; they are padded with per-type recommends below.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            # Injected sort options (values are passed through verbatim).
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; pagination totals are faked."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: serialize each source's episodes as base64 JSON play ids.

        Sources are iterated reversed so the API's last source shows first.
        """
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in reversed(data["vod_play_list"]):
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry the source's UA and parser into the play id payload.
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all types."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id: direct parse API, or server-side vodParse; else parse=1."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                # parse_api_url already embeds the target; call it directly.
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                # Server-side parse: send the AES-encrypted target url.
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Image-disguised streams go through the local m3u8 proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        """m3u8 proxy: absolutize relative lines and re-proxy nested m3u8 URIs."""
        headers = {"User-Agent": "okhttp/3.14.9"}
        url = self.d64(param['url'])
        ydata = self.fetch(url, headers=headers, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=headers).content.decode('utf-8')
        lines = data.strip().split('\n')
        # Base for short relative paths (same directory) vs rooted paths (origin).
        last_r = url[:url.rfind('/')]
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        for index, string in enumerate(lines):
            if '#EXT' not in string:
                if 'http' not in string:
                    domain = last_r if string.count('/') < 2 else durl
                    string = domain + ('' if string.startswith('/') else '/') + string
                if string.split('.')[-1].split('?')[0] == 'm3u8':
                    string = self.Mproxy(string)
                lines[index] = string
        data = '\n'.join(lines)
        # NOTE(review): "mpegur" looks like a typo for "mpegurl" — confirm.
        return [200, "application/vnd.apple.mpegur", data]

    def getdid(self):
        """Cached device id: MD5 of the first-seen timestamp, persisted via setCache."""
        did = self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def aes(self, text, b=None):
        """AES-128-CBC with a fixed key reused as IV.

        b truthy: encrypt to base64; otherwise decrypt base64 to str.
        """
        key = b"7xv16h7qgkrs9b1p"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request auth headers: timestamped, device-bound, AES-signed."""
        t = str(int(time.time()))
        header = {
            "User-Agent": "okhttp/3.14.9", "app-version-code": "110", "app-ui-mode": "light",
            "app-api-verify-time": t, "app-user-device-id": self.did,
            "app-api-verify-sign": self.aes(t, True),
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
        }
        return header

    def getdata(self, path, data=None):
        """POST to the API and return the AES-decrypted JSON payload."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap `url` in the local proxy as a base64-encoded m3u8 request."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def e64(self, text):
        """Base64-encode a UTF-8 string; return '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
+
diff --git a/PyramidStore/plugin/app/奇迹APP.py b/PyramidStore/plugin/app/奇迹APP.py
new file mode 100644
index 0000000..b3ff4c7
--- /dev/null
+++ b/PyramidStore/plugin/app/奇迹APP.py
@@ -0,0 +1,216 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for qj1080.top (奇迹 APP, "getappapi" backend).

    Near-identical to the 国外剧 spider but with its own host, AES key, and
    app-version-code; responses are AES-128-CBC encrypted with a fixed key
    that is also used as the IV.
    """

    def init(self, extend=""):
        self.host = 'https://www.qj1080.top'
        # Device id is cached across runs (MD5 of first-seen timestamp).
        self.did = self.getdid()
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Home categories, filter options, and banner/recommend tiles."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Filter key -> display label.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # Skip the first banners; they are padded with per-type recommends below.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            # Injected sort options (values are passed through verbatim).
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; pagination totals are faked."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: serialize each source's episodes as base64 JSON play ids.

        NOTE(review): unlike the 国外剧 sibling, sources are NOT reversed here —
        confirm which ordering the app expects.
        """
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry the source's UA and parser into the play id payload.
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all types."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id: direct parse API, or server-side vodParse; else parse=1."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                # parse_api_url already embeds the target; call it directly.
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                # Server-side parse: send the AES-encrypted target url.
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Image-disguised streams go through the local m3u8 proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        """m3u8 proxy: absolutize relative lines and re-proxy nested m3u8 URIs."""
        headers = {"User-Agent": "okhttp/3.14.9"}
        url = self.d64(param['url'])
        ydata = self.fetch(url, headers=headers, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=headers).content.decode('utf-8')
        lines = data.strip().split('\n')
        # Base for short relative paths (same directory) vs rooted paths (origin).
        last_r = url[:url.rfind('/')]
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        for index, string in enumerate(lines):
            if '#EXT' not in string:
                if 'http' not in string:
                    domain = last_r if string.count('/') < 2 else durl
                    string = domain + ('' if string.startswith('/') else '/') + string
                if string.split('.')[-1].split('?')[0] == 'm3u8':
                    string = self.Mproxy(string)
                lines[index] = string
        data = '\n'.join(lines)
        # NOTE(review): "mpegur" looks like a typo for "mpegurl" — confirm.
        return [200, "application/vnd.apple.mpegur", data]

    def getdid(self):
        """Cached device id: MD5 of the first-seen timestamp, persisted via setCache."""
        did = self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def aes(self, text, b=None):
        """AES-128-CBC with a fixed key reused as IV.

        b truthy: encrypt to base64; otherwise decrypt base64 to str.
        """
        key = b"8t2L9x5Qz4A7p3y6"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request auth headers: timestamped, device-bound, AES-signed."""
        t = str(int(time.time()))
        header = {
            "User-Agent": "okhttp/3.14.9", "app-version-code": "666", "app-ui-mode": "light",
            "app-api-verify-time": t, "app-user-device-id": self.did,
            "app-api-verify-sign": self.aes(t, True),
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
        }
        return header

    def getdata(self, path, data=None):
        """POST to the API and return the AES-decrypted JSON payload."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap `url` in the local proxy as a base64-encoded m3u8 request."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def e64(self, text):
        """Base64-encode a UTF-8 string; return '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of a UTF-8 string."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
+
diff --git a/PyramidStore/plugin/app/小苹果APP.py b/PyramidStore/plugin/app/小苹果APP.py
new file mode 100644
index 0000000..d673fe3
--- /dev/null
+++ b/PyramidStore/plugin/app/小苹果APP.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """小苹果 APP spider: thin wrapper over the xpgcom mobile API."""

    def init(self, extend=""):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin used by every request.
    host = 'http://item.xpgcom.com'

    headers = {
        "User-Agent": "okhttp/3.12.11"
    }

    def homeContent(self, filter):
        """Return the category list plus per-category filter definitions."""
        data = self.fetch(f"{self.host}/api.php/v2.vod/androidtypes", headers=self.headers).json()
        dy = {
            "classes": "类型",
            "areas": "地区",
            "years": "年份",
            "sortby": "排序",
        }
        filters = {}
        classes = []
        for item in data['data']:
            has_non_empty_field = False
            # Sort keys are fixed; demos holds their display labels.
            item['sortby'] = ['updatetime', 'hits', 'score']
            demos = ['时间', '人气', '评分']
            classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
            for key in dy:
                if key in item and len(item[key]) > 1:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in item:
                    if dkey in dy and len(item[dkey]) > 1:
                        values = item[dkey]
                        value_array = [
                            {"n": demos[idx] if dkey == "sortby" else value.strip(), "v": value.strip()}
                            for idx, value in enumerate(values)
                            if value.strip() != ""
                        ]
                        filters[str(item["type_id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result = {}
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Home page recommendations: flatten every section into one list."""
        rsp = self.fetch(f"{self.host}/api.php/v2.main/androidhome", headers=self.headers).json()
        videos = []
        for i in rsp['data']['list']:
            videos.extend(self.getlist(i['list']))
        return {'list': videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; empty filter values are dropped."""
        params = {
            "page": pg,
            "type": tid,
            "area": extend.get('areaes', ''),
            "year": extend.get('yeares', ''),
            "sortby": extend.get('sortby', ''),
            "class": extend.get('classes', '')
        }
        params = {i: v for i, v in params.items() if v}
        rsp = self.fetch(f'{self.host}/api.php/v2.vod/androidfilter10086', headers=self.headers, params=params).json()
        result = {}
        result['list'] = self.getlist(rsp['data'])
        result['page'] = pg
        # The API does not report totals; advertise effectively endless pages.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page for one vod id, including the flattened play list."""
        rsp = self.fetch(f'{self.host}/api.php/v3.vod/androiddetail2?vod_id={ids[0]}', headers=self.headers).json()
        v = rsp['data']
        vod = {
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_lang': v.get('lang'),
            'type_name': v.get('className'),
            'vod_actor': v.get('actor'),
            'vod_director': v.get('director'),
            'vod_content': v.get('content'),
            'vod_play_from': '小苹果',
            'vod_play_url': '#'.join([f"{i['key']}${i['url']}" for i in v['urls']])
        }
        return {'list': [vod]}

    def searchContent(self, key, quick, pg='1'):
        rsp = self.fetch(f'{self.host}/api.php/v2.vod/androidsearch10086?page={pg}&wd={key}', headers=self.headers).json()
        return {'list': self.getlist(rsp['data']), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id to a direct url plus the auth headers the CDN expects."""
        header = {
            'user_id': 'XPGBOX',
            'token2': 'SnAXiSW8vScXE0Z9aDOnK5xffbO75w1+uPom3WjnYfVEA1oWtUdi2Ihy1N8=',
            'version': 'XPGBOX com.phoenix.tv1.5.7',
            'hash': 'd78a',
            'screenx': '2345',
            # FIX: the original literal listed 'user-agent' twice; Python keeps
            # only the last value, so the dead 'Lavf/58.12.100' entry was removed.
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
            'token': 'ElEDlwCVgXcFHFhddiq2JKteHofExRBUrfNlmHrWetU3VVkxnzJAodl52N9EUFS+Dig2A/fBa/V9RuoOZRBjYvI+GW8kx3+xMlRecaZuECdb/3AdGkYpkjW3wCnpMQxf8vVeCz5zQLDr8l8bUChJiLLJLGsI+yiNskiJTZz9HiGBZhZuWh1mV1QgYah5CLTbSz8=',
            'timestamp': '1743060300',
            'screeny': '1065',
        }
        if 'http' not in id:
            id = f"http://c.xpgtv.net/m3u8/{id}.m3u8"
        return {"parse": 0, "url": id, "header": header}

    def localProxy(self, param):
        pass

    def getlist(self, data):
        """Normalize raw vod entries into the common list schema."""
        videos = []
        for vod in data:
            r = f"更新至{vod.get('updateInfo')}" if vod.get('updateInfo') else ''
            videos.append({
                "vod_id": vod['id'],
                "vod_name": vod['name'],
                "vod_pic": vod['pic'],
                # Fall back to the score when there is no update info;
                # .get avoids a KeyError on entries without a score field.
                "vod_remarks": r or vod.get('score')
            })
        return videos
+
+
diff --git a/PyramidStore/plugin/app/恋鱼影视APP.py b/PyramidStore/plugin/app/恋鱼影视APP.py
new file mode 100644
index 0000000..bfefb89
--- /dev/null
+++ b/PyramidStore/plugin/app/恋鱼影视APP.py
@@ -0,0 +1,212 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """恋鱼影视 APP spider.

    Talks to an AES-wrapped "getappapi" backend: the JSON 'data' field of
    every response is AES-128-CBC encrypted and base64 encoded.
    """

    def init(self, extend=""):
        self.host = 'http://47.122.22.78'
        # Stable per-install device id, required by the signed headers.
        self.did = self.getdid()

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build category classes, per-category filters and a home list."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered listing for one category page."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Totals are unknown; advertise effectively endless paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail plus play lists; per-episode info is base64-packed JSON."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry the player's UA/parse settings into the play id.
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a packed play id to a direct url, falling back to parse=1."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Streams disguised with image extensions go through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def aes(self, text, b=None):
        """AES-128-CBC: encrypt+b64 when ``b`` is truthy, else b64+decrypt.

        NOTE(review): the key doubles as the IV — kept to match the app.
        """
        key = b"1234567890123456"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Signed request headers: timestamp plus its AES signature."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "101", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.did,
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdid(self):
        """Return the cached device id, creating one from the clock if absent."""
        did = self.getCache('did')
        if not did:
            t = str(int(time.time()))
            did = self.md5(t)
            self.setCache('did', did)
        return did

    def getdata(self, path, data=None):
        """POST and return the AES-decrypted JSON payload of 'data'."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Route ``url`` through the local m3u8 proxy."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def Mlocal(self, param, header=None):
        """Local proxy: fetch an m3u8 and absolutize relative segment lines.

        FIX: the original rewrote relative lines using only their directory
        prefix (``string[:last_slash_index + 1]``), dropping the segment file
        name (and yielding just ``durl + '/'`` for slash-less lines, where
        rfind returns -1). The full relative path is now joined against the
        playlist's origin.
        """
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, line in enumerate(lines):
            if line and '#EXT' not in line and 'http' not in line:
                lines[index] = durl + ('' if line.startswith('/') else '/') + line
        data = '\n'.join(lines)
        # NOTE(review): MIME type is missing the trailing 'l' ("mpegurl") —
        # players appear to tolerate it; confirm before changing.
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """Base64-encode ``text`` (UTF-8); return "" on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return "" on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of ``text`` (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
diff --git a/PyramidStore/plugin/app/悠悠APP.py b/PyramidStore/plugin/app/悠悠APP.py
new file mode 100644
index 0000000..f4940fa
--- /dev/null
+++ b/PyramidStore/plugin/app/悠悠APP.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
class Spider(Spider):
    """悠悠 APP spider.

    Same AES-wrapped "getappapi" backend family as 恋鱼影视, but the host is
    resolved from a remote bootstrap file and the device id is derived per
    request from the timestamp.
    """

    def init(self, extend=""):
        self.host = self.gethost()

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build category classes, per-category filters and a home list."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered listing for one category page."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Totals are unknown; advertise effectively endless paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail plus play lists; per-episode info is base64-packed JSON."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry the player's UA/parse settings into the play id.
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a packed play id to a direct url, falling back to parse=1."""
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Streams disguised with image extensions go through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def gethost(self):
        """Resolve the current API host from the remote bootstrap text file."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        host = self.fetch('http://host.yyys.news/250123.txt', headers=headers).text
        return host.strip()

    # NOTE(review): appears unused within this file — possibly referenced by
    # callers or kept for direct-playback requests; confirm before removing.
    phend = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)',
        'allowCrossProtocolRedirects': 'true'
    }

    def aes(self, text, b=None):
        """AES-128-CBC: encrypt+b64 when ``b`` is truthy, else b64+decrypt.

        NOTE(review): the key doubles as the IV — kept to match the app.
        """
        key = b"RuN9LRvwTRgpQnpK"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Signed request headers; device id is the MD5 of the timestamp."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "547", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST and return the AES-decrypted JSON payload of 'data'."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Route ``url`` through the local m3u8 proxy."""
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

    def Mlocal(self, param, header=None):
        """Local proxy: fetch an m3u8 and absolutize relative segment lines.

        FIX: the original rewrote relative lines using only their directory
        prefix (``string[:last_slash_index + 1]``), dropping the segment file
        name (and yielding just ``durl + '/'`` for slash-less lines, where
        rfind returns -1). The full relative path is now joined against the
        playlist's origin.
        """
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, line in enumerate(lines):
            if line and '#EXT' not in line and 'http' not in line:
                lines[index] = durl + ('' if line.startswith('/') else '/') + line
        data = '\n'.join(lines)
        # NOTE(review): MIME type is missing the trailing 'l' ("mpegurl") —
        # players appear to tolerate it; confirm before changing.
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """Base64-encode ``text`` (UTF-8); return "" on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return "" on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest of ``text`` (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
+
diff --git a/PyramidStore/plugin/app/海马影视APP.py b/PyramidStore/plugin/app/海马影视APP.py
new file mode 100644
index 0000000..801ffaf
--- /dev/null
+++ b/PyramidStore/plugin/app/海马影视APP.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import string
+import sys
+from base64 import b64decode, b64encode
+from urllib.parse import quote, unquote
+sys.path.append('..')
+import concurrent.futures
+from base.spider import Spider
+
+
class Spider(Spider):
    """海马影视 APP spider for the dcmovie backend."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API origin used by every request.
    host = 'http://w.dcmovie.top'

    headers = {
        'User-Agent': 'okhttp/4.9.1',
        'mark-time': 'null',
        'fn-api-version': '1.3.2',
        'versionCode': '5',
        'product': 'gysg',
    }

    def homeContent(self, filter):
        """Return classes, per-class filters (fetched concurrently) and banners."""
        data = self.fetch(f"{self.host}/api.php/vod/type", headers=self.headers).json()
        result, filters, videos = {}, {}, []
        # NOTE(review): type_name is deliberately reused as type_id — the
        # list endpoint filters by name via its 'state' param (see fts).
        classes = [{'type_id': i['type_name'], 'type_name': i['type_name']} for i in data['list'][1:]]
        body = {'token': '', 'type_id': data['list'][0]['type_id']}
        ldata = self.post(f"{self.host}/api.php/vod/category", data=body, headers=self.headers).json()
        for i in ldata['data']['banner']:
            videos.append({
                'vod_id': i.get('vod_id'),
                'vod_name': i.get('vod_name'),
                'vod_pic': i.get('vod_pic_thumb')
            })
        # Fetch each category's filter options concurrently.
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(classes)) as executor:
            future_to_aid = {executor.submit(self.fts, aid): aid for aid in classes}
            for future in concurrent.futures.as_completed(future_to_aid):
                aid = future_to_aid[future]
                try:
                    aid_id, fts = future.result()
                    filters[aid_id] = fts
                except Exception as e:
                    print(f"Error processing aid {aid}: {e}")
        result['class'] = classes
        result['filters'] = filters
        result['list'] = videos
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Paged, filtered category listing; entries with id 0 are skipped."""
        params = {'state': extend.get('state', tid) or tid, 'class': extend.get('classes', '全部'),
                  'area': extend.get('area', '全部'), 'year': extend.get('year', '全部'),
                  'lang': extend.get('lang', '全部'), 'version': extend.get('version', '全部'), 'pg': pg}
        data = self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        result = {}
        videos = []
        for i in data['data']['list']:
            if str(i.get('vod_id', 0)) != '0':
                videos.append({
                    'vod_id': i.get('vod_id'),
                    'vod_name': i.get('vod_name'),
                    'vod_pic': i.get('vod_pic'),
                    'vod_year': f"{i.get('vod_score')}分",
                    'vod_remarks': i.get('vod_remarks')
                })
        result['list'] = videos
        result['page'] = pg
        # Totals are unknown; advertise effectively endless paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page; episode info is base64-packed JSON carrying parse data."""
        body = {'ids': ids[0], 'uni_code': self.getunc(), 'ac': 'detail', 'token': ''}
        data = self.post(f"{self.host}/api.php/vod/detail2", data=body, headers=self.headers).json()
        v = data['data']
        vod = {
            'type_name': v.get('type_name'),
            'vod_year': v.get('vod_year'),
            'vod_area': v.get('vod_area'),
            'vod_lang': v.get('vod_lang'),
            'vod_remarks': v.get('vod_remarks'),
            'vod_actor': v.get('vod_actor'),
            'vod_director': v.get('vod_director'),
            'vod_content': v.get('vod_content')
        }
        n, p = [], []
        for i in v['vod_play_list']:
            pp = i['player_info']
            n.append(pp['show'])
            np = []
            for j in i['urls']:
                cd = {'parse': pp.get('parse'), 'url': j['url'], 'headers': pp.get('headers')}
                np.append(f"{j['name']}${self.e64(json.dumps(cd))}")
            p.append('#'.join(np))
        vod.update({'vod_play_from': '$$$'.join(n), 'vod_play_url': '$$$'.join(p)})
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        data = self.fetch(f"{self.host}/api.php/vod/search", params={'keywords': key, 'type': '1', 'pg': pg}, headers=self.headers).json()
        return {'list': data['list'], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Unpack the play id; expose parse mirrors via the local proxy."""
        ids = json.loads(self.d64(id))
        headers = {}
        urls = ids['url']
        if ids.get('headers'):
            # Custom headers arrive as a single "Key=>Value" string.
            hs = ids['headers'].split('=>', 1)
            headers[hs[0].strip()] = hs[-1].strip()
        if isinstance(ids.get('parse'), list) and len(ids['parse']) > 0:
            urls = []
            for i, x in enumerate(ids['parse']):
                su = f"{self.getProxyUrl()}&url={quote(x + ids['url'])}"
                urls.extend([f'解析{i + 1}', su])
        return {'parse': 0, 'url': urls, 'header': headers}

    def localProxy(self, param):
        """Resolve a parse-api url to the real stream and 302-redirect to it."""
        try:
            body = {'url': unquote(param['url'])}
            data = self.post(f"{self.host}/api.php/vod/m_jie_xi", data=body, headers=self.headers).json()
            url = data.get('url') or data['data'].get('url')
            return [302, 'video/MP2T', None, {'Location': url}]
        except Exception:
            # FIX: narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are not swallowed; resolution remains best-effort.
            return []

    def liveContent(self, url):
        pass

    def fts(self, tdata):
        """Fetch one category's first page and derive its filter options."""
        params = {'state': tdata['type_id'], 'pg': '1'}
        data = self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        ftks = ["classes", "area", "lang", "year", "version", "state"]
        # Renamed from `filter` to avoid shadowing the builtin.
        options = [
            {
                'name': k,
                'key': k,
                'value': [{'n': i, 'v': i} for i in v.split(',')]
            }
            for k, v in data['data']['classes']["type_extend"].items()
            if k in ftks and v
        ]
        return tdata['type_id'], options

    def getunc(self):
        """Random 16-char lowercase/digit token, base64 encoded."""
        chars = string.ascii_lowercase + string.digits
        data = ''.join(random.choice(chars) for _ in range(16))
        return self.e64(data)

    def e64(self, text):
        """Base64-encode ``text`` (UTF-8); return "" on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; return "" on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            return ""
diff --git a/PyramidStore/plugin/app/火车太顺APP.py b/PyramidStore/plugin/app/火车太顺APP.py
new file mode 100644
index 0000000..6a80179
--- /dev/null
+++ b/PyramidStore/plugin/app/火车太顺APP.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from urllib.parse import urlparse
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """火车太顺 APP spider.

    Requests carry an RSA-encrypted parameter blob ("pack") plus an
    HMAC-MD5 signature; some responses come back AES-encrypted.
    """

    def init(self, extend=""):
        self.device = self.device_id()
        self.host = self.gethost()

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Decrypt app config and derive classes plus per-type filters."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        # Drop the placeholder first sort option, then adapt to n/v pairs.
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        # First entry is the "all" placeholder.
                        item[dkey].pop(0)
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Home recommendations flattened across sections."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        videos.append(self.voides(it))
                    except Exception as e:
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Paged, filtered category listing."""
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        items = []  # renamed from `list` to avoid shadowing the builtin
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            items.append(self.voides(item))
        result["list"] = items
        result["page"] = pg
        # Totals are unknown; advertise effectively endless paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail view; missing per-source play lists are fetched concurrently."""
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        # NOTE(review): 'dynami' looks like a typo of 'dynamic' (used in
        # voides) — kept as-is; confirm against the API payload.
        video = {'vod_name': data.get('name'), 'type_name': data.get('type_name'), 'vod_year': data.get('year'),
                 'vod_area': data.get('area'), 'vod_remarks': data.get('dynami'), 'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        for it in data:
            try:
                videos.append(self.voides(it))
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id; base64-packed ids go through the parse endpoint."""
        url = id
        if not re.search(r"\.m3u8|\.mp4", url):
            try:
                data = json.loads(b64decode(id.encode('utf-8')).decode('utf-8'))
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                url = data2.get('play_url') or data2.get('download_url')
            except Exception:
                # Best-effort: fall back to the raw id on any failure.
                pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Streams disguised with image extensions go through the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def Mproxy(self, url):
        """Route ``url`` through the local m3u8 proxy."""
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param, header=None):
        """Local proxy: fetch an m3u8 and absolutize relative segment lines.

        FIX: the original rewrote relative lines using only their directory
        prefix (``string[:last_slash_index + 1]``), dropping the segment file
        name; it also shadowed the imported ``string`` module with its loop
        variable. The full relative path is now joined against the origin.
        """
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, line in enumerate(lines):
            if line and '#EXT' not in line and 'http' not in line:
                lines[index] = durl + ('' if line.startswith('/') else '/') + line
        data = '\n'.join(lines)
        # NOTE(review): MIME type is missing the trailing 'l' ("mpegurl") —
        # players appear to tolerate it; confirm before changing.
        return [200, "application/vnd.apple.mpegur", data]

    def device_id(self):
        """Random 32-char lowercase/digit device identifier."""
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the API host via DoH (DNS-over-HTTPS), with a static fallback."""
        try:
            url = 'https://dns.alidns.com/dns-query'
            headers = {
                'User-Agent': 'okhttp/4.9.2',
                'Accept': 'application/dns-message'
            }
            params = {
                'dns': 'AAABAAABAAAAAAAACWJmbTExYXM5ZgdmdXFpeXVuAmNuAAAcAAE'
            }
            response = self.fetch(url, headers=headers, params=params)
            # The QNAME starts at offset 12 of the DNS wire-format message.
            host = self.parse_dns_name(response.content, 12)
            return f"https://{host}"
        except Exception:
            # FIX: narrowed from a bare except; resolution stays best-effort.
            return "https://bfm11as9f.fuqiyun.cn"

    def parse_dns_name(self, data, offset):
        """Decode a DNS wire-format name starting at ``offset``."""
        parts = []
        while True:
            length = data[offset]
            if length == 0:
                break
            offset += 1
            parts.append(data[offset:offset + length].decode('utf-8'))
            offset += length
        return '.'.join(parts)

    def header(self):
        """Request headers mimicking the Android client."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': str(int(time.time())),
            'x-client-setting': '{"pure-mode":0}',
            # NOTE(review): this value is malformed JSON (stray brace after the
            # device id) — kept byte-for-byte since the server accepts it.
            'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """RSA-encrypt the request params; return (pack, hmac-md5 signature)."""
        if not id:
            id = {}
        id["timestamp"] = str(int(time.time()))
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        # URL-safe base64 without padding.
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one source's episode list; empty list on failure."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """Format episodes as 'name$url'; indirect urls are base64-packed JSON."""
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                vd = {"from_code": it['from_code'], "play_url": it['play_url'], "episode_id": it['episode_id'], "type": "play"}
                play_urls.append(
                    f"{it['episode_name']}${b64encode(json.dumps(vd).encode('utf-8')).decode('utf-8')}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Normalize one recommendation entry; None when it has no usable name."""
        if item['name'] or item['title']:
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide
        return None

    def aes(self, text):
        """Decode url-safe base64 text and AES-CBC-decrypt it into JSON."""
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
diff --git a/PyramidStore/plugin/app/热播APP.py b/PyramidStore/plugin/app/热播APP.py
new file mode 100644
index 0000000..5b5725d
--- /dev/null
+++ b/PyramidStore/plugin/app/热播APP.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import time
+import requests
+from base64 import b64decode, b64encode
+from Crypto.Hash import MD5
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='http://v.rbotv.cn'
+
+ headers = {
+ 'User-Agent': 'okhttp-okgo/jeasonlzy',
+ 'Accept-Language': 'zh-CN,zh;q=0.8'
+ }
+
    def homeContent(self, filter):
        """Build the category list and per-category filters for the home page.

        Posts the signed multipart form (see getfiles) to /v3/type/top_type.
        Any list-valued field on a category record with more than two entries
        is exposed as a filter group; '全部' ("all") entries are dropped.
        """
        data=requests.post(f'{self.host}/v3/type/top_type',headers=self.headers,files=self.getfiles({'': (None, '')})).json()
        result = {}
        classes = []
        filters = {}
        for k in data['data']['list']:
            classes.append({
                'type_name': k['type_name'],
                'type_id': k['type_id']
            })
            fts = []
            # every list field with >2 options becomes a filter; key == label
            for i,x in k.items():
                if isinstance(x, list) and len(x)>2:
                    fts.append({
                        'name': i,
                        'key': i,
                        'value': [{'n': j, 'v': j} for j in x if j and j!= '全部']
                    })
            if len(fts):filters[k['type_id']] = fts
        result['class'] = classes
        result['filters'] = filters
        return result
+
+ def homeVideoContent(self):
+ data=requests.post(f'{self.host}/v3/type/tj_vod',headers=self.headers,files=self.getfiles({'': (None, '')})).json()
+ return {'list':self.getv(data['data']['cai']+data['data']['loop'])}
+
    def categoryContent(self, tid, pg, filter, extend):
        """List videos for category `tid`, page `pg`, applying filter values.

        The UI filter key 'extend' is renamed to the API field 'class'.
        pagecount/total are sentinel values so the UI keeps paging until the
        server returns an empty list.
        """
        files = {
            'type_id': (None, tid),
            'limit': (None, '12'),
            'page': (None, pg)
        }
        for k,v in extend.items():
            if k=='extend':k='class'
            files[k] = (None, v)
        data=requests.post(f'{self.host}/v3/home/type_search',headers=self.headers,files=self.getfiles(files)).json()
        result = {}
        result['list'] = self.getv(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
    def detailContent(self, ids):
        """Fetch details for one vod id and flatten its play lists.

        Each episode is encoded as base64 JSON carrying the url plus optional
        parser list ('p'), referer ('r') and user-agent ('u'); playerContent
        decodes it again.  vod_content is run through pyquery twice to strip
        nested HTML markup.
        """
        data=requests.post(f'{self.host}/v3/home/vod_details',headers=self.headers,files=self.getfiles({'vod_id': (None, ids[0])})).json()
        v=data['data']
        vod = {
            'vod_name': v.get('vod_name'),
            'type_name': v.get('type_name'),
            'vod_year': v.get('vod_year'),
            'vod_area': v.get('vod_area'),
            'vod_remarks': v.get('vod_remarks'),
            'vod_actor': v.get('vod_actor'),
            'vod_director': v.get('vod_director'),
            'vod_content': pq(pq(v.get('vod_content','无') or '无').text()).text()
        }
        # n: one "线路N(flag)" label per play line; p: matching '#'-joined urls
        n,p=[],[]
        for o,i in enumerate(v['vod_play_list']):
            n.append(f"线路{o+1}({i.get('flag')})")
            c=[]
            for j in i.get('urls'):
                d={'url':j.get('url'),'p':i.get('parse_urls'),'r':i.get('referer'),'u':i.get('ua')}
                c.append(f"{j.get('name')}${self.e64(json.dumps(d))}")
            p.append('#'.join(c))
        vod.update({'vod_play_from':'$$$'.join(n),'vod_play_url':'$$$'.join(p)})
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ files = {
+ 'limit': (None, '12'),
+ 'page': (None, pg),
+ 'keyword': (None, key),
+ }
+ data=requests.post(f'{self.host}/v3/home/search',headers=self.headers,files=self.getfiles(files)).json()
+ return {'list':self.getv(data['data']['list']),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ ids=json.loads(self.d64(id))
+ url=ids['url']
+ if isinstance(ids['p'],list) and len(ids['p']):
+ url=[]
+ for i,x in enumerate(ids['p']):
+ up={'url':ids['url'],'p':x,'r':ids['r'],'u':ids['u']}
+ url.extend([f"解析{i+1}",f"{self.getProxyUrl()}&data={self.e64(json.dumps(up))}"])
+ h={}
+ if ids.get('r'):
+ h['Referer'] = ids['r']
+ if ids.get('u'):
+ h['User-Agent'] = ids['u']
+ return {'parse': 0, 'url': url, 'header': h}
+
    def localProxy(self, param):
        """Resolve a parser API call and redirect the player to the real URL.

        `param['data']` is the base64 JSON packed by playerContent; the parser
        endpoint is called as '<p><url>' and must answer JSON with the final
        address under 'url' or 'data.url'.  Returns a 302 redirect tuple.
        """
        data=json.loads(self.d64(param['data']))
        h = {}
        if data.get('r'):
            h['Referer'] = data['r']
        if data.get('u'):
            h['User-Agent'] = data['u']
        res=self.fetch(f"{data['p']}{data['url']}",headers=h).json()
        url=res.get('url') or res['data'].get('url')
        return [302,'video/MP2T',None,{'Location':url}]
+
+ def liveContent(self, url):
+ pass
+
+ def getfiles(self, p=None):
+ if p is None:p = {}
+ t=str(int(time.time()))
+ h = MD5.new()
+ h.update(f"7gp0bnd2sr85ydii2j32pcypscoc4w6c7g5spl{t}".encode('utf-8'))
+ s = h.hexdigest()
+ files = {
+ 'sign': (None, s),
+ 'timestamp': (None, t)
+ }
+ p.update(files)
+ return p
+
+ def getv(self,data):
+ videos = []
+ for i in data:
+ if i.get('vod_id') and str(i['vod_id']) != '0':
+ videos.append({
+ 'vod_id': i['vod_id'],
+ 'vod_name': i.get('vod_name'),
+ 'vod_pic': i.get('vod_pic') or i.get('vod_pic_thumb'),
+ 'vod_year': i.get('tag'),
+ 'vod_remarks': i.get('vod_remarks')
+ })
+ return videos
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ return ""
\ No newline at end of file
diff --git a/PyramidStore/plugin/app/爱瓜TVAPP.py b/PyramidStore/plugin/app/爱瓜TVAPP.py
new file mode 100644
index 0000000..e3b1e51
--- /dev/null
+++ b/PyramidStore/plugin/app/爱瓜TVAPP.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+# 温馨提示:搜索只能搜拼音联想
+# 播放需要挂代理
+import sys
+import time
+import uuid
+from Crypto.Hash import MD5
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.uid = self.getuid()
+ self.token, self.code = self.getuserinfo()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://tvapi211.magicetech.com'
+
+ headers = {'User-Agent': 'okhttp/3.11.0'}
+
    def homeContent(self, filter):
        """Build category tabs and filters from the filter-header endpoint.

        Every non-empty search_box group becomes a filter; channel ids are
        stringified because the UI matches type_id as text.
        """
        body = {'token': self.token, 'authcode': self.code}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-header', json=self.getbody(body),
                         headers=self.headers).json()
        result = {}
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['channel_name'],
                'type_id': str(k['channel_id']),
            })
            filters[str(k['channel_id'])] = []
            for i in k['search_box']:
                if len(i['list']):
                    filters[str(k['channel_id'])].append({
                        'key': i['field'],
                        'name': i['label'],
                        'value': [{'n': j['display'], 'v': str(j['value'])} for j in i['list'] if j['value']]
                    })
        result['class'] = classes
        result['filters'] = filters
        return result
+
+ def homeVideoContent(self):
+ body = {'token': self.token, 'authcode': self.code}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/index-tv', json=self.getbody(body),
+ headers=self.headers).json()
+ return {'list': self.getlist(data['data'][0]['banner'])}
+
    def categoryContent(self, tid, pg, filter, extend):
        """Filtered channel listing; '0' means "no filter" for each field.

        pagecount/total are sentinel values so the UI keeps paging until the
        server returns an empty list.
        """
        body = {'token': self.token, 'authcode': self.code, 'channel_id': tid, 'area': extend.get('area', '0'),
                'year': extend.get('year', '0'), 'sort': extend.get('sort', '0'), 'tag': extend.get('tag', 'hot'),
                'status': extend.get('status', '0'), 'page_num': pg, 'page_size': '24'}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-video', json=self.getbody(body),
                         headers=self.headers).json()
        result = {}
        result['list'] = self.getlist(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@')
+ body = {'token': self.token, 'authcode': self.code, 'channel_id': ids[0], 'video_id': ids[1]}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/detail', json=self.getbody(body),
+ headers=self.headers).json()
+ vdata = {}
+ for k in data['data']['chapters']:
+ i = k['sourcelist']
+ for j in i:
+ if j['source_name'] not in vdata: vdata[j['source_name']] = []
+ vdata[j['source_name']].append(f"{k['title']}${j['source_url']}")
+ plist, names = [], []
+ for key, value in vdata.items():
+ names.append(key)
+ plist.append('#'.join(value))
+ vod = {
+ 'vod_play_from': '$$$'.join(names),
+ 'vod_play_url': '$$$'.join(plist),
+ }
+ return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ body = {'token': self.token, 'authcode': self.code, 'keyword': key, 'page_num': pg}
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/search/letter-result', json=self.getbody(body),
+ headers=self.headers).json()
+ return {'list': self.getlist(data['data']['list'])}
+
+ def playerContent(self, flag, id, vipFlags):
+ # https://rysp.tv
+ # https://aigua.tv
+ result = {
+ "parse": 0,
+ "url": id,
+ "header": {
+ "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)",
+ "Origin": "https://aigua.tv",
+ "Referer": "https://aigua.tv/"
+ }
+ }
+ return result
+
+ def localProxy(self, param):
+ pass
+
+ def getuserinfo(self):
+ data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/user/auth-login', json=self.getbody(),
+ headers=self.headers).json()
+ v = data['data']
+ return v['user_token'], v['authcode']
+
+ def getuid(self):
+ uid = self.getCache('uid')
+ if not uid:
+ uid = str(uuid.uuid4())
+ self.setCache('uid', uid)
+ return uid
+
    def getbody(self, json_data=None):
        """Merge the common device params into the body and sign it.

        Sign: MD5 over 'jI7POOBbmiUZ0lmi' + '&'-joined key=value pairs
        (keys sorted, empty values skipped) + 'D9ShYdN51ksWptpkTu11yenAJu7Zu3cR',
        upper-cased.  Mutates and returns the passed dict.
        """
        if json_data is None: json_data = {}
        params = {"product": "4", "ver": "1.1.0", "debug": "1", "appId": "1", "osType": "3", "marketChannel": "tv",
                  "sysVer": "11", "time": str(int(time.time())), "packageName": "com.gzsptv.gztvvideo",
                  "udid": self.uid, }
        json_data.update(params)
        sorted_json = dict(sorted(json_data.items(), key=lambda item: item[0]))
        text = '&'.join(f"{k}={v}" for k, v in sorted_json.items() if v != '')
        md5_hash = self.md5(f"jI7POOBbmiUZ0lmi{text}D9ShYdN51ksWptpkTu11yenAJu7Zu3cR").upper()
        json_data.update({'sign': md5_hash})
        return json_data
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+ def getlist(self, data):
+ videos = []
+ for i in data:
+ if type(i.get('video')) == dict: i = i['video']
+ videos.append({
+ 'vod_id': f"{i.get('channel_id')}@{i.get('video_id')}",
+ 'vod_name': i.get('video_name'),
+ 'vod_pic': i.get('cover'),
+ 'vod_year': i.get('score'),
+ 'vod_remarks': i.get('flag'),
+ })
+ return videos
+
diff --git a/PyramidStore/plugin/app/皮皮虾APP.py b/PyramidStore/plugin/app/皮皮虾APP.py
new file mode 100644
index 0000000..6d8c14d
--- /dev/null
+++ b/PyramidStore/plugin/app/皮皮虾APP.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = "http://ppx.bjx365.top"
+ pass
+
+ def getName(self):
+ pass
+
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ data = self.getdata("/api.php/getappapi.index/initV119")
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["type_list"]
+ homedata = data["banner_list"][8:]
+ for item in json_data:
+ if item["type_name"] == "全部":
+ continue
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item["type_extend"])
+ homedata.extend(item["recommend_list"])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+ classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ result["list"] = homedata[1:]
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
+ "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
+ "class": extend.get('class', '全部')}
+ result = {}
+ data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
+ result["list"] = data["recommend_list"]
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
    def detailContent(self, ids):
        """Vod detail: tag each episode with its player's UA/parse info.

        Every episode dict is augmented with the line's user_agent and parse
        endpoint, then serialized to base64 JSON so playerContent is
        self-contained.
        """
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                it['user_agent']=itt["player_info"].get("user_agent")
                it["parse"]=itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result
+
+ def searchContent(self, key, quick, pg="1"):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ data = self.getdata("/api.php/getappapi.index/searchList", body)
+ result = {"list": data["search_list"], "page": pg}
+ return result
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve the base64 play payload built by detailContent.

        Three paths: a '?url=' parse API is fetched directly; a non-media url
        goes through the server-side vodParse endpoint (AES-encrypting the
        url); otherwise the url is used as-is.  Image-looking urls are routed
        through Mproxy.
        NOTE(review): the '?url=' branch leaves p=1 (UI re-parses the resolved
        url) while the vodParse branch sets p=0 — confirm this asymmetry is
        intended.
        """
        ids = json.loads(self.d64(id))
        h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        url = ids['url']
        p=1
        try:
            if re.search(r'\?url=', ids['parse_api_url']):
                data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url=data.get('url') or data['data'].get('url')
            elif not re.search(r'\.m3u8|\.mp4', ids.get('url')):
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes('encrypt', ids['url']))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
                p=0
        except Exception as e:
            print('错误信息:',e)
            pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result
+
+ def localProxy(self, param):
+ return self.Mlocal(param)
+
    def aes(self, operation, text):
        """AES-128-CBC helper; the IV equals the key (matches the app's scheme).

        'encrypt': UTF-8 → PKCS#7 pad → AES → base64 string.
        'decrypt': base64 → AES → unpad → UTF-8 string.
        Returns None (implicitly) for any other operation value.
        """
        key = "pipixia217522324".encode("utf-8")
        iv = key
        if operation == "encrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        elif operation == "decrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")
+
    def header(self):
        """Signed request headers for each API call.

        The AES-encrypted unix timestamp is the verify-sign; the timestamp's
        MD5 doubles as the device id.
        """
        t = str(int(time.time()))
        header = {"Referer":self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "300", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes("encrypt", t),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header
+
+ def getdata(self, path, data=None):
+ vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
+ data1 = self.aes("decrypt", vdata)
+ return json.loads(data1)
+
+ def Mproxy(self, url):
+ return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
+
    def Mlocal(self, param,header=None):
        """Fetch an m3u8 playlist (following one redirect) and absolutize paths.

        NOTE(review): for a relative segment line the rewrite keeps only the
        directory part (`lpath`) and drops the final path component — it looks
        like the full `string` should be appended; confirm against a real
        playlist before relying on this.
        NOTE(review): the returned MIME type 'application/vnd.apple.mpegur'
        is missing the trailing 'l' ('...mpegurl'); verify players accept it.
        """
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            # rewrite bare relative lines (no #EXT tag, no absolute http url)
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        return [200, "application/vnd.apple.mpegur", data]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
+
+
diff --git a/PyramidStore/plugin/app/美帕APP.py b/PyramidStore/plugin/app/美帕APP.py
new file mode 100644
index 0000000..bcb3a51
--- /dev/null
+++ b/PyramidStore/plugin/app/美帕APP.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+ def getName(self):
+ return "mp"
+
+ def init(self, extend=""):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://g.c494.com'
+
+ header = {
+ 'User-Agent': 'Dart/2.10 (dart:io)',
+ 'platform_version': 'RP1A.200720.011',
+ 'version': '2.2.3',
+ 'copyright': 'xiaogui',
+ 'platform': 'android',
+ 'client_name': '576O5p+P5b2x6KeG',
+ }
+
+ def homeContent(self, filter):
+ data = self.fetch(f'{self.host}/api.php/app/nav?token=', headers=self.header).json()
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["list"]
+ for item in json_data:
+ has_non_empty_field = False
+ jsontype_extend = item["type_extend"]
+ classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ return result
+
+ def homeVideoContent(self):
+ rsp = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.header)
+ root = rsp.json()['list']
+ videos = [item for vodd in root for item in vodd['vlist']]
+ return {'list': videos}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ parms = {"pg": pg, "tid": tid, "class": extend.get("class", ""), "area": extend.get("area", ""),
+ "lang": extend.get("lang", ""), "year": extend.get("year", ""), "token": ""}
+ data = self.fetch(f'{self.host}/api.php/app/video', params=parms, headers=self.header).json()
+ return data
+
+ def detailContent(self, ids):
+ parms = {"id": ids[0], "token": ""}
+ data = self.fetch(f'{self.host}/api.php/app/video_detail', params=parms, headers=self.header).json()
+ vod = data['data']
+ vod.pop('pause_advert_list', None)
+ vod.pop('init_advert_list', None)
+ vod.pop('vod_url_with_player', None)
+ return {"list": [vod]}
+
+ def searchContent(self, key, quick, pg='1'):
+ parms = {'pg': pg, 'text': key, 'token': ''}
+ data = self.fetch(f'{self.host}/api.php/app/search', params=parms, headers=self.header).json()
+ return data
+
+ def playerContent(self, flag, id, vipFlags):
+ return {"parse": 0, "url": id, "header": {'User-Agent': 'User-Agent: Lavf/58.12.100'}}
+
+ def localProxy(self, param):
+ pass
diff --git a/PyramidStore/plugin/app/胖虎APP.py b/PyramidStore/plugin/app/胖虎APP.py
new file mode 100644
index 0000000..a76035f
--- /dev/null
+++ b/PyramidStore/plugin/app/胖虎APP.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+sys.path.append('..')
+from base.spider import Spider
+from Cryptodome.Cipher import AES
+from Cryptodome.Util.Padding import pad, unpad
+from base64 import b64encode, b64decode
+import json
+import time
+
+
+class Spider(Spider):
+ def getName(self):
+ return "py_胖虎"
+
+ def init(self, extend=""):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ def aes(self, operation, text):
+ key = "ihIwTbt2YAe9TGea".encode('utf-8')
+ iv = key
+
+ if operation == 'encrypt':
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ ct_bytes = cipher.encrypt(pad(text.encode('utf-8'), AES.block_size))
+ ct = b64encode(ct_bytes).decode('utf-8')
+ return ct
+ elif operation == 'decrypt':
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+ return pt.decode('utf-8')
+
+ host = "http://sm.physkan.top:3389"
+ t = str(int(time.time()))
+
+ def homeContent(self, filter):
+ self.header = {
+ 'User-Agent': 'okhttp/3.14.9',
+ 'app-version-code': '402',
+ 'app-ui-mode': 'light',
+ 'app-user-device-id': '25f869d32598d3d3089a929453dff0bb7',
+ 'app-api-verify-time': self.t,
+ 'app-api-verify-sign': self.aes('encrypt', self.t),
+ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
+ }
+ data = self.fetch("{0}/api.php/getappapi.index/initV119".format(self.host), headers=self.header).content.decode(
+ 'utf-8')
+ data1 = json.loads(data)['data']
+ print(data1)
+ data2 = self.aes('decrypt', data1)
+ dy = {
+ "class": "类型",
+ "area": "地区",
+ "lang": "语言",
+ "year": "年份",
+ "letter": "字母",
+ "by": "排序",
+ "sort": "排序"
+ }
+
+ filter = {}
+ classes = []
+ json_data = json.loads(data2)['type_list']
+ self.homedata = json.loads(data2)['banner_list']
+
+ for item in json_data:
+ if item['type_name'] == '全部':
+ continue
+
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item['type_extend'])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+
+ classes.append({
+ "type_name": item['type_name'],
+ "type_id": item['type_id']
+ })
+
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+
+ if has_non_empty_field:
+ filter[str(item['type_id'])] = []
+
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(',')
+ value_array = [
+ {"n": value.strip(), "v": value.strip()}
+ for value in values if value.strip() != ''
+ ]
+
+ filter[str(item['type_id'])].append({
+ "key": dkey,
+ "name": dy[dkey],
+ "value": value_array
+ })
+ result = {}
+ result['class'] = classes
+ result['filter'] = filter
+ return result
+
+ def homeVideoContent(self):
+ result = {
+ 'list': self.homedata
+ }
+ return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = f"area={extend.get('area', '全部')}&year={extend.get('year', '全部')}&type_id={tid}&page={pg}&sort={extend.get('sort', '最新')}&lang={extend.get('lang', '全部')}&class={extend.get('class', '全部')}"
+ result = {}
+ url = '{0}/api.php/getappapi.index/typeFilterVodList'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = self.aes('decrypt', data1)
+ result['list'] = json.loads(data2)['recommend_list']
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = f"vod_id={ids[0]}"
+ print(body)
+ url = '{0}/api.php/getappapi.index/vodDetail'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = json.loads(self.aes('decrypt', data1))
+ print(data2)
+ vod = data2['vod']
+ print(vod)
+ play = []
+ names = []
+ for itt in data2['vod_play_list']:
+ a = []
+ names.append(itt['player_info']['show'])
+ parse = itt['player_info']['parse']
+ for it in itt['urls']:
+ if re.search(r'mp4|m3u8', it['url']):
+ a.append(f"{it['name']}${it['url']}")
+ elif re.search(r'www.yemu.xyz', it['parse_api_url']):
+ a.append(f"{it['name']}${it['parse_api_url']}")
+ else:
+ a.append(
+ f"{it['name']}${'parse_api=' + parse + '&url=' + self.aes('encrypt', it['url']) + '&token=' + it['token']}")
+ play.append('#'.join(a))
+ vod['vod_play_from'] = '$$$'.join(names)
+ vod['vod_play_url'] = '$$$'.join(play)
+ result = {
+ 'list': [
+ vod
+ ]
+ }
+ return result
+
+ def searchContent(self, key, quick, pg='1'):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ url = '{0}/api.php/getappapi.index/searchList'.format(self.host)
+ data = self.post(url, headers=self.header, data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = self.aes('decrypt', data1)
+ result = {
+ 'list': json.loads(data2)['search_list']
+ }
+ return result
+
+ def playerContent(self, flag, id, vipFlags):
+ def edu(str):
+ def replacer(match):
+ from urllib.parse import quote_plus
+ return match.group(1) + quote_plus(match.group(2)) + match.group(3)
+
+ return re.sub(r'(url=)(.*?)(&token)', replacer, str)
+
+ url = id
+ parse = 0
+ if 'm3u8' not in url and 'mp4' not in url:
+ try:
+ body = edu(url)
+ print(body)
+ data = self.post('{0}/api.php/getappapi.index/vodParse'.format(self.host), headers=self.header,
+ data=body).content.decode('utf-8')
+ data1 = json.loads(data)['data']
+ data2 = json.loads(self.aes('decrypt', data1))['json']
+ url = json.loads(data2)['url']
+ except:
+ url = id
+ parse = 1
+ if not id.startswith('https://www.yemu.xyz'):
+ url = 'https://www.yemu.xyz/?url={0}'.format(id)
+ result = {}
+ print(url)
+ headers = self.header.copy()
+ del headers['Content-type']
+ result["parse"] = parse
+ result["url"] = url
+ result["header"] = headers
+ return result
+
+ def localProxy(self, param):
+ pass
diff --git a/PyramidStore/plugin/app/若惜追剧APP.py b/PyramidStore/plugin/app/若惜追剧APP.py
new file mode 100644
index 0000000..1b33d5d
--- /dev/null
+++ b/PyramidStore/plugin/app/若惜追剧APP.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor
+import json
+import time
+from base.spider import Spider
+
+class Spider(Spider):
+
+ '''
+ sites照常配置,
+ lives配置:
+ {
+ "name": "xxxx",
+ "type": 3,
+ "api": "路径/若惜追剧APP.py",
+ "ext": ""
+ }
+ '''
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def getName(self):
+ pass
+
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ def homeContent(self, filter):
+ data = self.getdata("/api.php/getappapi.index/initV119")
+ dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
+ "sort": "排序"}
+ filters = {}
+ classes = []
+ json_data = data["type_list"]
+ homedata = data["banner_list"][8:]
+ for item in json_data:
+ if item["type_name"] == "全部":
+ continue
+ has_non_empty_field = False
+ jsontype_extend = json.loads(item["type_extend"])
+ homedata.extend(item["recommend_list"])
+ jsontype_extend["sort"] = "最新,最热,最赞"
+ classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
+ for key in dy:
+ if key in jsontype_extend and jsontype_extend[key].strip() != "":
+ has_non_empty_field = True
+ break
+ if has_non_empty_field:
+ filters[str(item["type_id"])] = []
+ for dkey in jsontype_extend:
+ if dkey in dy and jsontype_extend[dkey].strip() != "":
+ values = jsontype_extend[dkey].split(",")
+ value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
+ value.strip() != ""]
+ filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
+ result = {}
+ result["class"] = classes
+ result["filters"] = filters
+ result["list"] = homedata[1:]
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
+ "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
+ "class": extend.get('class', '全部')}
+ result = {}
+ data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
+ result["list"] = data["recommend_list"]
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
+ def detailContent(self, ids):
+ body = f"vod_id={ids[0]}"
+ data = self.getdata("/api.php/getappapi.index/vodDetail", body)
+ vod = data["vod"]
+ play = []
+ names = []
+ for itt in data["vod_play_list"]:
+ a = []
+ names.append(itt["player_info"]["show"])
+ for it in itt['urls']:
+ it['user_agent']=itt["player_info"].get("user_agent")
+ it["parse"]=itt["player_info"].get("parse")
+ a.append(f"{it['name']}${self.e64(json.dumps(it))}")
+ play.append("#".join(a))
+ vod["vod_play_from"] = "$$$".join(names)
+ vod["vod_play_url"] = "$$$".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ body = f"keywords={key}&type_id=0&page={pg}"
+ data = self.getdata("/api.php/getappapi.index/searchList", body)
+ result = {"list": data["search_list"], "page": pg}
+ return result
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve the base64 play payload built by detailContent.

        A 'url=' style parse API is fetched directly; anything else goes
        through the server-side vodParse endpoint (AES-encrypting the url).
        Any failure — including an 'error' marker in the resolved url — falls
        back to the raw url with parse=1.  Image-looking urls are routed
        through Mproxy.
        """
        ids = json.loads(self.d64(id))
        h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url=data.get('url') or data['data'].get('url')
            else:
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'],True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url:raise ValueError(f"解析失败: {url}")
            p=0
        except Exception as e:
            print('错误信息:',e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result
+
+ def liveContent(self, url):
+ id=self.homeContent(True)['class'][-1]['type_id']
+ vlist=self.categoryContent(id,1,False,{})['list']
+ results = []
+ with ThreadPoolExecutor(max_workers=len(vlist)) as executor:
+ futures = [executor.submit(self.livedetailContent, item['vod_name'], item['vod_id']) for item in vlist]
+ for future in futures:
+ try:
+ detail = future.result()
+ if detail:
+ results.append(detail)
+ except Exception as e:
+ print(f"处理详情数据失败: {str(e)}")
+ return '\n'.join(results)
+
+ def livedetailContent(self, name,id):
+ try:
+ print(f"获取直播源:{name}")
+ body = f"vod_id={id}"
+ data = self.getdata("/api.php/getappapi.index/vodDetail", body)
+ play = [f"{name},#genre#"]
+ for itt in data["vod_play_list"]:
+ for it in itt['urls']:
+ play.append(f"{it['name']}, {it['url']}")
+ except Exception as e:
+ print(f"获取直播源失败:{str(e)}")
+ play=[]
+ return '\n'.join(play)
+
+ def localProxy(self, param):
+ return self.Mlocal(param)
+
+ def gethost(self):
+ headers = {
+ 'User-Agent': 'okhttp/3.14.9'
+ }
+ host = self.fetch('https://rxysyyds.oss-cn-chengdu.aliyuncs.com/getapp.txt', headers=headers).text
+ return host.strip()
+
    def aes(self, text,b=None):
        """AES-128-CBC with IV == key; `b` truthy → encrypt, falsy → decrypt.

        Encrypt: UTF-8 → PKCS#7 pad → AES → base64 string.
        Decrypt: base64 → AES → unpad → UTF-8 string.
        """
        key = b"ebad3f1a58b13933"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else :
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")
+
+ def header(self):
+ t = str(int(time.time()))
+ header = {"Referer":self.host,
+ "User-Agent": "okhttp/3.14.9", "app-version-code": "140", "app-ui-mode": "light",
+ "app-api-verify-time": t, "app-user-device-id": self.md5(t),
+ "app-api-verify-sign": self.aes(t,True),
+ "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
+ return header
+
+ def getdata(self, path, data=None):
+ vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
+ data1 = self.aes(vdata)
+ return json.loads(data1)
+
+ def Mproxy(self, url):
+ return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"
+
+ def Mlocal(self, param,header=None):
+ url = self.d64(param["url"])
+ ydata = self.fetch(url, headers=header, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = self.fetch(url, headers=header).content.decode('utf-8')
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if '#EXT' not in string and 'http' not in string:
+ last_slash_index = string.rfind('/')
+ lpath = string[:last_slash_index + 1]
+ lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
diff --git a/PyramidStore/plugin/app/视觉APP.py b/PyramidStore/plugin/app/视觉APP.py
new file mode 100644
index 0000000..8d6df57
--- /dev/null
+++ b/PyramidStore/plugin/app/视觉APP.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append("..")
+import re
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from base64 import b64encode, b64decode
+import json
+from base.spider import Spider
+from urllib.parse import quote
+
+
+class Spider(Spider):
+
    def getName(self):
        """Display name of this source."""
        return "视觉"

    def init(self, extend=""):
        # Resolve the API host once at startup (self.host() reads token.txt).
        # NOTE: rebinding self.host shadows the host() method after init.
        self.host = self.host()
        pass

    def isVideoFormat(self, url):
        # Not used by this source.
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass
+
    def homeContent(self, filter):
        """Build the category list and per-category filter definitions.

        Fetches the category tree; each item's 'converUrl' is a JSON string
        mapping filter keys (class/area/lang/year/...) to comma-separated values.
        Returns {'class': [...], 'filters': {type_id: [...]}}.
        """
        data = self.fetch(
            f"{self.host}/api/v3/drama/getCategory?orderBy=type_id",
            headers=self.headers,
        ).json()
        # Filter key -> human-readable filter name.
        dy = {
            "class": "类型",
            "area": "地区",
            "lang": "语言",
            "year": "年份",
            "letter": "字母",
            "by": "排序",
            "sort": "排序",
        }
        filters = {}
        classes = []
        for item in data["data"]:
            has_non_empty_field = False
            jsontype_extend = json.loads(item["converUrl"])
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            # Only emit a filter group when at least one known key has values.
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in values
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result = {}
        result["class"] = classes
        result["filters"] = filters
        return result
+
    def homeVideoContent(self):
        """Home carousel: double-AES-decrypt the tag list and map carousels to vod items."""
        data = self.fetch(f"{self.host}/api/ex/v3/security/tag/list", headers=self.headers).json()["data"]
        # Payload is encrypted twice: outer key[0], then inner key[1] (JSON on the last pass).
        data1 = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)
        list = []
        for item in data1[0]['carousels']:
            # The vod id is embedded in the carousel deep link after 'id='.
            id = item['link'].split("id=")[1]
            list.append({
                "vod_id": id,
                'vod_name': item.get("title"),
                'vod_pic': item.get("cover"),
                'vod_remarks': item.get("sort"),
            })
        result = {"list": list}
        return result
+
+ def categoryContent(self, tid, pg, filter, extend):
+ params = []
+ if extend.get('area'):
+ params.append(f"vodArea={extend['area']}")
+ if extend.get('classs'):
+ params.append(f"vodClass={extend['class']}")
+ params.append("pagesize=20")
+ params.append(f"typeId1={tid}")
+ params.append(f"page={pg}")
+ if extend.get('year'):
+ params.append(f"vodYear={extend['year']}")
+ body = '&'.join(params)
+ path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
+ data = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()[
+ "data"]
+ data = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['list']
+ list = []
+ for item in data:
+ list.append({
+ 'vod_id': item.get("id"),
+ 'vod_pic': item["coverImage"].get("path"),
+ 'vod_name': item.get("name"),
+ 'vod_year': item.get("year"),
+ 'vod_remarks': item.get("remark")
+ })
+ result = {}
+ result["list"] = list
+ result["page"] = pg
+ result["pagecount"] = 9999
+ result["limit"] = 90
+ result["total"] = 999999
+ return result
+
    def detailContent(self, ids):
        """Fetch a title's detail page and flatten its sources/episodes into
        the vod_play_from / vod_play_url ('$$$' / '#' / '$' separated) format.
        """
        url = f"{self.host}/api/v3/drama/getDetail?id={ids[0]}"
        data = self.post(url, headers=self.headers).json()["data"]
        vod = {
            'vod_name': data.get("name"),
            'vod_area': data.get("area"),
            'type_name': data.get("clazz"),
            'vod_actor': data.get("actor"),
            'vod_director': data.get("director"),
            'vod_content': data.get("brief").strip(),
        }
        play = []
        names = []
        plays = {}
        for itt in data["videos"]:
            # First time we see a source, register its display name and bucket.
            if itt["sourceCn"] not in names:
                plays[itt["source"]] = []
                names.append(itt["sourceCn"])
            url = f"vodPlayFrom={itt['source']}&playUrl={itt['path']}"
            # Direct media files bypass the vodPlayFrom indirection.
            if re.search(r"\.(mp4|m3u8|flv)$", itt["path"]):
                url = itt["path"]
            plays[itt["source"]].append(f"{itt['titleOld']}${url}")
        for it in plays:
            play.append("#".join(plays[it]))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result
+
    def searchContent(self, key, quick, pg=1):
        """Keyword search; same double-AES request/response scheme as categoryContent."""
        body = f"pagesize=20&page={pg}&searchKeys={key}"
        path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
        data = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()[
            "data"]
        data = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['list']
        list = []
        for item in data:
            list.append({
                'vod_id': item.get("id"),
                'vod_pic': item["coverImage"].get("path"),
                'vod_name': item.get("name"),
                'vod_year': item.get("year"),
                'vod_remarks': item.get("remark")
            })
        result = {"list": list, "page": pg}
        return result
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id to a direct URL.

        Ids carrying 'vodPlayFrom' are resolved through the encrypted
        videoUsableUrl endpoint; image-looking URLs (disguised m3u8) are
        routed through the local proxy.
        """
        url = id
        if "vodPlayFrom" in url:
            try:
                path = self.aes(self.aes(id, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
                data = \
                    self.fetch(f"{self.host}/api/ex/v3/security/videoUsableUrl?query={path}", headers=self.headers).json()[
                        "data"]
                url = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['playUrl']
                # try:
                #     url1 = self.fetch(url, headers=self.headers, timeout=5, allow_redirects=False).headers['Location']
                #     if "http" in url1 and url1:
                #         url = url1
                # except:
                #     pass
            except Exception as e:
                # Best-effort: fall through with the original id on failure.
                pass
        if '.jpg' in url or '.jpeg' in url or '.png' in url:
            # Image extensions hide m3u8 payloads; serve via the local proxy.
            url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'User-Agent': 'okhttp/3.12.1'}
        return result
+
    def localProxy(self, param):
        """Proxy handler: fetch the disguised m3u8 and absolutize relative segment lines."""
        url = b64decode(param["url"]).decode('utf-8')
        # Base = everything up to the last '/' of the playlist URL (no trailing slash).
        durl = url[:url.rfind('/')]
        data = self.fetch(url, headers=self.headers).content.decode("utf-8")
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                lines[index] = durl + ('' if string.startswith('/') else '/') + string
        data = '\n'.join(lines)
        # NOTE(review): "mpegur" is likely a truncated "mpegurl" — confirm the player ignores it.
        return [200, "application/vnd.apple.mpegur", data]
+
    def host(self):
        """Resolve the current API domain from token.txt; fall back to a fixed IP on any error."""
        try:
            url = self.fetch('https://www.shijue.pro/token.txt', headers=self.headers).json()['domain']
            return url
        except:
            # Best-effort fallback when the bootstrap host is unreachable.
            return "http://118.25.18.217:6632"

    # Static headers used for every API request.
    headers = {
        'User-Agent': 'okhttp/3.12.1',
        'Content-Type': 'application/json;'
    }
    # AES keys: key[0] = outer layer, key[1] = inner layer of the doubly-encrypted payloads.
    key = ['TFLYWVJ5EG5YB1PLZLVVMGVLBGRIDCSW', 'nj6E5K4yYYT5W4ScJ3J3rJ2zrzcJkpTk']
+
    def aes(self, word, key, mode='decrypt', bool=False):
        """AES-ECB encrypt/decrypt with base64 transport.

        mode='decrypt': base64 -> AES-ECB -> unpad -> str (json.loads when `bool`).
        mode='encrypt': pad -> AES-ECB -> base64 (URL-quote when `bool`).
        NOTE: parameter name `bool` shadows the builtin; kept for interface compatibility.
        """
        key = key.encode('utf-8')
        if mode == 'decrypt':
            word = b64decode(word)
            cipher = AES.new(key, AES.MODE_ECB)
            decrypted = cipher.decrypt(word)
            word = unpad(decrypted, AES.block_size).decode('utf-8')
            if bool:
                word = json.loads(word)
        elif mode == 'encrypt':
            cipher = AES.new(key, AES.MODE_ECB)
            padded = pad(word.encode('utf-8'), AES.block_size)
            encrypted = cipher.encrypt(padded)
            word = b64encode(encrypted).decode('utf-8')
            if bool:
                word = quote(word)
        return word
+
+
diff --git a/PyramidStore/plugin/app/边缘影视APP.py b/PyramidStore/plugin/app/边缘影视APP.py
new file mode 100644
index 0000000..0ebb1dd
--- /dev/null
+++ b/PyramidStore/plugin/app/边缘影视APP.py
@@ -0,0 +1,340 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import binascii
+import json
+import os
+import re
+import sys
+import time
+import uuid
+from urllib.parse import urlparse
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+from base64 import b64encode, b64decode
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import AES, PKCS1_v1_5
+from Crypto.Util.Padding import unpad, pad
+from Crypto.Hash import MD5
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = {
+ 'AppID': '534',
+ 'app_id': '534',
+ 'version': '1.0.3',
+ 'package': 'com.hjmore.wallpaper',
+ 'user_id': '3507f394e83d2424',
+ 'user-id': '3507f394e83d2424',
+ 'app_name': 'lanlan',
+ 'app-name': 'lanlan',
+ 'Content-Type': 'application/json; charset=utf-8;',
+ 'User-Agent': 'okhttp/4.9.0'
+ }
+
    def homeContent(self, filter):
        """Home page: categories from the index endpoint, filters fetched per
        category in parallel via getf, plus the recommended video list."""
        hdata = self.getdata('/api.php/provide/index', self.getbody({'tid': '0'}))
        vlist = hdata['data'].get('tj', [])
        result = {}
        classes = []
        filters = {}
        for i in hdata['data']['sub_data']:
            id = str(i['type_id'])
            classes.append({'type_id': id, 'type_name': i['type_name']})
            if len(i['data']):
                vlist.extend(i['data'])
        # One worker per category; getf returns (type_id, filter_list).
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            for id, ft in results:
                if len(ft): filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        result['list'] = vlist
        return result

    def homeVideoContent(self):
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body={
+ "tid": tid,
+ "type": extend.get('type'),
+ "lang": extend.get('lang'),
+ "area": extend.get('area'),
+ "year": extend.get('year'),
+ "pg": pg
+ }
+ body = {k: v for k, v in body.items() if v is not None and v != ""}
+ data=self.getdata('/api.php/provide/nav',self.getbody(body))
+ result = {}
+ result['list'] = data['data']['data']
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+ pass
+
    def detailContent(self, ids):
        """Detail page: encode each episode's url (+ optional parser list) as
        base64 JSON so playerContent can decode it later."""
        data = self.getdata('/api.php/provide/vod', self.getbody({'ids': ids[0]}))
        vod = data['data']
        plist = []
        names = []
        for i in vod['vod_play_url']:
            ulist = []
            # Source name is the first token of e.g. "线路1 ...".
            names.append(i['name'].split(' ')[0])
            jdata = {'parse': ''}
            if i.get('parse') and isinstance(i['parse'], list) and len(i['parse']):
                # Parser endpoints travel with the episode, doubly base64-encoded.
                jdata['parse'] = self.e64(json.dumps(i['parse']))
            for j in i['data']:
                jdata['url'] = j['url']
                ulist.append(f'{j["name"]}${self.e64(json.dumps(jdata))}')
            plist.append('#'.join(ulist))
        vod['vod_play_from'] = '$$$'.join(names)
        vod['vod_play_url'] = '$$$'.join(plist)
        vod.pop('cover_list', None)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search; strips vod_play_from from results (not needed for lists)."""
        body = {"wd": key, "tid": "0", "pg": pg}
        data = self.getdata('/api.php/provide/search', self.getbody(body))
        vlist = []
        for i in data['data']:
            i.pop('vod_play_from', None)
            vlist.append(i)
        return {'list': vlist, 'page': pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Decode the episode payload; for non-direct URLs, try each parser
        endpoint in turn until one yields a playable URL."""
        data = json.loads(self.d64(id))
        parse = data.get('parse')
        url, p, head = data.get('url'), 1, ''
        if parse:
            parse = json.loads(self.d64(parse))
        if not re.search(r'\.m3u8|.mp4|\.flv', url) and parse:
            # NOTE(review): loop variable `p` deliberately doubles as the
            # parse flag (0 = direct, 1 = needs webview parse) — fragile but intentional-looking.
            for p in parse:
                try:
                    data = self.fetch(f'{p}{url}', self.headers).json()
                    url = data.get('data', {}).get('url') or data.get('url')
                    head = data.get('data', {}).get('header') or data.get('header')
                    p = 0
                    break
                except:
                    p, url = 1, data.get('url')
        # NOTE(review): this overwrites any parser-provided header — confirm intended.
        head = {'User-Agent': 'okhttp/4.9.0'}
        return {'parse': p, 'url': url, 'header': head}

    def localProxy(self, param):
        pass
+
    def getf(self, map):
        """Fetch the filter definitions (area/year/lang/type) for one category.

        Returns (type_id, filter_list); errors yield an empty filter list so
        homeContent's parallel map never raises.
        """
        ft, id = [], map['type_id']
        try:
            fdata = self.getdata('/api.php/provide/nav', self.getbody({'tid': id, 'pg': '1'}))
            dy = ['area', 'year', 'lang', 'type']
            fd = fdata['data']['type_extend']
            has_non_empty_field = False
            for key in dy:
                if key in fd and fd[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                for dkey in fd:
                    if dkey in dy and fd[dkey].strip() != "":
                        values = fd[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        ft.append({"key": dkey, "name": dkey, "value": value_array})
            return (id, ft)
        except:
            return (id, ft)

    def getskey(self):
        # Random 16-byte session key, hex-encoded (32 chars) — per-request AES key.
        random_bytes = os.urandom(16)
        return binascii.hexlify(random_bytes).decode()

    def getohost(self):
        """Bootstrap: read the first API server from the OSS-hosted config."""
        url = 'https://bianyuan001.oss-cn-beijing.aliyuncs.com/huidu1.0.0.json'
        response = self.fetch(url, headers=self.headers).json()
        return response['servers'][0]
+
    def gethost(self):
        """Query the grayscale endpoint (with a fixed fake-device body) and
        return scheme://netloc of the first advertised server_url."""
        # Device fingerprint fields are pre-URL-encoded constants captured from the app.
        body = {
            "gr_rp_size": "1080*2272",
            "gr_app_list": "%E5%B1%8F%E5%B9%95%E5%BD%95%E5%88%B6%EF%BC%88com.miui.screenrecorder%29%0A%E5%A4%B8%E5%85%8B%EF%BC%88com.quark.browser%29%0A%E8%BE%B9%E7%BC%98%E8%A7%86%E9%A2%91%EF%BC%88com.hjmore.wallpaper%29%0A%E5%93%94%E5%93%A9%E5%93%94%E5%93%A9%EF%BC%88tv.danmaku.bili%29%0A%E7%81%AB%E6%98%9F%E6%90%9C%E9%A2%98%EF%BC%88com.fenbi.android.souti%29%0A%E6%94%AF%E4%BB%98%E5%AE%9D%EF%BC%88com.eg.android.AlipayGphone%29%0AWPS%20Office%EF%BC%88cn.wps.moffice_eng%29",
            "gr_lal": "0.0%2C0.0",
            "gr_system_type": "android",
            "gr_device_imei": "3507f394e83d2424",
            "gr_app_version": "1.0.3",
            "gr_device_model": "Xiaomi%20M2012K10C%20%28Android%20%E7%89%88%E6%9C%AC%3A%2011%2C%20SDK%E7%89%88%E6%9C%AC%3A%2030%29",
            "gr_city": "%E8%B4%B5%E5%B7%9E%2C%E6%9C%AA%E7%9F%A5%2C%E6%9C%AA%E7%9F%A5",
            "requestId": self.uuid(),
            "timeStamp": str(int(time.time() * 1000)),
            "version": "1.0.3",
            "package": "com.hjmore.wallpaper",
            "userLoginToken": "",
            "app_id": "534",
            "appName": 2131951658,
            "device_id": "3507f394e83d2424",
            "device-id": "3507f394e83d2424",
            "oaid": "",
            "imei": "",
            "referer_shop": "边缘影视",
            "referer-shop": "边缘影视",
            "access_fine_location": 0,
            "access-fine-location": 0
        }
        ohost = self.getohost()
        data = self.getdata(f'/api.php/settings/grayscale_list', body, ohost)
        parsed_url = urlparse(data['data']['grayscale']['server_url'][0])
        domain = parsed_url.scheme + "://" + parsed_url.netloc
        return domain
+
    def drsa(self, encrypted_data):
        """RSA-decrypt (PKCS#1 v1.5) a base64 payload with the app's embedded private key.

        NOTE(review): the PEM body lines are indented inside the triple-quoted
        string; PyCryptodome's import tolerates this — confirm if the key ever changes.
        """
        private_key_pem = """-----BEGIN RSA PRIVATE KEY-----
        MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDA5NWiAwRjH50/
        IJY1N0zLopa4jpuWE7kWMn1Qunu6SjBgTvNRmRUoPDHn54haLfbfXIa2X+/sIaMB
        /O3HhrpVsz55E5W2vpZ5fBYWh+M65bQERKTW+l72H7GR9x0yj3QPByzzfsj/QkyP
        81prpwR9i8yMe7yG9TFKqUQCPE+/GrhNU1Qf6nFmV+vMnlP9DantkwAt4fPOMZn3
        j4da65/1YQV+F5bYzaLenNVKbHf8U8fVYLZWIy4yk2Vpe4R2Z+JX/eHWsChE9hOu
        iFm02eTW5NJLZlWUxYrSE23VXi8oXSEdON3UEOrwSdAUh4SXxLZ9U7KpNVdTwWyR
        AS4GyzJ/AgMBAAECggEBAKzmcXefLLeNBu4mz30z7Go7es5DRcLoOudiqmFKRs1c
        4q/xFLj3drdx/WnZZ6ctvDPKRBYFOJF4NRz7Ekfew/c9i6oLnA8KFuceCs53T37j
        ltCclwT7t1L2ZbxovIsteuJdlDVOV+w2CVqez1Xfh27heKAT6ZEvBtfdkVBPr0uj
        oVwa2+XlJmYZw5dHeB7ySVeAQ+69zDuADB8OWxPWsv6Del+Fhf0kTHAw4WgqcYsd
        JUunCjgLdJUlDgXzH/M/Nj8NYVEuq6QpmhaktJ4fwn/F7u3lQllVCFKj5lr0Xb92
        y7lvQlGqMKX1oxf+P5c5/vie1kDx1Rj4S++flIcVlUECgYEA4BuxCZ1c8oOF98bs
        KTAONnnZniQ1BRt7rA+O9+++lDjxJhxkuthwjB9YzrnZtxHJtvIIie9Jv8MVfzHa
        p2woDtiEh3YYwmIlgNUFvTcGe++tTiEiLDcGc/xNhpvfbLaw9QB7/HQ+LT1QCMxJ
        ufdBrR98l0khIGjYqxDW3W5pV70CgYEA3Ff/9+GM2XI/EUSTYrpnwp5R5OsXz1DL
        3CFFgp1EPCNk/c3YNWnrUtTkfmKAlRqWIHfphvH/jS6jpGrfRxDggPwGMtBc134b
        brIM5i4KNj/EcE+w5g03HaKBf1ZihHDQ53c6wTn6IFOHJNSPRLqMNqRymfbclNyO
        lBMHQmB8yOsCgYBCdZPTwRnuRTi2WQRx1nFwkEQL1Lrwb80GInsIZc2DkTtaTPNG
        QadmtmkUrSK2Wo0SNsZ3eUHKn2TBmpw4KCfc9zKeJVSEWKy8fu+7xBSlLlebotHK
        gOrl/H1VHOZuC+OAVItwO1yw98zDPynh/0Q3ve2pw6MSRGV0nYLKmdKdlQKBgQCJ
        Ty1rw1qKhu9WS22tMIxIc3CFPxtvTeI8I1+1rVtAPq5Im2YIoyDKVXCucaO/RvoW
        8aLNPTELQe0oIJFTL+k3d9ZFBCNXBncB3GK9biNe+w3nD0IlmkamaQZZ2/M4pTUJ
        iPtMPlzomCS3ht5g7f9CbegcmgGLooYXMGRtsMMSUQKBgQCoj+3UciH2i+HyUla5
        1FxivjH3MqSTE4Q7OdzrELb6DoLYzjgWAbpG8HIuodD4uG5xz1oR5H7vkblf1itB
        hwOwDEiabyX76e/I3Q0ovwBV+9PMjM4UVU0kHoiu3Z2s90ckwNh58w3QH5fn9E0b
        fqMnB6uWze+xrXWijaOzVZhIZg==
        -----END RSA PRIVATE KEY-----"""
        private_key = RSA.import_key(private_key_pem)
        cipher = PKCS1_v1_5.new(private_key)
        decrypted_data = cipher.decrypt(b64decode(encrypted_data), None)
        return decrypted_data.decode('utf-8')

    def ersa(self, data):
        """RSA-encrypt (PKCS#1 v1.5) with the server's public key; returns base64."""
        public_key = """-----BEGIN PUBLIC KEY-----
        MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+0QMb3WDXjNBRovRhTLH
        g3d+CliZAva2tepWNNN0Pj6DgE3ZTnPR34iL/cjo9Jbd3dqAJs/YkKnFurGkDxz5
        TthIqvmz244wiFcHt+FGWoJsj5ZVvrH3pPwH85ggmI1DjxSJEUhB12Z9X6FGli8D
        drR9xeLe5y8vFekux8xCQ7pwH1mNQu4Wy32WVM8aLjmRjNzEWOvEMAWCRuwymEdS
        zlWoH53qk1dqd6DAmOJhWU2hH6Yt2ZY9LTaDGiHrS+g0DuwajAQzhbM8eonGYMph
        nP4q0UTHWEfaGR3HoILmeM32M+qF/UCGfgfR6tCMiXPoHwnD2zoxbZ2p+QlYuTZL
        vQIDAQAB
        -----END PUBLIC KEY-----"""
        key = RSA.importKey(public_key)
        cipher = PKCS1_v1_5.new(key)
        encrypted = cipher.encrypt(data.encode())
        return b64encode(encrypted).decode()

    def eaes(self, data, key):
        """AES-ECB encrypt `data` with the given session key; returns base64."""
        key = key.encode('utf-8')
        cipher = AES.new(key, AES.MODE_ECB)
        padded = pad(data.encode('utf-8'), AES.block_size)
        encrypted = cipher.encrypt(padded)
        word = b64encode(encrypted).decode('utf-8')
        return word

    def daes(self, encrypted_data, key):
        """AES-ECB decrypt a base64 payload with the given session key."""
        key = key.encode('utf-8')
        cipher = AES.new(key, AES.MODE_ECB)
        encrypted = b64decode(encrypted_data)
        decrypted = cipher.decrypt(encrypted)
        unpadded = unpad(decrypted, AES.block_size)
        return unpadded.decode('utf-8')
+
    def getbody(self, params=None):
        """Build the common request body (device identity + timestamp);
        `params` entries override/extend the defaults."""
        body = {
            "requestId": self.uuid(),
            "timeStamp": str(int(time.time() * 1000)),
            "version": "1.0.3",
            "package": "com.hjmore.wallpaper",
            "userLoginToken": "",
            "app_id": "534",
            "appName": 2131951658,
            "device_id": "3507f394e83d2424",
            "device-id": "3507f394e83d2424",
            "oaid": "",
            "imei": "",
            "referer_shop": "边缘影视",
            "referer-shop": "边缘影视",
            "access_fine_location": 0,
            "access-fine-location": 0
        }
        if params:
            body.update(params)
        return body

    def getdata(self, path, body, host=None):
        """Signed round trip: AES-encrypt the JSON body with a fresh session
        key, RSA-wrap {key, md5-sign} into the Sign header; if the response
        carries a Sign header, RSA-unwrap the key and AES-decrypt the body.
        """
        jdata = json.dumps(body)
        msign = self.md5(jdata)
        skey = self.getskey()
        jsign = {'key': skey, 'sign': msign}
        Sign = self.ersa(json.dumps(jsign))
        header = self.headers.copy()
        header['Sign'] = Sign
        dbody = self.eaes(jdata, skey)
        response = self.post(f'{host or self.host}{path}', headers=header, data=dbody)
        rdata = response.text
        if response.headers.get('Sign'):
            dkey = self.drsa(response.headers['Sign'])
            rdata = self.daes(rdata, dkey)
        return json.loads(rdata)
+
    def e64(self, text):
        """Base64-encode a UTF-8 string; returns '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; returns '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest (request signing)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def uuid(self):
        # Method name shadows the `uuid` module only in the class namespace;
        # the module is still reachable here via the global scope.
        return str(uuid.uuid4())
+
+
+
+
diff --git a/PyramidStore/plugin/app/零度影视APP.py b/PyramidStore/plugin/app/零度影视APP.py
new file mode 100644
index 0000000..02c71f6
--- /dev/null
+++ b/PyramidStore/plugin/app/零度影视APP.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ did=self.getdid()
+ self.headers.update({'deviceId': did})
+ token=self.gettk()
+ self.headers.update({'token': token})
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='http://zero.mitotv.com'
+
+ headers = {
+ 'User-Agent': 'okhttp/4.12.0',
+ 'client': 'app',
+ 'deviceType': 'Android'
+ }
+
    def homeContent(self, filter):
        """Categories + filters from screenType; every category also gets a
        fixed 'sort' filter (popularity/rating/hot)."""
        data = self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
        result = {}
        # Filter display name -> API condition key.
        cate = {
            "类型": "classify",
            "地区": "region",
            "年份": "year"
        }
        sort = {
            'key': 'sreecnTypeEnum',
            'name': '排序',
            'value': [{'n': '人气', 'v': 'POPULARITY'}, {'n': '评分', 'v': 'COLLECT'}, {'n': '热搜', 'v': 'HOT'}]
        }
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['name'],
                'type_id': k['id']
            })
            filters[k['id']] = [
                {
                    'name': v['name'],
                    'key': cate[v['name']],
                    'value': [
                        {'n': i['name'], 'v': i['name']}
                        for i in v['children']
                    ]
                }
                for v in k['children']
            ]
            filters[k['id']].append(sort)
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home list: fixed recommendation condition 64, first 40 records."""
        jdata = {"condition": 64, "pageNum": 1, "pageSize": 40}
        data = self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
        return {'list': self.getlist(data['data']['records'])}
+
    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; `extend` keys map directly onto the
        API 'condition' object (classify/region/year/sreecnTypeEnum)."""
        jdata = {
            'condition': {
                # Default sort is newest; overridden by extend if the user picked one.
                'sreecnTypeEnum': 'NEWEST',
                'typeId': tid,
            },
            'pageNum': int(pg),
            'pageSize': 40,
        }
        jdata['condition'].update(extend)
        data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
        result = {}
        result['list'] = self.getlist(data['data']['records'])
        result['page'] = pg
        # Open-ended paging sentinels: totals are not reported by the API.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
    def detailContent(self, ids):
        """Detail page: vod id is '<id>@@<typeId>'. Fetches description, then
        the episode list of every player source (first one inline, the rest
        in parallel) and joins them into play_from/play_url strings."""
        ids = ids[0].split('@@')
        jdata = {"id": int(ids[0]), "typeId": ids[-1]}
        v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
        v = v['data']
        vod = {
            'type_name': v.get('classify'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_actor': v.get('star'),
            'vod_director': v.get('director'),
            'vod_content': v.get('introduce'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        l = c['data']['moviePlayerList']
        # player id -> player display name.
        n = {str(i['id']): i['moviePlayerName'] for i in l}
        m = jdata.copy()
        m.update({'playerId': str(l[0]['id'])})
        pd = self.getv(m, c['data']['episodeList'])
        if len(l) - 1:
            # Fetch the remaining players' episode lists concurrently.
            with ThreadPoolExecutor(max_workers=len(l) - 1) as executor:
                future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
                for future in future_to_player:
                    try:
                        o, p = future.result()
                        pd.update(self.getv(o, p))
                    except Exception as e:
                        print(f"请求失败: {e}")
        # Keep only players that actually produced an episode string.
        w, e = [], []
        for i, x in pd.items():
            if x:
                w.append(n[i])
                e.append(x)
        vod['vod_play_from'] = '$$$'.join(w)
        vod['vod_play_url'] = '$$$'.join(e)
        return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ jdata={
+ "condition": {
+ "value": key
+ },
+ "pageNum": int(pg),
+ "pageSize": 40
+ }
+ data=self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
+ return {'list':self.getlist(data['data']['records']),'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ jdata=json.loads(self.d64(id))
+ data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
+ try:
+ params={'playerUrl':data['data']['url'],'playerId':jdata['playerId']}
+ pd=self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
+ url,p=pd['data'],0
+ except Exception as e:
+ print(f"请求失败: {e}")
+ url,p=data['data']['url'],0
+ return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}
+
    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def gettk(self):
        """Obtain a visitor token for unauthenticated API access."""
        data = self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
        return data['data']['token']

    def getdid(self):
        """Return a persistent random 16-hex-char device id (cached under 'ldid')."""
        did = self.getCache('ldid')
        if not did:
            hex_chars = '0123456789abcdef'
            did = ''.join(random.choice(hex_chars) for _ in range(16))
            self.setCache('ldid', did)
        return did

    def getd(self, jdata, player):
        """Fetch one player's episode list; returns (request-dict, episodeList)."""
        x = jdata.copy()
        x.update({'playerId': str(player['id'])})
        response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
        return x, response['data']['episodeList']
+
    def getv(self, d, c):
        """Build {playerId: 'ep1$payload#ep2$payload...'} where each payload is
        the base64-encoded request dict extended with the episode id."""
        f = {d['playerId']: ''}
        g = []
        for i in c:
            j = d.copy()
            j.update({'episodeId': str(i['id'])})
            g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
        f[d['playerId']] = '#'.join(g)
        return f

    def getlist(self, data):
        """Map API records to the common vod-item dict shape."""
        videos = []
        for i in data:
            videos.append({
                # Composite id so detailContent can recover both id and typeId.
                'vod_id': f"{i['id']}@@{i['typeId']}",
                'vod_name': i.get('name'),
                'vod_pic': i.get('cover'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('totalEpisode')
            })
        return videos
+
    def e64(self, text):
        """Base64-encode a UTF-8 string; returns '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; returns '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""
diff --git a/PyramidStore/plugin/app/魔方影视APP.py b/PyramidStore/plugin/app/魔方影视APP.py
new file mode 100644
index 0000000..f8203bf
--- /dev/null
+++ b/PyramidStore/plugin/app/魔方影视APP.py
@@ -0,0 +1,209 @@
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+from urllib.parse import quote, urlparse
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.host = self.gethost()
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
    def homeContent(self, filter):
        """Categories, filters and home list from the encrypted init endpoint.

        Each category's 'type_extend' is a JSON string of filter values; a
        synthetic 'sort' filter (最新/最热/最赞) is injected for every category.
        """
        data = self.getdata("/api.php/getappapi.index/initV119")
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # Skip the first banners; recommendations per category are appended below.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass
+
    def categoryContent(self, tid, pg, filter, extend):
        """Filtered category listing; unset filters default to '全部'/'最新'."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        # Open-ended paging sentinels: the API does not report totals.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result
+
    def detailContent(self, ids):
        """Detail page: each episode url is packed with its player's
        user_agent/parse info as base64 JSON for playerContent."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry player metadata alongside each episode url.
                it['user_agent'] = itt["player_info"].get("user_agent")
                it["parse"] = itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all categories."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode payload to a direct URL.

        Two paths: a ready-made parse_api_url (GET, JSON response), or the
        app's vodParse endpoint with an AES-encrypted url. Any failure falls
        back to the raw url with parse=1 (webview).
        """
        ids = json.loads(self.d64(id))
        h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        try:
            if re.search(r'url=', ids['parse_api_url']):
                data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url = data.get('url') or data['data'].get('url')
            else:
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            if 'error' in url: raise ValueError(f"解析失败: {url}")
            p = 0
        except Exception as e:
            print('错误信息:', e)
            url, p = ids['url'], 1

        if re.search(r'\.jpg|\.png|\.jpeg', url):
            # Image extensions hide m3u8 payloads; serve via the local proxy.
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        return self.Mlocal(param)
+
    def gethost(self):
        """Read the current API host from the bootstrap text file."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        response = self.fetch('https://snysw.xyz/mfys.txt', headers=headers).text
        return response.strip()

    def aes(self, text, b=None):
        """AES-CBC with a fixed key (also used as IV — weak, but mandated by
        the upstream API). b truthy => encrypt to base64, else decrypt."""
        key = b"1234567887654321"
        cipher = AES.new(key, AES.MODE_CBC, key)
        if b:
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")
+
    def header(self):
        """Build per-request signed headers: timestamp, md5(device id) and an
        AES-encrypted timestamp as the verify signature."""
        t = str(int(time.time()))
        header = {"Referer": self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "140", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes(t, True),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST with signed headers and AES-decrypt the 'data' field of the response."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes(vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        # Wrap a media URL into the local-proxy endpoint (base64-encoded) as an m3u8 request.
        return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"
+
+ def Mlocal(self, param, header=None):
+ url = self.d64(param["url"])
+ ydata = self.fetch(url, headers=header, allow_redirects=False)
+ data = ydata.content.decode('utf-8')
+ if ydata.headers.get('Location'):
+ url = ydata.headers['Location']
+ data = self.fetch(url, headers=header).content.decode('utf-8')
+ parsed_url = urlparse(url)
+ durl = parsed_url.scheme + "://" + parsed_url.netloc
+ lines = data.strip().split('\n')
+ for index, string in enumerate(lines):
+ if '#EXT' not in string and 'http' not in string:
+ last_slash_index = string.rfind('/')
+ lpath = string[:last_slash_index + 1]
+ lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
+ data = '\n'.join(lines)
+ return [200, "application/vnd.apple.mpegur", data]
+
    def e64(self, text):
        """Base64-encode a UTF-8 string; returns '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        """Base64-decode to a UTF-8 string; returns '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex MD5 digest (request signing)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
diff --git a/PyramidStore/plugin/html/4KAV.py b/PyramidStore/plugin/html/4KAV.py
new file mode 100644
index 0000000..7db14f7
--- /dev/null
+++ b/PyramidStore/plugin/html/4KAV.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # Spider lifecycle hook; this source needs no per-instance setup.
        pass

    def getName(self):
        # Interface stub required by the framework; name is configured elsewhere.
        pass

    def isVideoFormat(self, url):
        # Interface stub; no direct-URL sniffing for this source.
        pass

    def manualVideoCheck(self):
        # Interface stub; manual checking is not used.
        pass

    def destroy(self):
        # Interface stub; nothing to clean up.
        pass
+
    # Request headers used for every page fetch. The mobile Safari UA is what
    # this site was evidently probed with; client-hint headers mimic Chrome.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'document',
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1'
    }

    # Site root; all relative paths elsewhere in the class are appended to it.
    host = "https://4k-av.com"
+
+ def homeContent(self, filter):
+ data=self.getpq()
+ result = {}
+ classes = []
+ for k in list(data('#category ul li').items())[:-1]:
+ classes.append({
+ 'type_name': k.text(),
+ 'type_id': k('a').attr('href')
+ })
+ result['class'] = classes
+ result['list'] = self.getlist(data('#MainContent_scrollul ul li'),'.poster span')
+ return result
+
    def homeVideoContent(self):
        # Interface stub; home videos come from homeContent() instead.
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.getpq(f"{tid}page-{pg}.html")
+ result = {}
+ result['list'] = self.getlist(data('#MainContent_newestlist .virow .NTMitem'))
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        """Build the detail view for one title; ids[0] is the detail-page path."""
        data = self.getpq(ids[0])
        v = data('#videoinfo')
        vod = {
            'vod_name': data('#tophead h1').text().split(' ')[0],
            'type_name': v('#MainContent_tags.tags a').text(),
            'vod_year': v('#MainContent_videodetail.videodetail a').text(),
            'vod_remarks': v('#MainContent_titleh12 h2').text(),
            'vod_content': v('p.cnline').text(),
            'vod_play_from': '4KAV',
            'vod_play_url': ''
        }
        vlist = data('#rtlist li')
        # Series pages label episodes "EP0x"; prefix each episode with the title.
        jn = f"{vod['vod_name']}_" if 'EP0' in vlist.eq(0)('span').text() else ''
        if vlist:
            # First list entry is the current page itself, so it links back to ids[0].
            c = [f"{jn}{i('span').text()}${i('a').attr('href')}" for i in list(vlist.items())[1:]]
            c.insert(0, f"{jn}{vlist.eq(0)('span').text()}${ids[0]}")
            vod['vod_play_url'] = '#'.join(c)
        else:
            # Single-video page: one entry pointing at the page itself.
            vod['vod_play_url'] = f"{vod['vod_name']}${ids[0]}"
        return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.getpq(f"/s?k={key}")
+ return {'list':self.getlist(data('#MainContent_newestlist .virow.search .NTMitem.Main'))}
+
+ def playerContent(self, flag, id, vipFlags):
+ try:
+ data=self.getpq(id)
+ p,url=0,data('#MainContent_videowindow source').attr('src')
+ if not url:raise Exception("未找到播放地址")
+ except Exception as e:
+ p,url=1,f"{self.host}{id}"
+ headers = {
+ 'origin': self.host,
+ 'referer': f'{self.host}/',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
+ }
+ return {'parse': p, 'url': url, 'header': headers}
+
    def localProxy(self, param):
        # Interface stub; this source streams directly without a local proxy.
        pass

    def liveContent(self, url):
        # Interface stub; no live channels for this source.
        pass
+
+ def getlist(self,data,y='.resyear label[title="分辨率"]'):
+ videos = []
+ for i in data.items():
+ ns = i('.title h2').text().split(' ')
+ videos.append({
+ 'vod_id': i('.title a').attr('href'),
+ 'vod_name': ns[0],
+ 'vod_pic': i('.poster img').attr('src'),
+ 'vod_remarks': ns[-1] if len(ns) > 1 else '',
+ 'vod_year': i(y).text()
+ })
+ return videos
+
+ def getpq(self, path=''):
+ url=f"{self.host}{path}"
+ data=self.fetch(url,headers=self.headers).text
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(data.encode('utf-8'))
\ No newline at end of file
diff --git a/PyramidStore/plugin/html/LIVES.py b/PyramidStore/plugin/html/LIVES.py
new file mode 100644
index 0000000..5fbeeaa
--- /dev/null
+++ b/PyramidStore/plugin/html/LIVES.py
@@ -0,0 +1,768 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import parse_qs
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        """Prime douyin request headers: grab the ttwid cookie via a HEAD
        request so later API calls are accepted."""
        tid = 'douyin'
        headers = self.gethr(0, tid)
        # HEAD request just to harvest the ttwid session cookie.
        response = requests.head(self.hosts[tid], headers=headers)
        ttwid = response.cookies.get('ttwid')
        headers.update({
            'authority': self.hosts[tid].split('//')[-1],
            'cookie': f'ttwid={ttwid}' if ttwid else ''
        })
        # Cached for every later douyin call.
        self.dyheaders = headers
        pass
+
    def getName(self):
        # Interface stub; name configured by the host app.
        pass

    def isVideoFormat(self, url):
        # Interface stub; not used for live streams.
        pass

    def manualVideoCheck(self):
        # Interface stub.
        pass

    def destroy(self):
        # Interface stub; nothing to release.
        pass
+
    # Two reusable header sets: [0] browser-like, [1] Dart client (some APIs
    # only answer to one or the other).
    headers = [
        {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
        },
        {
            "User-Agent": "Dart/3.4 (dart:io)"
        }
    ]

    # Safe fallback URL returned whenever stream resolution fails.
    excepturl = 'https://www.baidu.com'

    # API roots per platform; huya and bili need two distinct hosts.
    hosts = {
        "huya": ["https://www.huya.com","https://mp.huya.com"],
        "douyin": "https://live.douyin.com",
        "douyu": "https://www.douyu.com",
        "wangyi": "https://cc.163.com",
        "bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
    }

    # Referer values expected by each platform's API.
    referers = {
        "huya": "https://live.cdn.huya.com",
        "douyin": "https://live.douyin.com",
        "douyu": "https://m.douyu.com",
        "bili": "https://live.bilibili.com"
    }

    # Headers handed to the player per platform when returning stream URLs.
    playheaders = {
        "wangyi": {
            "User-Agent": "ExoPlayer",
            "Connection": "Keep-Alive",
            "Icy-MetaData": "1"
        },
        "bili": {
            'Accept': '*/*',
            'Icy-MetaData': '1',
            'referer': referers['bili'],
            'user-agent': headers[0]['User-Agent']
        },
        'douyin': {
            'User-Agent': 'libmpv',
            'Icy-MetaData': '1'
        },
        'huya': {
            'User-Agent': 'ExoPlayer',
            'Connection': 'Keep-Alive',
            'Icy-MetaData': '1'
        },
        'douyu': {
            'User-Agent': 'libmpv',
            'Icy-MetaData': '1'
        }
    }
+
+ def process_bili(self):
+ try:
+ self.blfdata = self.fetch(
+ f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
+ headers=self.gethr(0, 'bili')
+ ).json()
+ return ('bili', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['name'], 'v': str(i['id'])}
+ for i in self.blfdata['data']]}])
+ except Exception as e:
+ print(f"bili处理错误: {e}")
+ return 'bili', None
+
+ def process_douyin(self):
+ try:
+ data = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
+ for i in data.items():
+ if 'categoryData' in i.text():
+ content = i.text()
+ start = content.find('{')
+ end = content.rfind('}') + 1
+ if start != -1 and end != -1:
+ json_str = content[start:end]
+ json_str = json_str.replace('\\"', '"')
+ try:
+ self.dyifdata = json.loads(json_str)
+ return ('douyin', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['partition']['title'],
+ 'v': f"{i['partition']['id_str']}@@{i['partition']['title']}"}
+ for i in self.dyifdata['categoryData']]}])
+ except json.JSONDecodeError as e:
+ print(f"douyin解析错误: {e}")
+ return 'douyin', None
+ except Exception as e:
+ print(f"douyin请求或处理错误: {e}")
+ return 'douyin', None
+
+ def process_douyu(self):
+ try:
+ self.dyufdata = self.fetch(
+ f'{self.referers["douyu"]}/api/cate/list',
+ headers=self.headers[1]
+ ).json()
+ return ('douyu', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['cate1Name'], 'v': str(i['cate1Id'])}
+ for i in self.dyufdata['data']['cate1Info']]}])
+ except Exception as e:
+ print(f"douyu错误: {e}")
+ return 'douyu', None
+
    def homeContent(self, filter):
        """Home payload: fixed platform tabs plus per-platform filter options.

        bili/douyin/douyu filters are fetched concurrently; huya's are static.
        The process_* helpers also cache their API responses on self for reuse.
        """
        result = {}
        cateManual = {
            "虎牙": "huya",
            "哔哩": "bili",
            "抖音": "douyin",
            "斗鱼": "douyu",
            "网易": "wangyi"
        }
        classes = []
        # huya's four categories never change, so they are hard-coded.
        filters = {
            'huya': [{'key': 'cate', 'name': '分类',
                      'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
                                {'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
        }

        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = {
                executor.submit(self.process_bili): 'bili',
                executor.submit(self.process_douyin): 'douyin',
                executor.submit(self.process_douyu): 'douyu'
            }

            # Each helper returns (platform, filters-or-None).
            for future in futures:
                platform, filter_data = future.result()
                if filter_data:
                    filters[platform] = filter_data

        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })

        result['class'] = classes
        result['filters'] = filters
        return result
+
    def homeVideoContent(self):
        # Interface stub; listings are per-platform via categoryContent().
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ pagecount = 9999
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid == 'wangyi':
+ vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
+ elif 'bili' in tid:
+ vdata, pagecount = self.biliContent(tid, pg, filter, extend, vdata)
+ elif 'huya' in tid:
+ vdata, pagecount = self.huyaContent(tid, pg, filter, extend, vdata)
+ elif 'douyin' in tid:
+ vdata, pagecount = self.douyinContent(tid, pg, filter, extend, vdata)
+ elif 'douyu' in tid:
+ vdata, pagecount = self.douyuContent(tid, pg, filter, extend, vdata)
+ result['list'] = vdata
+ result['pagecount'] = pagecount
+ return result
+
+ def wyccContent(self, tid, pg, filter, extend, vdata):
+ params = {
+ 'format': 'json',
+ 'start': (int(pg) - 1) * 20,
+ 'size': '20',
+ }
+ response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
+ for i in response['lives']:
+ if i.get('cuteid'):
+ bvdata = self.buildvod(
+ vod_id=f"{tid}@@{i['cuteid']}",
+ vod_name=i.get('title'),
+ vod_pic=i.get('cover'),
+ vod_remarks=i.get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(bvdata)
+ return vdata, 9999
+
    def biliContent(self, tid, pg, filter, extend, vdata):
        """bilibili listing: first a sub-area folder view (when a filter is
        chosen), then the room list for an area."""
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            # Folder view: expand sub-areas of the chosen parent area from the
            # response cached by process_bili().
            for i in self.blfdata['data']:
                if str(i['id']) == extend['cate']:
                    for j in i['list']:
                        v = self.buildvod(
                            vod_id=f"click_{tid}@@{i['id']}@@{j['id']}",
                            vod_name=j.get('name'),
                            vod_pic=j.get('pic'),
                            vod_tag=1,
                            style={"type": "oval", "ratio": 1}
                        )
                        vdata.append(v)
            return vdata, 1
        else:
            path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
            if 'click' in tid:
                # Drill-down id: click_bili@@<parent_area>@@<area>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
            data = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
            for i in data['data']['list']:
                if i.get('roomid'):
                    # NOTE(review): `data` is rebound to the vod dict here,
                    # shadowing the API response; harmless because the loop
                    # iterator was already created, but worth renaming someday.
                    data = self.buildvod(
                        f"{tid}@@{i['roomid']}",
                        i.get('title'),
                        i.get('cover'),
                        i.get('watched_show', {}).get('text_large'),
                        0,
                        i.get('uname'),
                        style={"type": "rect", "ratio": 1.33}
                    )
                    vdata.append(data)
            return vdata, 9999
+
    def huyaContent(self, tid, pg, filter, extend, vdata):
        """huya listing: a game folder view when a filter is chosen, otherwise
        the paged live-room list (optionally narrowed to one game)."""
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            id = extend.get('cate')
            data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={id}',
                              headers=self.headers[1]).json()
            for i in data['data']:
                v = self.buildvod(
                    vod_id=f"click_{tid}@@{int(i['gid'])}",
                    vod_name=i.get('gameFullName'),
                    # Game icon follows a fixed CDN naming scheme keyed by gid.
                    vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(i["gid"])}-MS.jpg',
                    vod_tag=1,
                    style={"type": "oval", "ratio": 1}
                )
                vdata.append(v)
            return vdata, 1
        else:
            gid = ''
            if 'click' in tid:
                # Drill-down id: click_huya@@<gameId>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                gid = f'&gameId={ids[1]}'
            data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
                              headers=self.headers[1]).json()
            for i in data['data']['datas']:
                if i.get('profileRoom'):
                    v = self.buildvod(
                        f"{tid}@@{i['profileRoom']}",
                        i.get('introduction'),
                        i.get('screenshot'),
                        # Viewer count rendered in units of 万 (10k).
                        str(int(i.get('totalCount', '1')) / 10000) + '万',
                        0,
                        i.get('nick'),
                        style={"type": "rect", "ratio": 1.33}

                    )
                    vdata.append(v)
            return vdata, 9999
+
    def douyinContent(self, tid, pg, filter, extend, vdata):
        """douyin listing: sub-partition folders for a chosen category, else a
        paged room list for one partition."""
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            # Filter value is "<id_str>@@<title>"; both must match the cached
            # categoryData from process_douyin().
            ids = extend.get('cate').split('@@')
            for i in self.dyifdata['categoryData']:
                c = i['partition']
                if c['id_str'] == ids[0] and c['title'] == ids[1]:
                    # Prepend the parent partition so it is selectable too.
                    vlist = i['sub_partition'].copy()
                    vlist.insert(0, {'partition': c})
                    for j in vlist:
                        j = j['partition']
                        v = self.buildvod(
                            vod_id=f"click_{tid}@@{j['id_str']}@@{j['type']}",
                            vod_name=j.get('title'),
                            vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
                            vod_tag=1,
                            style={"type": "oval", "ratio": 1}
                        )
                        vdata.append(v)
            return vdata, 1
        else:
            path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
            if 'click' in tid:
                # Drill-down id: click_douyin@@<partition>@@<partition_type>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
            data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
            for i in data['data']['data']:
                v = self.buildvod(
                    vod_id=f"{tid}@@{i['web_rid']}",
                    vod_name=i['room'].get('title'),
                    vod_pic=i['room']['cover'].get('url_list')[0],
                    vod_year=i.get('user_count_str'),
                    vod_remarks=i['room']['owner'].get('nickname'),
                    style={"type": "rect", "ratio": 1.33}
                )
                vdata.append(v)
            return vdata, 9999
+
    def douyuContent(self, tid, pg, filter, extend, vdata):
        """douyu listing: second-level category folders for a chosen filter,
        else the paged room list (all rooms or one category)."""
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            # Expand cate2 entries of the chosen cate1 from process_douyu()'s cache.
            for i in self.dyufdata['data']['cate2Info']:
                if str(i['cate1Id']) == extend['cate']:
                    v = self.buildvod(
                        vod_id=f"click_{tid}@@{i['cate2Id']}",
                        vod_name=i.get('cate2Name'),
                        vod_pic=i.get('icon'),
                        vod_remarks=i.get('count'),
                        vod_tag=1,
                        style={"type": "oval", "ratio": 1}
                    )
                    vdata.append(v)
            return vdata, 1
        else:
            path = f'/japi/weblist/apinc/allpage/6/{pg}'
            if 'click' in tid:
                # Drill-down id: click_douyu@@<cate2Id>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
            url = f'{self.hosts[tid]}{path}'
            data = self.fetch(url, headers=self.headers[1]).json()
            for i in data['data']['rl']:
                v = self.buildvod(
                    vod_id=f"{tid}@@{i['rid']}",
                    vod_name=i.get('rn'),
                    vod_pic=i.get('rs16'),
                    # Online count rendered in units of 万 (10k).
                    vod_year=str(int(i.get('ol', 1)) / 10000) + '万',
                    vod_remarks=i.get('nn'),
                    style={"type": "rect", "ratio": 1.33}
                )
                vdata.append(v)
            return vdata, 9999
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@@')
+ if ids[0] == 'wangyi':
+ vod = self.wyccDetail(ids)
+ elif ids[0] == 'bili':
+ vod = self.biliDetail(ids)
+ elif ids[0] == 'huya':
+ vod = self.huyaDetail(ids)
+ elif ids[0] == 'douyin':
+ vod = self.douyinDetail(ids)
+ elif ids[0] == 'douyu':
+ vod = self.douyuDetail(ids)
+ return {'list': [vod]}
+
    def wyccDetail(self, ids):
        """CC直播 detail: parse the Next.js JSON embedded in the room page and
        group stream URLs per CDN line, highest bitrate first."""
        try:
            # Last <script> on the page holds the __NEXT_DATA__-style JSON.
            vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()

            def get_quality_name(vbr):
                # Map bitrate (kbps) to a human-readable quality label.
                if vbr <= 600:
                    return "标清"
                elif vbr <= 1000:
                    return "高清"
                elif vbr <= 2000:
                    return "超清"
                else:
                    return "蓝光"

            data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
            name = data['live'].get('title', ids[0])
            vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
                                vod_content=data.get('description_suffix'))
            resolution_data = data['live']['quickplay']['resolution']
            all_streams = {}
            # NOTE(review): the loop variable below shadows the outer `data`.
            sorted_qualities = sorted(resolution_data.items(),
                                      key=lambda x: x[1]['vbr'],
                                      reverse=True)
            for quality, data in sorted_qualities:
                vbr = data['vbr']
                quality_name = get_quality_name(vbr)
                for cdn_name, url in data['cdn'].items():
                    # Only accept plain http(s) string URLs; first sight of a
                    # CDN creates its bucket.
                    if cdn_name not in all_streams and type(url) == str and url.startswith('http'):
                        all_streams[cdn_name] = []
                    if isinstance(url, str) and url.startswith('http'):
                        all_streams[cdn_name].extend([quality_name, url])
            plists = []
            names = []
            # One play source per CDN; URL list is base64-packed into the id.
            for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
                names.append(f'线路{i}')
                pstr = f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}"
                plists.append(pstr)
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plists)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def biliDetail(self, ids):
        """bilibili detail: room metadata plus the richest accepted-quality
        list across all streams; play ids carry platform@@room@@qn."""
        try:
            vdata = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
                headers=self.gethr(0, ids[0])).json()
            v = vdata['data']['room_info']
            vod = self.buildvod(
                vod_name=v.get('title'),
                type_name=v.get('parent_area_name') + '/' + v.get('area_name'),
                vod_remarks=v.get('tags'),
                vod_play_from=v.get('title'),
            )
            data = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
                headers=self.gethr(0, ids[0])).json()
            vdnams = data['data']['playurl_info']['playurl']['g_qn_desc']
            all_accept_qns = []
            streams = data['data']['playurl_info']['playurl']['stream']
            # Collect every codec's accept_qn list; keep the longest one as the
            # most complete set of selectable qualities.
            for stream in streams:
                for format_item in stream['format']:
                    for codec in format_item['codec']:
                        if 'accept_qn' in codec:
                            all_accept_qns.append(codec['accept_qn'])
            max_accept_qn = max(all_accept_qns, key=len) if all_accept_qns else []
            # qn -> human-readable description.
            quality_map = {
                item['qn']: item['desc']
                for item in vdnams
            }
            quality_names = [f"{quality_map.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in max_accept_qn]
            vod['vod_play_url'] = "#".join(quality_names)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def huyaDetail(self, ids):
        """huya detail: one play source per stream type, one entry per CDN,
        with per-quality URLs derived by patching the bitrate into the base URL."""
        try:
            vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
                               headers=self.headers[0]).json()
            v = vdata['data']['liveData']
            vod = self.buildvod(
                vod_name=v.get('introduction'),
                type_name=v.get('gameFullName'),
                vod_director=v.get('nick'),
                vod_remarks=v.get('contentIntro'),
            )
            # Reverse the stream-type order so the preferred type comes first.
            data = dict(reversed(list(vdata['data']['stream'].items())))
            names = []
            plist = []

            for stream_type, stream_data in data.items():
                if isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data:
                    names.append(f"线路{len(names) + 1}")
                    # Highest bitrate first.
                    qualities = sorted(
                        stream_data['rateArray'],
                        key=lambda x: (x['iBitRate'], x['sDisplayName']),
                        reverse=True
                    )
                    cdn_urls = []
                    for cdn in stream_data['multiLine']:
                        quality_urls = []
                        for quality in qualities:
                            quality_name = quality['sDisplayName']
                            bit_rate = quality['iBitRate']
                            base_url = cdn['url']
                            if bit_rate > 0:
                                # Patch the requested bitrate into the CDN URL;
                                # m3u8 and flv links encode it differently.
                                if '.m3u8' in base_url:
                                    new_url = base_url.replace(
                                        'ratio=2000',
                                        f'ratio={bit_rate}'
                                    )
                                else:
                                    new_url = base_url.replace(
                                        'imgplus.flv',
                                        f'imgplus_{bit_rate}.flv'
                                    )
                            else:
                                # Bitrate 0 means the original/default stream.
                                new_url = base_url
                            quality_urls.extend([quality_name, new_url])
                        encoded_urls = self.e64(json.dumps(quality_urls))
                        cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{encoded_urls}")

                    if cdn_urls:
                        plist.append('#'.join(cdn_urls))
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def douyinDetail(self, ids):
        """douyin detail: decode stream_data to offer main/backup lines, each
        with flv/hls/lls format entries ordered by quality level."""
        url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
        data = self.fetch(url, headers=self.dyheaders).json()
        try:
            vdata = data['data']['data'][0]
            vod = self.buildvod(
                vod_name=vdata['title'],
                vod_remarks=vdata['user_count_str'],
            )
            resolution_data = vdata['stream_url']['live_core_sdk_data']['pull_data']['options']['qualities']
            # stream_data is itself a JSON string inside the JSON response.
            stream_json = vdata['stream_url']['live_core_sdk_data']['pull_data']['stream_data']
            stream_json = json.loads(stream_json)
            # A line type is offered only if at least one quality carries it.
            available_types = []
            if any(sdk_key in stream_json['data'] and 'main' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('main')
            if any(sdk_key in stream_json['data'] and 'backup' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('backup')
            plist = []
            for line_type in available_types:
                format_arrays = {'flv': [], 'hls': [], 'lls': []}
                # Highest quality level first.
                qualities = sorted(resolution_data, key=lambda x: x['level'], reverse=True)
                for quality in qualities:
                    sdk_key = quality['sdk_key']
                    if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
                        stream_info = stream_json['data'][sdk_key][line_type]
                        if stream_info.get('flv'):
                            format_arrays['flv'].extend([quality['name'], stream_info['flv']])
                        if stream_info.get('hls'):
                            format_arrays['hls'].extend([quality['name'], stream_info['hls']])
                        if stream_info.get('lls'):
                            format_arrays['lls'].extend([quality['name'], stream_info['lls']])
                format_urls = []
                for format_name, url_array in format_arrays.items():
                    if url_array:
                        encoded_urls = self.e64(json.dumps(url_array))
                        format_urls.append(f"{format_name}${ids[0]}@@{encoded_urls}")

                if format_urls:
                    plist.append('#'.join(format_urls))

            # One display name per populated line type.
            names = ['线路1', '线路2'][:len(plist)]
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod

        except Exception as e:
            return self.handle_exception(e)
+
    def douyuDetail(self, ids):
        """douyu detail: obtain the playback signature (via an external signing
        service) and build one play source per CDN."""
        headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
        try:
            data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
            vname = data['room']['room_name']
            vod = self.buildvod(
                vod_name=vname,
                vod_remarks=data['room'].get('second_lvl_name'),
                vod_director=data['room'].get('nickname'),
            )
            vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
            json_body = vdata['data']
            # douyu_text trims the obfuscated JS so the signer can evaluate it.
            json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
            # NOTE(review): signing is delegated to a third-party service
            # (alive.nsapps.cn); availability and trust are external.
            sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
            body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
            body=self.params_to_json(body)
            nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
            plist = []
            names = []
            # One play source per CDN; the sign + rate table are base64-packed
            # into the play id for playerContent/douyuplay.
            for i,x in enumerate(nubdata['data']['cdnsWithName']):
                names.append(f'线路{i+1}')
                d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
                plist.append(
                    f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
+ def douyu_text(self, text):
+ function_positions = [m.start() for m in re.finditer('function', text)]
+ total_functions = len(function_positions)
+ if total_functions % 2 == 0:
+ target_index = total_functions // 2 + 1
+ else:
+ target_index = (total_functions - 1) // 2 + 1
+ if total_functions >= target_index:
+ cut_position = function_positions[target_index - 1]
+ ctext = text[4:cut_position]
+ return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', ctext)
+ return text
+
    def searchContent(self, key, quick, pg="1"):
        # Interface stub; live platforms are browsed, not searched, here.
        pass
+
+ def playerContent(self, flag, id, vipFlags):
+ try:
+ ids = id.split('@@')
+ p = 1
+ if ids[0] in ['wangyi', 'douyin','huya']:
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'bili':
+ p, url = self.biliplay(ids)
+ elif ids[0] == 'huya':
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'douyu':
+ p, url = self.douyuplay(ids)
+ return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
+ except Exception as e:
+ return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
+
+ def biliplay(self, ids):
+ try:
+ data = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
+ headers=self.gethr(0, ids[0])).json()
+ urls = []
+ line_index = 1
+ for stream in data['data']['playurl_info']['playurl']['stream']:
+ for format_item in stream['format']:
+ for codec in format_item['codec']:
+ for url_info in codec['url_info']:
+ full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
+ urls.extend([f"线路{line_index}", full_url])
+ line_index += 1
+ return 0, urls
+ except Exception as e:
+ return 1, self.excepturl
+
    def douyuplay(self, ids):
        """douyu playback: resolve every advertised rate concurrently and
        return [name, url, ...] ordered by bitrate, highest first."""
        try:
            # ids[1]: {'sign','cdn','id'}; ids[2]: list of rate descriptors.
            sdata = json.loads(self.d64(ids[1]))
            headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
            ldata = json.loads(self.d64(ids[2]))
            result_obj = {}
            with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
                futures = [
                    executor.submit(
                        self.douyufp,
                        sdata,
                        quality,
                        headers,
                        self.hosts[ids[0]],
                        result_obj
                    ) for quality in ldata
                ]
                # Wait for all lookups (douyufp swallows its own errors).
                for future in futures:
                    future.result()

            result = []
            for bit in sorted(result_obj.keys(), reverse=True):
                result.extend(result_obj[bit])

            if result:
                return 0, result
            return 1, self.excepturl

        except Exception as e:
            return 1, self.excepturl
+
    def douyufp(self, sdata, quality, headers, host, result_obj):
        """Worker for douyuplay: fetch one rate's URL and record it in
        result_obj keyed by bitrate.

        NOTE(review): result_obj is written from multiple threads without a
        lock; each worker touches a distinct 'bit' key in practice — confirm.
        """
        try:
            body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
            body=self.params_to_json(body)
            data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
                             data=body, headers=headers).json()
            if data.get('data'):
                play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
                bit = quality.get('bit', 0)
                if bit not in result_obj:
                    result_obj[bit] = []
                result_obj[bit].extend([quality['name'], play_url])
        except Exception as e:
            print(f"Error fetching {quality['name']}: {str(e)}")
+
    def localProxy(self, param):
        # Interface stub; streams are handed to the player directly.
        pass
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+ def params_to_json(self, query_string):
+ parsed_data = parse_qs(query_string)
+ result = {key: value[0] for key, value in parsed_data.items()}
+ return result
+
+ def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
+ type_name='', vod_area='', vod_actor='', vod_director='',
+ vod_content='', vod_play_from='', vod_play_url=''):
+ vod = {
+ 'vod_id': vod_id,
+ 'vod_name': vod_name,
+ 'vod_pic': vod_pic,
+ 'vod_year': vod_year,
+ 'vod_tag': 'folder' if vod_tag else '',
+ 'vod_remarks': vod_remarks,
+ 'style': style,
+ 'type_name': type_name,
+ 'vod_area': vod_area,
+ 'vod_actor': vod_actor,
+ 'vod_director': vod_director,
+ 'vod_content': vod_content,
+ 'vod_play_from': vod_play_from,
+ 'vod_play_url': vod_play_url
+ }
+ vod = {key: value for key, value in vod.items() if value}
+ return vod
+
+ def getpq(self, url, headers=None, cookies=None):
+ data = self.fetch(url, headers=headers, cookies=cookies).text
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"解析页面错误: {str(e)}")
+ return pq(data.encode('utf-8'))
+
+ def gethr(self, index, rf='', zr=''):
+ headers = self.headers[index]
+ if zr:
+ headers['referer'] = zr
+ else:
+ headers['referer'] = f"{self.referers[rf]}/"
+ return headers
+
+ def handle_exception(self, e):
+ print(f"报错: {str(e)}")
+ return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'翻车啦${self.excepturl}'}
+
diff --git a/PyramidStore/plugin/html/LREEOK.py b/PyramidStore/plugin/html/LREEOK.py
new file mode 100644
index 0000000..2cf1b1c
--- /dev/null
+++ b/PyramidStore/plugin/html/LREEOK.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from Crypto.Hash import MD5
+sys.path.append("..")
+import json
+import time
+from pyquery import PyQuery as pq
+from base.spider import Spider
+
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # Spider lifecycle hook; no setup needed for this source.
        pass

    def getName(self):
        # Interface stub.
        pass

    def isVideoFormat(self, url):
        # Interface stub.
        pass

    def manualVideoCheck(self):
        # Interface stub.
        pass

    def action(self, action):
        # Interface stub; no custom actions.
        pass

    def destroy(self):
        # Interface stub; nothing to clean up.
        pass
+
    # Site root; Origin/Referer below must agree with it.
    host = 'https://www.lreeok.vip'

    # Desktop Chrome headers used for both HTML pages and the JSON API.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'Origin': host,
        'Referer': f"{host}/",
    }
+
+ def homeContent(self, filter):
+ data = self.getpq(self.fetch(self.host, headers=self.headers).text)
+ result = {}
+ classes = []
+ for k in data('.head-more.box a').items():
+ i = k.attr('href')
+ if i and '/vod' in i:
+ classes.append({
+ 'type_name': k.text(),
+ 'type_id': re.search(r'\d+', i).group(0)
+ })
+ result['class'] = classes
+ result['list'] = self.getlist(data('.border-box.diy-center .public-list-div'))
+ return result
+
    def homeVideoContent(self):
        # Interface stub; home videos come from homeContent().
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ body = {'type': tid, 'class': '', 'area': '', 'lang': '', 'version': '', 'state': '', 'letter': '', 'page': pg}
+ data = self.post(f"{self.host}/index.php/api/vod", headers=self.headers, data=self.getbody(body)).json()
+ result = {}
+ result['list'] = data['list']
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        """Scrape the detail page; metadata comes from fixed .slide-info slots,
        episodes from paired tab/list nodes."""
        data = self.getpq(self.fetch(f"{self.host}/voddetail/{ids[0]}.html", headers=self.headers).text)
        v = data('.detail-info.lightSpeedIn .slide-info')
        # Slot positions are fixed by the page template: 0 remarks, 2 director,
        # 3 actors, last = year.
        vod = {
            'vod_year': v.eq(-1).text(),
            'vod_remarks': v.eq(0).text(),
            'vod_actor': v.eq(3).text(),
            'vod_director': v.eq(2).text(),
            'vod_content': data('.switch-box #height_limit').text()
        }
        np = data('.anthology.wow.fadeInUp')
        ndata = np('.anthology-tab .swiper-wrapper .swiper-slide')
        pdata = np('.anthology-list .anthology-list-box ul')
        play, names = [], []
        # Tab i pairs with episode list i.
        for i in range(len(ndata)):
            n = ndata.eq(i)('a')
            # Strip the episode-count badge from the tab label.
            n('span').remove()
            names.append(n.text())
            vs = []
            # NOTE(review): inner `v` shadows the metadata node above; it is
            # no longer needed at this point, so behavior is unaffected.
            for v in pdata.eq(i)('li').items():
                vs.append(f"{v.text()}${v('a').attr('href')}")
            play.append('#'.join(vs))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result
+
+ def searchContent(self, key, quick, pg="1"):
+ # data = self.getpq(self.fetch(f"{self.host}/vodsearch/{key}----------{pg}---.html", headers=self.headers).text)
+ # return {'list': self.getlist(data('.row-right .search-box .public-list-bj')), 'page': pg}
+ data = self.fetch(
+ f"{self.host}/index.php/ajax/suggest?mid={pg}&wd={key}&limit=999×tamp={int(time.time() * 1000)}",
+ headers=self.headers).json()
+ videos = []
+ for i in data['list']:
+ videos.append({
+ 'vod_id': i['id'],
+ 'vod_name': i['name'],
+ 'vod_pic': i['pic']
+ })
+ return {'list': videos, 'page': pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve a play URL: extract the player JSON from the episode page
        and, for indirect links, exchange it via the okplay API."""
        h, p = {"User-Agent": "okhttp/3.14.9"}, 1
        url = f"{self.host}{id}"
        data = self.getpq(self.fetch(url, headers=self.headers).text)
        try:
            # The first script in the player pane assigns the config to 'aaa='.
            jstr = data('.player .player-left script').eq(0).text()
            jsdata = json.loads(jstr.split('aaa=')[-1])
            body = {'url': jsdata['url']}
            if not re.search(r'\.m3u8|\.mp4', body['url']):
                data = self.post(f"{self.host}/okplay/api_config.php", headers=self.headers,
                                 data=self.getbody(body)).json()
                url = data.get('url') or data.get('data', {}).get('url')
                p = 0
            # NOTE(review): when jsdata['url'] already ends in .m3u8/.mp4 the
            # page URL is returned with parse=1 instead of the direct link —
            # confirm this is intentional (the host app re-sniffs the page).
        except Exception as e:
            print('错误信息:', e)
            pass
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result
+
    def localProxy(self, param):
        # Interface stub; no local proxying needed for this source.
        pass
+
+ def getbody(self, params):
+ t = int(time.time())
+ h = MD5.new()
+ h.update(f"DS{t}DCC147D11943AF75".encode('utf-8'))
+ key = h.hexdigest()
+ params.update({'time': t, 'key': key})
+ return params
+
+ def getlist(self, data):
+ videos = []
+ for i in data.items():
+ id = i('a').attr('href')
+ if id:
+ id = re.search(r'\d+', id).group(0)
+ img = i('img').attr('data-src')
+ if img and 'url=' in img: img = f'{self.host}{img}'
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': i('img').attr('alt'),
+ 'vod_pic': img,
+ 'vod_remarks': i('.public-prt').text() or i('.public-list-prb').text()
+ })
+ return videos
+
+ def getpq(self, data):
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(data.encode('utf-8'))
diff --git a/PyramidStore/plugin/html/偷乐短剧.py b/PyramidStore/plugin/html/偷乐短剧.py
new file mode 100644
index 0000000..d6b0d15
--- /dev/null
+++ b/PyramidStore/plugin/html/偷乐短剧.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """Spider for www.toule.top (short-drama catalogue), scraped with pyquery."""

    def init(self, extend=""):
        # No per-instance setup; host/headers are class attributes.
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Site root; all request paths below are joined onto this.
    host='http://www.toule.top'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
        'Referer':f'{host}/',
        'Origin':host
    }

    def homeContent(self, filter):
        """Home page: categories from the swiper tabs (tab text doubles as the
        category id), list from the items grid."""
        data=self.getpq()
        result = {}
        classes = []
        for k in data('.swiper-wrapper .swiper-slide').items():
            classes.append({
                'type_name': k.text(),
                'type_id': k.text()
            })
        result['class'] = classes
        result['list'] = self.getlist(data('.container.items ul li'))
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing; pagecount/total are sentinels (real count unknown)."""
        data=self.getpq(f"/index.php/vod/show/class/{tid}/id/1/page/{pg}.html")
        result = {}
        result['list'] = self.getlist(data('.container.items ul li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: metadata plus 'name$href' episode pairs joined by '#'."""
        data=self.getpq(ids[0])
        v=data('.container.detail-content')
        vod = {
            'vod_remarks': v('.items-tags a').text(),
            'vod_content': v('.text-content .detail').text(),
            'vod_play_from': '嗷呜爱看短剧',
            'vod_play_url': '#'.join([f"{i.text()}${i('a').attr('href')}" for i in data('.swiper-wrapper .swiper-slide').items()])
        }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search through the site's vod/search route."""
        data=self.getpq(f"/index.php/vod/search/page/{pg}/wd/{key}.html")
        return {'list':self.getlist(data('.container.items ul li')),'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Read the player JSON from the page script ('var x = {...}'); on
        failure fall back to client-side parsing of the page URL (parse=1)."""
        data=self.getpq(id)
        try:
            jstr=data('.player-content script').eq(0).text()
            jt=json.loads(jstr.split('=',1)[-1])
            p,url=0,jt['url']
        except Exception as e:
            print(f"获取播放地址失败: {e}")
            p,url=1,f'{self.host}{id}'
        return {'parse': p, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def getpq(self, path=''):
        """GET host+path and parse with pyquery; retry as UTF-8 bytes on failure."""
        data=self.fetch(f"{self.host}{path}",headers=self.headers).text
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))

    def getlist(self,data):
        """Map grid <li> nodes to vod dicts (vod_id is the detail-page href)."""
        videos = []
        for i in data.items():
            videos.append({
                'vod_id': i('.image-line').attr('href'),
                'vod_name': i('img').attr('alt'),
                'vod_pic': i('img').attr('src'),
                'vod_remarks': i('.remarks.light').text()
            })
        return videos
diff --git a/PyramidStore/plugin/html/剧粑粑.py b/PyramidStore/plugin/html/剧粑粑.py
new file mode 100644
index 0000000..458e9d3
--- /dev/null
+++ b/PyramidStore/plugin/html/剧粑粑.py
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+import concurrent.futures
+import requests
+from Crypto.Hash import MD5
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
    def init(self, extend=""):
        """Pick the fastest mirror, then prime a requests session (cookies)
        with one warm-up GET against the chosen host."""
        self.host=self.gethost()
        self.headers.update({
            'referer': f'{self.host}/',
            'origin': self.host,
        })
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.get(self.host)
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-site': 'same-origin',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-user': '?1',
+ 'sec-fetch-dest': 'document',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ }
+
+ config={
+ "1":[{"key":"class","name":"剧情","value":[{"n":"全部","v":""},{"n":"喜剧","v":"喜剧"},{"n":"爱情","v":"爱情"},{"n":"恐怖","v":"恐怖"},{"n":"动作","v":"动作"},{"n":"科幻","v":"科幻"},{"n":"剧情","v":"剧情"},{"n":"战争","v":"战争"},{"n":"警匪","v":"警匪"},{"n":"犯罪","v":"犯罪"},{"n":"动画","v":"动画"},{"n":"奇幻","v":"奇幻"},{"n":"武侠","v":"武侠"},{"n":"冒险","v":"冒险"},{"n":"枪战","v":"枪战"},{"n":"悬疑","v":"悬疑"},{"n":"惊悚","v":"惊悚"},{"n":"经典","v":"经典"},{"n":"青春","v":"青春"},{"n":"伦理","v":"伦理"},{"n":"文艺","v":"文艺"},{"n":"微电影","v":"微电影"},{"n":"古装","v":"古装"},{"n":"历史","v":"历史"},{"n":"运动","v":"运动"},{"n":"农村","v":"农村"},{"n":"儿童","v":"儿童"},{"n":"网络电影","v":"网络电影"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":""},{"n":"大陆","v":"大陆"},{"n":"香港","v":"香港"},{"n":"台湾","v":"台湾"},{"n":"美国","v":"美国"},{"n":"法国","v":"法国"},{"n":"英国","v":"英国"},{"n":"日本","v":"日本"},{"n":"韩国","v":"韩国"},{"n":"德国","v":"德国"},{"n":"泰国","v":"泰国"},{"n":"印度","v":"印度"},{"n":"意大利","v":"意大利"},{"n":"西班牙","v":"西班牙"},{"n":"加拿大","v":"加拿大"},{"n":"其他","v":"其他"}]},{"key":"year","name":"年份","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"},{"n":"2003","v":"2003"},{"n":"2002","v":"2002"},{"n":"2001","v":"2001"},{"n":"2000","v":"2000"}]},{"key":"by","name":"排序","value":[{"n":"时间","v":"time"},{"n":"人气","v":"hits"},{"n":"评分","v":"score"}]}],
+ "2":[{"key":"class","name":"剧情","value":[{"n":"全部","v":""},{"n":"古装","v":"古装"},{"n":"战争","v":"战争"},{"n":"青春偶像","v":"青春偶像"},{"n":"喜剧","v":"喜剧"},{"n":"家庭","v":"家庭"},{"n":"犯罪","v":"犯罪"},{"n":"动作","v":"动作"},{"n":"奇幻","v":"奇幻"},{"n":"剧情","v":"剧情"},{"n":"历史","v":"历史"},{"n":"经典","v":"经典"},{"n":"乡村","v":"乡村"},{"n":"情景","v":"情景"},{"n":"商战","v":"商战"},{"n":"网剧","v":"网剧"},{"n":"其他","v":"其他"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":""},{"n":"内地","v":"内地"},{"n":"香港","v":"香港"},{"n":"台湾","v":"台湾"},{"n":"美国","v":"美国"},{"n":"法国","v":"法国"},{"n":"英国","v":"英国"},{"n":"日本","v":"日本"},{"n":"韩国","v":"韩国"},{"n":"德国","v":"德国"},{"n":"泰国","v":"泰国"},{"n":"印度","v":"印度"},{"n":"意大利","v":"意大利"},{"n":"西班牙","v":"西班牙"},{"n":"加拿大","v":"加拿大"},{"n":"其他","v":"其他"}]},{"key":"year","name":"年份","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"},{"n":"2003","v":"2003"},{"n":"2002","v":"2002"},{"n":"2001","v":"2001"},{"n":"2000","v":"2000"}]},{"key":"by","name":"排序","value":[{"n":"时间","v":"time"},{"n":"人气","v":"hits"},{"n":"评分","v":"score"}]}],
+ "3":[{"key":"class","name":"剧情","value":[{"n":"全部","v":""},{"n":"选秀","v":"选秀"},{"n":"情感","v":"情感"},{"n":"访谈","v":"访谈"},{"n":"播报","v":"播报"},{"n":"旅游","v":"旅游"},{"n":"音乐","v":"音乐"},{"n":"美食","v":"美食"},{"n":"纪实","v":"纪实"},{"n":"曲艺","v":"曲艺"},{"n":"生活","v":"生活"},{"n":"游戏互动","v":"游戏互动"},{"n":"财经","v":"财经"},{"n":"求职","v":"求职"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":""},{"n":"内地","v":"内地"},{"n":"港台","v":"港台"},{"n":"欧美","v":"欧美"},{"n":"日韩","v":"日韩"},{"n":"其他","v":"其他"}]},{"key":"year","name":"年份","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"},{"n":"2003","v":"2003"},{"n":"2002","v":"2002"},{"n":"2001","v":"2001"},{"n":"2000","v":"2000"}]},{"key":"by","name":"排序","value":[{"n":"时间","v":"time"},{"n":"人气","v":"hits"},{"n":"评分","v":"score"}]}],
+ "4":[{"key":"class","name":"剧情","value":[{"n":"全部","v":""},{"n":"情感","v":"情感"},{"n":"科幻","v":"科幻"},{"n":"热血","v":"热血"},{"n":"推理","v":"推理"},{"n":"搞笑","v":"搞笑"},{"n":"冒险","v":"冒险"},{"n":"萝莉","v":"萝莉"},{"n":"校园","v":"校园"},{"n":"动作","v":"动作"},{"n":"机战","v":"机战"},{"n":"运动","v":"运动"},{"n":"战争","v":"战争"},{"n":"少年","v":"少年"},{"n":"少女","v":"少女"},{"n":"社会","v":"社会"},{"n":"原创","v":"原创"},{"n":"亲子","v":"亲子"},{"n":"益智","v":"益智"},{"n":"励志","v":"励志"},{"n":"其他","v":"其他"}]},{"key":"area","name":"地区","value":[{"n":"全部","v":""},{"n":"国产","v":"国产"},{"n":"欧美","v":"欧美"},{"n":"日本","v":"日本"},{"n":"其他","v":"其他"}]},{"key":"year","name":"年份","value":[{"n":"全部","v":""},{"n":"2025","v":"2025"},{"n":"2024","v":"2024"},{"n":"2023","v":"2023"},{"n":"2022","v":"2022"},{"n":"2021","v":"2021"},{"n":"2020","v":"2020"},{"n":"2019","v":"2019"},{"n":"2018","v":"2018"},{"n":"2017","v":"2017"},{"n":"2016","v":"2016"},{"n":"2015","v":"2015"},{"n":"2014","v":"2014"},{"n":"2013","v":"2013"},{"n":"2012","v":"2012"},{"n":"2011","v":"2011"},{"n":"2010","v":"2010"},{"n":"2009","v":"2009"},{"n":"2008","v":"2008"},{"n":"2007","v":"2007"},{"n":"2006","v":"2006"},{"n":"2005","v":"2005"},{"n":"2004","v":"2004"},{"n":"2003","v":"2003"},{"n":"2002","v":"2002"},{"n":"2001","v":"2001"},{"n":"2000","v":"2000"}]},{"key":"by","name":"排序","value":[{"n":"时间","v":"time"},{"n":"人气","v":"hits"},{"n":"评分","v":"score"}]}],
+ }
+
    def homeContent(self, filter):
        """Home: category tabs come from swiper links whose href contains
        'type'; also attaches the static filter config."""
        data=self.getpq()
        result = {}
        classes = []
        for k in data('ul.swiper-wrapper').eq(0)('li').items():
            i=k('a').attr('href')
            if i and 'type' in i:
                classes.append({
                    'type_name': k.text(),
                    # Category id is the first number in the href.
                    'type_id': re.findall(r'\d+', i)[0],
                })
        result['class'] = classes
        result['list'] = self.getlist(data('.tab-content.ewave-pannel_bd li'))
        result['filters'] = self.config
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing via the /vodshow/ path; counts are sentinel values."""
        path=f"/vodshow/{tid}-{extend.get('area','')}-{extend.get('by','')}-{extend.get('class','')}-----{pg}---{extend.get('year','')}.html"
        data=self.getpq(path)
        result = {}
        result['list'] = self.getlist(data('ul.ewave-vodlist.clearfix li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: metadata plus parallel source names / episode lists
        joined with the client's '$$$' / '#' separators."""
        data=self.getpq(f"/voddetail/{ids[0]}.html")
        v=data('.ewave-content__detail')
        c=data('p')
        vod = {
            'type_name':c.eq(0)('a').text(),
            'vod_year': v('.data.hidden-sm').text(),
            'vod_remarks': v('h1').text(),
            'vod_actor': c.eq(1)('a').text(),
            'vod_director': c.eq(2)('a').text(),
            'vod_content': c.eq(-1).text(),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        # Source tabs (nd) and per-source playlists (pd) are parallel lists.
        nd=list(data('ul.nav-tabs.swiper-wrapper li').items())
        pd=list(data('ul.ewave-content__playlist').items())
        n,p=[],[]
        for i,x in enumerate(nd):
            n.append(x.text())
            p.append('#'.join([f"{j.text()}${j('a').attr('href')}" for j in pd[i]('li').items()]))
        vod['vod_play_url']='$$$'.join(p)
        vod['vod_play_from']='$$$'.join(n)
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ if pg=="1":
+ p=f"-------------.html?wd={key}"
+ else:
+ p=f"{key}----------{pg}---.html"
+ data=self.getpq(f"/vodsearch/{p}")
+ return {'list':self.getlist(data('ul.ewave-vodlist__media.clearfix li')),'page':pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve a playable URL.

        Order: direct m3u8/mp4 from the player API, then the custom decoders
        for urlmode 1/2, then a direct link in the page's own JSON. On failure
        fall back to client parsing (parse=1) with a click script that starts
        the iframe player.
        """
        try:
            data=self.getpq(id)
            # Player config script is '...= {json}'.
            jstr = json.loads(data('.ewave-player__video script').eq(0).text().split('=', 1)[-1])
            jxpath='/bbplayer/api.php'
            data=self.session.post(f"{self.host}{jxpath}",data={'vid':jstr['url']}).json()['data']
            if re.search(r'\.m3u8|\.mp4',data['url']):
                url=data['url']
            elif data['urlmode'] == 1:
                url=self.decode1(data['url'])
            elif data['urlmode'] == 2:
                url=self.decode2(data['url'])
            elif re.search(r'\.m3u8|\.mp4',jstr['url']):
                url=jstr['url']
            else:
                url=None
            if not url:raise Exception('未找到播放地址')
            p,c=0,''
        except Exception as e:
            self.log(f"解析失败: {e}")
            p,url,c=1,f"{self.host}{id}",'document.querySelector("#playleft iframe").contentWindow.document.querySelector("#start").click()'
        return {'parse': p, 'url': url, 'header': {'User-Agent':'okhttp/3.12.1'},'click': c}

    def localProxy(self, param):
        """302-redirect proxy: param['wdict'] is base64 JSON {jx, id}; fetch
        jx+id and bounce the caller to the src= URL found in the page's last
        <script> block."""
        wdict=json.loads(self.d64(param['wdict']))
        url=f"{wdict['jx']}{wdict['id']}"
        data=pq(self.fetch(url,headers=self.headers).text)
        html=data('script').eq(-1).text()
        url = re.search(r'src="(.*?)"', html).group(1)
        return [302,'text/html',None,{'Location':url}]
+
    def liveContent(self, url):
        pass

    def gethost(self):
        """Scrape the mirror list from the publish page and return the fastest
        of the first two rows' links."""
        data=pq(self.fetch('https://www.jubaba.vip',headers=self.headers).text)
        hlist=list(data('.content-top ul li').items())[:2]
        hsots=[j('a').attr('href') for i in hlist for j in i('a').items()]
        return self.host_late(hsots)

    def host_late(self, urls):
        """Probe all candidate hosts concurrently; return the lowest-latency
        one, or the first URL when every probe failed."""
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_url = {
                executor.submit(self.test_host, url): url
                for url in urls
            }
            results = {}
            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    results[url] = future.result()
                except Exception as e:
                    # A crashed probe counts as unreachable.
                    results[url] = float('inf')
        min_url = min(results.items(), key=lambda x: x[1])[0] if results else None
        if all(delay == float('inf') for delay in results.values()) or not min_url:
            return urls[0]
        return min_url

    def test_host(self, url):
        """HEAD the host with a 1s timeout; returns latency in ms, or inf on
        any error / non-2xx status."""
        try:
            start_time = time.monotonic()
            response = requests.head(
                url,
                timeout=1.0,
                allow_redirects=False,
                headers=self.headers
            )
            response.raise_for_status()
            return (time.monotonic() - start_time) * 1000
        except Exception as e:
            print(f"测试{url}失败: {str(e)}")
            return float('inf')

    def getpq(self, path='',min=0,max=3):
        """GET host+path via the session; when the page is the human-check
        interstitial, solve it (encrypt host and token, POST back) and retry
        up to `max` times.

        NOTE(review): the `min`/`max` parameters shadow the builtins — left
        unchanged because they are part of the public signature.
        """
        data = self.session.get(f"{self.host}{path}")
        data=data.text
        try:
            if '人机验证' in data:
                print(f"第{min}次尝试人机验证")
                jstr=pq(data)('script').eq(-1).html()
                token,tpath,stt=self.extract(jstr)
                body={'value':self.encrypt(self.host,stt),'token':self.encrypt(token,stt)}
                cd=self.session.post(f"{self.host}{tpath}",data=body)
                if min>max:raise Exception('人机验证失败')
                return self.getpq(path,min+1,max)
            return pq(data)
        except:
            # Bare except also catches the retry-limit Exception above; in
            # every failure mode we fall back to parsing the raw bytes.
            return pq(data.encode('utf-8'))
+
    def encrypt(self, input_str,staticchars):
        """Human-check obfuscation: Caesar-shift each char by +3 within the
        62-char `staticchars` alphabet (chars outside the alphabet pass
        through), pad each with two random alphabet chars, then base64.
        Output is randomized by design."""
        encodechars = ""
        for char in input_str:
            num0 = staticchars.find(char)
            if num0 == -1:
                code = char
            else:
                code = staticchars[(num0 + 3) % 62]
            num1 = random.randint(0, 61)
            num2 = random.randint(0, 61)
            encodechars += staticchars[num1] + code + staticchars[num2]
        return self.e64(encodechars)
+
+ def extract(self, js_code):
+ token_match = re.search(r'var token = encrypt\("([^"]+)"\);', js_code)
+ token_value = token_match.group(1) if token_match else None
+ url_match = re.search(r'var url = \'([^\']+)\';', js_code)
+ url_value = url_match.group(1) if url_match else None
+ staticchars_match = re.search(r'var\s+staticchars\s*=\s*["\']([^"\']+)["\'];', js_code)
+ staticchars = staticchars_match.group(1) if staticchars_match else None
+ return token_value, url_value,staticchars
+
    def decode1(self, val):
        """urlmode 1: XOR-unwrap the envelope, split it into two base64 JSON
        key arrays plus a base64 payload, then map the payload through
        _de_string."""
        url = self._custom_str_decode(val)
        parts = url.split("/")
        result = "/".join(parts[2:])
        key1 = json.loads(self.d64(parts[1]))
        key2 = json.loads(self.d64(parts[0]))
        decoded = self.d64(result)
        return self._de_string(key1, key2, decoded)

    def _custom_str_decode(self, val):
        """base64-decode, XOR with a repeating md5("test") keystream, then
        base64-decode the result again."""
        decoded = self.d64(val)
        key = self.md5("test")
        result = ""
        for i in range(len(decoded)):
            result += chr(ord(decoded[i]) ^ ord(key[i % len(key)]))
        return self.d64(result)
+
+ def _de_string(self, key_array, value_array, input_str):
+ result = ""
+ for char in input_str:
+ if re.match(r'^[a-zA-Z]$', char):
+ if char in key_array:
+ index = key_array.index(char)
+ result += value_array[index]
+ continue
+ result += char
+ return result
+
+ def decode2(self, url):
+ key = "PXhw7UT1B0a9kQDKZsjIASmOezxYG4CHo5Jyfg2b8FLpEvRr3WtVnlqMidu6cN"
+ url=self.d64(url)
+ result = ""
+ i = 1
+ while i < len(url):
+ try:
+ index = key.find(url[i])
+ if index == -1:
+ char = url[i]
+ else:
+ char = key[(index + 59) % 62]
+ result += char
+ except IndexError:
+ break
+ i += 3
+ return result
+
    def getlist(self, data):
        """Map listing nodes to vod dicts; prefers the text-overflow link's
        href for the id, falling back to the thumbnail link."""
        videos = []
        for k in data.items():
            j = k('.ewave-vodlist__thumb')
            h=k('.text-overflow a')
            if not h.attr('href'):h=j
            videos.append({
                'vod_id': re.findall(r'\d+', h.attr('href'))[0],
                'vod_name': j.attr('title'),
                'vod_pic': j.attr('data-original'),
                'vod_remarks': k('.pic-text').text(),
            })
        return videos
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def md5(self, text):
+ h = MD5.new()
+ h.update(text.encode('utf-8'))
+ return h.hexdigest()
diff --git a/PyramidStore/plugin/html/嗷呜动漫.py b/PyramidStore/plugin/html/嗷呜动漫.py
new file mode 100644
index 0000000..0c9a52e
--- /dev/null
+++ b/PyramidStore/plugin/html/嗷呜动漫.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import colorsys
+import random
+import re
+import sys
+from base64 import b64decode, b64encode
+from email.utils import unquote
+from Crypto.Hash import MD5
+sys.path.append("..")
+import json
+import time
+from pyquery import PyQuery as pq
+from base.spider import Spider
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # No per-instance setup; host/headers are class attributes.
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass
+
+ host='https://www.aowu.tv'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'pragma': 'no-cache',
+ 'cache-control': 'no-cache',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'dnt': '1',
+ 'upgrade-insecure-requests': '1',
+ 'sec-fetch-site': 'same-origin',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-user': '?1',
+ 'sec-fetch-dest': 'document',
+ 'referer': f'{host}/',
+ 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'priority': 'u=0, i',
+ }
+
    def homeContent(self, filter):
        """Home: fixed category map (新番/番剧/剧场 → type ids) plus the
        front-page list scraped from the public-list grid."""
        data=self.getpq(self.fetch(self.host,headers=self.headers).text)
        result = {}
        classes = []
        ldata=data('.wrap.border-box.public-r .public-list-box')
        cd={"新番":"32","番剧":"20","剧场":"33"}
        for k,r in cd.items():
            classes.append({
                'type_name': k,
                'type_id': r,
            })
        videos=[]
        for i in ldata.items():
            j = i('.public-list-exp')
            k=i('.public-list-button')
            videos.append({
                # e.g. '/play/123-1-1.html' -> '123'
                'vod_id': j.attr('href').split('/')[-1].split('-')[0],
                'vod_name': k('.time-title').text(),
                'vod_pic': j('img').attr('data-src'),
                'vod_year': f"·{j('.public-list-prb').text()}",
                'vod_remarks': k('.public-list-subtitle').text(),
            })
        result['class'] = classes
        result['list']=videos
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category list via the getbody-signed /index.php/api/vod endpoint;
        counts are sentinel values."""
        body = {'type':tid,'class':'','area':'','lang':'','version':'','state':'','letter':'','page':pg}
        data = self.post(f"{self.host}/index.php/api/vod", headers=self.headers, data=self.getbody(body)).json()
        result = {}
        result['list'] = data['list']
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail via the first episode's play page; builds parallel play-from
        names and '#'-joined episode lists per source."""
        data = self.getpq(self.fetch(f"{self.host}/play/{ids[0]}-1-1.html", headers=self.headers).text)
        v=data('.player-info-text .this-text')
        vod = {
            'type_name': v.eq(-1)('a').text(),
            'vod_year': v.eq(1)('a').text(),
            'vod_remarks': v.eq(0).text(),
            'vod_actor': v.eq(2)('a').text(),
            'vod_content': data('.player-content').text()
        }
        ns=data('.swiper-wrapper .vod-playerUrl')
        ps=data('.player-list-box .anthology-list-box ul')
        play,names=[],[]
        for i in range(len(ns)):
            n=ns.eq(i)('a')
            n('span').remove()
            # Strip icon glyph / nbsp artifacts from the source tab name.
            names.append(re.sub(r"[\ue679\xa0]", "", n.text()))
            play.append('#'.join([f"{v.text()}${v('a').attr('href')}" for v in ps.eq(i)('li').items()]))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result
+
+ def searchContent(self, key, quick, pg="1"):
+ data = self.fetch(f"{self.host}/index.php/ajax/suggest?mid=1&wd={key}&limit=9999×tamp={int(time.time()*1000)}", headers=self.headers).json()
+ videos=[]
+ for i in data['list']:
+ videos.append({
+ 'vod_id': i['id'],
+ 'vod_name': i['name'],
+ 'vod_pic': i['pic']
+ })
+ return {'list':videos,'page':pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Resolve the real stream by loading the site's Artplayer page and
        evaluating its config (getjstr + p_qjs); also builds a danmaku proxy
        URL handled by localProxy.

        NOTE(review): `p` is set to 0 before the empty-url raise, so the
        fallback branch can return parse=0 with the page URL — confirm whether
        that is intended.
        """
        p,url1= 1,''
        yurl=f"{self.host}{id}"
        data = self.getpq(self.fetch(yurl, headers=self.headers).text)
        dmhtm=data('.ds-log-set')
        dmdata={'vod_id':dmhtm.attr('data-id'),'vod_ep':dmhtm.attr('data-nid')}
        try:
            jstr = data('.player-top.box.radius script').eq(0).text()
            jsdata = json.loads(jstr.split('=',1)[-1])
            url1= jsdata['url']
            data = self.fetch(f"{self.host}/player/?url={unquote(self.d64(jsdata['url']))}", headers=self.headers).text
            data=self.p_qjs(self.getjstr(data))
            # Prefer the quality list (name/url pairs) when present.
            url=data['qualities'] if len(data['qualities']) else data['url']
            p = 0
            if not url:raise Exception("未找到播放地址")
        except Exception as e:
            self.log(e)
            url = yurl
            # A direct m3u8/mp4 seen in the page JSON beats client parsing.
            if re.search(r'\.m3u8|\.mp4',url1):url=url1
        dmurl = f"{self.getProxyUrl()}&data={self.e64(json.dumps(dmdata))}&type=dm.xml"
        return {"parse": p, "url": url, "header": {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'},'danmaku':dmurl}
+
    def localProxy(self, param):
        """Danmaku proxy: fetch comments for {vod_id, vod_ep} and emit them as
        a text/xml body sorted by time.

        NOTE(review): the strings built here look like danmaku XML whose tags
        were stripped somewhere upstream (only text nodes and tabs remain) —
        verify the client actually accepts this output before relying on it.
        """
        try:
            data = json.loads(self.d64(param['data']))
            headers = {
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
                'origin': self.host,
                'Content-Type': 'application/x-www-form-urlencoded'
            }
            params = {'vod_id': data['vod_id'], 'vod_ep': data['vod_ep']}
            res = self.post(f"https://app.wuyaoy.cn/danmu/api.php/getDanmu", headers=headers, data=params).json()
            danmustr = f'\n\n\tchat.aowudm.com\n\t88888888\n\t0\n\t99999\n\t0\n\t0\n\tk-v\n'
            # Allowed danmaku modes: scroll (1) and fixed top/bottom variants.
            my_list = ['1', '4', '5', '6']
            for i in sorted(res['data'], key=lambda x: x['time']):
                dms = [str(i.get('time',1)), random.choice(my_list), '25', self.get_color(), '0']
                # Strip characters that would break the XML body.
                dmtxt = re.sub(r'[<>&\u0000\b]', '', self.cleanText(i.get('text', '')))
                tempdata = f'\t{dmtxt}\n'
                danmustr += tempdata
            danmustr += ''
            return [200,'text/xml',danmustr]
        except Exception as e:
            print(f"获取弹幕失败:{str(e)}")
            return ""
+
+ def getbody(self, params):
+ t=int(time.time())
+ h = MD5.new()
+ h.update(f"DS{t}DCC147D11943AF75".encode('utf-8'))
+ key=h.hexdigest()
+ params.update({'time':t,'key':key})
+ return params
+
    def getpq(self, data):
        """Clean the HTML text, then parse with pyquery; retry on the UTF-8
        bytes when the str form fails."""
        data=self.cleanText(data)
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))
+
+ def get_color(self):
+ h = random.random()
+ s = random.uniform(0.7, 1.0)
+ v = random.uniform(0.8, 1.0)
+ r, g, b = colorsys.hsv_to_rgb(h, s, v)
+ r = int(r * 255)
+ g = int(g * 255)
+ b = int(b * 255)
+ decimal_color = (r << 16) + (g << 8) + b
+ return str(decimal_color)
+
+ def getjstr(self, data):
+ pattern = r'new\s+Artplayer\s*\((\{[\s\S]*?\})\);'
+ match = re.search(pattern, data)
+ config_str = match.group(1) if match else '{}'
+
+ replacements = [
+ (r'contextmenu\s*:\s*\[[\s\S]*?\{[\s\S]*?\}[\s\S]*?\],', 'contextmenu: [],'),
+ (r'customType\s*:\s*\{[\s\S]*?\},', 'customType: {},'),
+ (r'plugins\s*:\s*\[\s*artplayerPluginDanmuku\(\{[\s\S]*?lockTime:\s*\d+,?\s*\}\)\,?\s*\]', 'plugins: []')
+ ]
+ for pattern, replacement in replacements:
+ config_str = re.sub(pattern, replacement, config_str)
+ return config_str
+
    def p_qjs(self, config_str):
        """Evaluate the Artplayer config in the host app's bundled QuickJS
        engine (com.whl.quickjs.wrapper — Android-only) and return
        {url, qualities[, error]}; qualities is a flat [name, url, ...] list.
        Falls back to an empty result off-device or on any JS error."""
        try:
            from com.whl.quickjs.wrapper import QuickJSContext
            ctx = QuickJSContext.create()
            js_code = f"""
            function extractVideoInfo() {{
                try {{
                    const config = {config_str};
                    const result = {{
                        url: "",
                        qualities: []
                    }};
                    if (config.url) {{
                        result.url = config.url;
                    }}
                    if (config.quality && Array.isArray(config.quality)) {{
                        config.quality.forEach(function(q) {{
                            if (q && q.url) {{
                                result.qualities.push(q.html || "嗷呜");
                                result.qualities.push(q.url);
                            }}
                        }});
                    }}

                    return JSON.stringify(result);
                }} catch (e) {{
                    return JSON.stringify({{
                        error: "解析错误: " + e.message,
                        url: "",
                        qualities: []
                    }});
                }}
            }}
            extractVideoInfo();
            """
            result_json = ctx.evaluate(js_code)
            ctx.destroy()
            return json.loads(result_json)

        except Exception as e:
            self.log(f"执行失败: {e}")
            return {
                "error": str(e),
                "url": "",
                "qualities": []
            }
+
    def e64(self, text):
        """UTF-8 string → Base64 string; returns "" on any failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            return ""

    def d64(self,encoded_text):
        """Base64 string → UTF-8 string; returns "" on any failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            return ""
+
+
diff --git a/PyramidStore/plugin/html/好帅短剧.py b/PyramidStore/plugin/html/好帅短剧.py
new file mode 100644
index 0000000..5392224
--- /dev/null
+++ b/PyramidStore/plugin/html/好帅短剧.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+sys.path.append('..')
+from base.spider import Spider
+from pyquery import PyQuery as pq
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # No per-instance setup; host/headers are class attributes.
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass
+
+ host='https://www.nhsyy.com'
+
+ headers = {
+ 'Accept': '*/*',
+ 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'DNT': '1',
+ 'Origin': host,
+ 'Pragma': 'no-cache',
+ 'Referer': f'{host}/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'cross-site',
+ 'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
+ 'sec-ch-ua-mobile': '?1',
+ 'sec-ch-ua-platform': '"Android"',
+ }
+
    def homeContent(self, filter):
        """Home: categories from dropdown links whose href contains 'type';
        list from the front-page module grid."""
        data = pq(self.fetch(self.host, headers=self.headers).text)
        result = {}
        classes = []
        for i in data('.drop-content-items li').items():
            j = i('a').attr('href')
            if j and 'type' in j:
                # e.g. '/vodtype/12.html' -> '12'
                id = j.split('/')[-1].split('.')[0]
                classes.append({
                    'type_name': i('a').text(),
                    'type_id': id
                })
        hlist = self.getlist(data('.module-lines-list .module-item'))
        result['class'] = classes
        result['list'] = hlist
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing; counts are sentinel values.

        NOTE(review): the path segment 'vodshwo' looks like a typo for
        'vodshow' (cf. the sibling spiders) — confirm against the live site
        before changing it.
        """
        data = self.fetch(f'{self.host}/vodshwo/{tid}--------{pg}---.html', headers=self.headers).text
        vlist = self.getlist(pq(data)('.module-list .module-item'))
        return {"list": vlist, "page": pg, "pagecount": 9999, "limit": 90, "total": 999999}
+
+ def detailContent(self, ids):
+ data = pq(self.fetch(f"{self.host}{ids[0]}", headers=self.headers).text)
+ udata = data('.scroll-box-y .scroll-content a')
+ vdata = data('.video-info-main .video-info-item')
+ vod = {
+ 'vod_year': vdata.eq(2)('div').text(),
+ 'vod_remarks': vdata.eq(3)('div').text(),
+ 'vod_actor': vdata.eq(1)('a').text(),
+ 'vod_director': vdata.eq(0)('a').text(),
+ 'typt_name': data('.video-info-aux a').eq(0).attr('title'),
+ 'vod_content': vdata.eq(4)('p').eq(-1).text(),
+ 'vod_play_from': '嗷呜爱看短剧',
+ 'vod_play_url': '#'.join([f"{i.text()}${i.attr('href')}" for i in udata.items()]),
+ }
+ result = {"list": [vod]}
+ return result
+
    def searchContent(self, key, quick, pg="1"):
        """Search via the /vodsearch/ path; maps result cards to vod dicts."""
        dlist = self.fetch(f'{self.host}/vodsearch/{key}----------{pg}---.html', headers=self.headers).text
        ldata = pq(dlist)('.module-list .module-search-item')
        vlist = []
        for i in ldata.items():
            img = i('.module-item-pic')
            vlist.append({
                'vod_id': i('.video-serial').attr('href'),
                'vod_name': img('img').attr('alt'),
                'vod_pic': img('img').attr('data-src'),
                'vod_year': i('.tag-link a').eq(0).text(),
                'vod_remarks': i('.video-serial').text()
            })
        result = {"list": vlist, "page": pg}
        return result
+
+ def playerContent(self, flag, id, vipFlags):
+ data=self.fetch(f"{self.host}{id}", headers=self.headers).text
+ jstr = pq(data)('.player-wrapper script').eq(0).text()
+ try:
+ jdata = json.loads(jstr.split('=', 1)[-1])
+ url = jdata.get('url') or jdata.get('next_url')
+ p=0
+ except:
+ url,p = f"{self.host}{id}",1
+ return {'parse': p, 'url': url, 'header': self.headers}
+
    def localProxy(self, param):
        # Local proxy hook not used by this spider.
        pass

    def getlist(self, data):
        """Map module grid items to vod dicts (vod_id is the detail href)."""
        vlist = []
        for i in data.items():
            img = i('.module-item-pic')
            vlist.append({
                'vod_id': img('a').attr('href'),
                'vod_name': img('img').attr('alt'),
                'vod_pic': img('img').attr('data-src'),
                'vod_remarks': i('.module-item-text').text()
            })
        return vlist
diff --git a/PyramidStore/plugin/html/小红影视.py b/PyramidStore/plugin/html/小红影视.py
new file mode 100644
index 0000000..f884543
--- /dev/null
+++ b/PyramidStore/plugin/html/小红影视.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from base64 import b64decode
+from Crypto.Cipher import AES
+from Crypto.Hash import MD5
+from Crypto.Util.Padding import unpad
+sys.path.append("..")
+import json
+import time
+from pyquery import PyQuery as pq
+from base.spider import Spider
+
+class Spider(Spider):
+    """Spider plugin for 小红影视 (www.xiaohys.com).
+
+    Home/detail pages are scraped with pyquery; category listings come from
+    the site's JSON vod API (signed via getbody) and play addresses from an
+    AES-CBC encrypted player API (decrypted in aes()).
+    """
+
+    def init(self, extend=""):
+        # No per-instance state; host and headers are class attributes.
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def action(self, action):
+        pass
+
+    def destroy(self):
+        pass
+
+    # Base URL of the target site.
+    host='https://www.xiaohys.com'
+
+    # Desktop-Chrome headers; Origin/Referer are expected by the site's APIs.
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+        'Accept': 'application/json, text/javascript, */*; q=0.01',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+        'Origin': host,
+        'Referer': f"{host}/",
+    }
+
+    def homeContent(self, filter):
+        """Build the home payload: category classes plus featured videos."""
+        data=self.getpq(self.fetch(self.host,headers=self.headers).text)
+        result = {}
+        classes = []
+        for k in data('.head-more.box a').items():
+            i=k.attr('href')
+            # Only '/show' links are real category entries.
+            if i and '/show' in i:
+                classes.append({
+                    'type_name': k.text(),
+                    'type_id': i.split('/')[-1]
+                })
+        result['class'] = classes
+        result['list']=self.getlist(data('.border-box.diy-center .public-list-div'))
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """Fetch one category page from the JSON vod API (signed body)."""
+        body = {'type':tid,'class':'','area':'','lang':'','version':'','state':'','letter':'','page':pg}
+        data = self.post(f"{self.host}/index.php/api/vod", headers=self.headers, data=self.getbody(body)).json()
+        result = {}
+        result['list'] = data['list']
+        result['page'] = pg
+        # Totals are unknown up front; large sentinels keep the client paging
+        # until the API returns an empty list.
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Scrape the detail page of ids[0] and assemble the play lists."""
+        data = self.getpq(self.fetch(f"{self.host}/detail/{ids[0]}/", headers=self.headers).text)
+        v=data('.detail-info.lightSpeedIn .slide-info')
+        # assumes slide-info order: 0=remarks, 2=director, 3=actor,
+        # last=year — TODO confirm against the live page layout.
+        vod = {
+            'vod_year': v.eq(-1).text(),
+            'vod_remarks': v.eq(0).text(),
+            'vod_actor': v.eq(3).text(),
+            'vod_director': v.eq(2).text(),
+            'vod_content': data('.switch-box #height_limit').text()
+        }
+        np=data('.anthology.wow.fadeInUp')
+        ndata=np('.anthology-tab .swiper-wrapper .swiper-slide')
+        pdata=np('.anthology-list .anthology-list-box ul')
+        play,names=[],[]
+        # Tab i carries the source name, list i the matching episode links.
+        for i in range(len(ndata)):
+            n=ndata.eq(i)('a')
+            n('span').remove()
+            names.append(n.text())
+            vs=[]
+            for v in pdata.eq(i)('li').items():
+                vs.append(f"{v.text()}${v('a').attr('href')}")
+            play.append('#'.join(vs))
+        vod["vod_play_from"] = "$$$".join(names)
+        vod["vod_play_url"] = "$$$".join(play)
+        result = {"list": [vod]}
+        return result
+
+    def searchContent(self, key, quick, pg="1"):
+        """Search via the site's ajax suggest endpoint (returns JSON)."""
+        # NOTE(review): '×tamp' below looks like an HTML-mangled '&timestamp'
+        # — verify the query string against the live endpoint.
+        data = self.fetch(f"{self.host}/index.php/ajax/suggest?mid=1&wd={key}&limit=9999×tamp={int(time.time()*1000)}", headers=self.headers).json()
+        videos=[]
+        for i in data['list']:
+            videos.append({
+                'vod_id': i['id'],
+                'vod_name': i['name'],
+                'vod_pic': i['pic']
+            })
+        return {'list':videos,'page':pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        """Resolve the real stream url; on failure fall back to webview (p=1)."""
+        h,p,url1= {"User-Agent": "okhttp/3.14.9"},1,''
+        url=f"{self.host}{id}"
+        data = self.getpq(self.fetch(url, headers=self.headers).text)
+        try:
+            # First script in the player block holds a 'var x = {...}' config.
+            jstr = data('.player .player-left script').eq(0).text()
+            jsdata = json.loads(jstr.split('=',1)[-1])
+            body, url1= {'url': jsdata['url'],'referer':url},jsdata['url']
+            # The api answers with AES-CBC encrypted JSON (data + iv).
+            data = self.post(f"{self.host}/static/player/artplayer/api.php?ac=getdate", headers=self.headers, data=body).json()
+            l=self.aes(data['data'],data['iv'])
+            url=l.get('url') or l['data'].get('url')
+            p = 0
+            if not url:raise Exception('未找到播放地址')
+        except Exception as e:
+            print('错误信息:',e)
+            # If the raw config url already points at media, play it directly.
+            if re.search(r'\.m3u8|\.mp4',url1):url=url1
+        result = {}
+        result["parse"] = p
+        result["url"] = url
+        result["header"] = h
+        return result
+
+    def localProxy(self, param):
+        # Local proxying is not used by this spider.
+        pass
+
+    def getbody(self, params):
+        """Sign API params in place: key = md5('DS' + unixtime + 'DCC147D11943AF75')."""
+        t=int(time.time())
+        h = MD5.new()
+        h.update(f"DS{t}DCC147D11943AF75".encode('utf-8'))
+        key=h.hexdigest()
+        params.update({'time':t,'key':key})
+        return params
+
+    def getlist(self,data):
+        """Convert list-card nodes into vod dicts (numeric id, absolute pic url)."""
+        videos=[]
+        for i in data.items():
+            id = i('a').attr('href')
+            if id:
+                id = re.search(r'\d+', id).group(0)
+                img = i('img').attr('data-src')
+                if img and 'url=' in img and 'http' not in img: img = f'{self.host}{img}'
+                videos.append({
+                    'vod_id': id,
+                    'vod_name': i('img').attr('alt'),
+                    'vod_pic': img,
+                    'vod_remarks': i('.public-prt').text() or i('.public-list-prb').text()
+                })
+        return videos
+
+    def getpq(self, data):
+        """Parse html into pyquery; retry with utf-8 encoded bytes on failure."""
+        try:
+            return pq(data)
+        except Exception as e:
+            print(f"{str(e)}")
+            return pq(data.encode('utf-8'))
+
+    def aes(self, text,iv):
+        """AES-128-CBC decrypt base64 *text* with a fixed key and given *iv*; returns parsed JSON."""
+        key = b"d978a93ffb4d3a00"
+        iv = iv.encode("utf-8")
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
+        return json.loads(pt.decode("utf-8"))
diff --git a/PyramidStore/plugin/html/旺旺.py b/PyramidStore/plugin/html/旺旺.py
new file mode 100644
index 0000000..c218cad
--- /dev/null
+++ b/PyramidStore/plugin/html/旺旺.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import concurrent.futures
+import json
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+    """Spider plugin for the 旺旺 site family (mirrors published on nmdvd.com).
+
+    init() races all published mirrors with HEAD requests and keeps the
+    fastest one; pages are scraped with pyquery, and play urls are resolved
+    through the site's external jx parsers (optionally via localProxy).
+    """
+
+    def init(self, extend=""):
+        # Pick the fastest mirror once per session.
+        self.host = self.gethost()
+        pass
+
+    def getName(self):
+        pass
+
+    def isVideoFormat(self, url):
+        pass
+
+    def manualVideoCheck(self):
+        pass
+
+    def destroy(self):
+        pass
+
+    # Mobile-Chrome headers used for every request.
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+        'Pragma': 'no-cache',
+        'Cache-Control': 'no-cache',
+        'sec-ch-ua-platform': '"Android"',
+        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
+        'DNT': '1',
+        'sec-ch-ua-mobile': '?1',
+        'Sec-Fetch-Site': 'cross-site',
+        'Sec-Fetch-Mode': 'no-cors',
+        'Sec-Fetch-Dest': 'video',
+        'Sec-Fetch-Storage-Access': 'active',
+        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
+    }
+
+    # Static filter definitions keyed by category id
+    # (type / area / year / letter / sort for each).
+    config ={"1": [{"key": "cateId","name": "类型","value": [{"n": "全部","v": "1"},{"n": "动作片","v": "5"},{"n": "喜剧片","v": "6"},{"n": "爱情片","v": "7"},{"n": "科幻片","v": "8"},{"n": "恐怖片","v": "9"},{"n": "剧情片","v": "10"},{"n": "战争片","v": "11"},{"n": "惊悚片","v": "16"},{"n": "奇幻片","v": "17"}]},{"key": "area","name": "地区","value": [{"n": "全部","v": ""},{"n": "大陆","v": "大陆"},{"n": "香港","v": "香港"},{"n": "台湾","v": "台湾"},{"n": "美国","v": "美国"},{"n": "韩国","v": "韩国"},{"n": "日本","v": "日本"},{"n": "泰国","v": "泰国"},{"n": "新加坡","v": "新加坡"},{"n": "马来西亚","v": "马来西亚"},{"n": "印度","v": "印度"},{"n": "英国","v": "英国"},{"n": "法国","v": "法国"},{"n": "加拿大","v": "加拿大"},{"n": "西班牙","v": "西班牙"},{"n": "俄罗斯","v": "俄罗斯"},{"n": "其它","v": "其它"}]},{"key": "year","name": "时间","value": [{"n": "全部","v": ""},{"n": "2024","v": "2024"},{"n": "2023","v": "2023"},{"n": "2022","v": "2022"},{"n": "2021","v": "2021"},{"n": "2020","v": "2020"},{"n": "2019","v": "2019"},{"n": "2018","v": "2018"},{"n": "2017","v": "2017"},{"n": "2016","v": "2016"},{"n": "2015","v": "2015"},{"n": "2014","v": "2014"},{"n": "2013","v": "2013"},{"n": "2012","v": "2012"},{"n": "2011","v": "2011"},{"n": "2010","v": "2010"},{"n": "2009","v": "2009"},{"n": "2008","v": "2008"},{"n": "2007","v": "2007"},{"n": "2006","v": "2006"},{"n": "2005","v": "2005"},{"n": "2004","v": "2004"},{"n": "2003","v": "2003"},{"n": "2002","v": "2002"},{"n": "2001","v": "2001"},{"n": "2000","v": "2000"},{"n": "1999","v": "1999"},{"n": "1998","v": "1998"},{"n": "1997","v": "1997"},{"n": "1996","v": "1996"},{"n": "1995","v": "1995"},{"n": "1994","v": "1994"},{"n": "1993","v": "1993"},{"n": "1992","v": "1992"},{"n": "1991","v": "1991"},{"n": "1990","v": "1990"},{"n": "1989","v": "1989"},{"n": "1988","v": "1988"},{"n": "1987","v": "1987"},{"n": "1986","v": "1986"},{"n": "1985","v": "1985"},{"n": "1984","v": "1984"},{"n": "1983","v": "1983"},{"n": "1982","v": "1982"},{"n": "1981","v": "1981"},{"n": "1980","v": "1980"},{"n": "1979","v": "1979"},{"n": "1978","v": "1978"},{"n": 
"1977","v": "1977"},{"n": "1976","v": "1976"},{"n": "1975","v": "1975"},{"n": "1974","v": "1974"},{"n": "1973","v": "1973"},{"n": "1972","v": "1972"},{"n": "1971","v": "1971"},{"n": "1970","v": "1970"},{"n": "1969","v": "1969"},{"n": "1968","v": "1968"},{"n": "1967","v": "1967"},{"n": "1966","v": "1966"},{"n": "1965","v": "1965"},{"n": "1964","v": "1964"},{"n": "1963","v": "1963"},{"n": "1962","v": "1962"},{"n": "1961","v": "1961"},{"n": "1960","v": "1960"},{"n": "1959","v": "1959"},{"n": "1958","v": "1958"},{"n": "1957","v": "1957"},{"n": "1956","v": "1956"},{"n": "1955","v": "1955"},{"n": "1954","v": "1954"},{"n": "1953","v": "1953"},{"n": "1952","v": "1952"},{"n": "1951","v": "1951"},{"n": "1950","v": "1950"},{"n": "1949","v": "1949"},{"n": "1948","v": "1948"},{"n": "1947","v": "1947"},{"n": "1946","v": "1946"},{"n": "1945","v": "1945"},{"n": "1944","v": "1944"},{"n": "1943","v": "1943"},{"n": "1942","v": "1942"},{"n": "1941","v": "1941"},{"n": "1940","v": "1940"},{"n": "1939","v": "1939"},{"n": "1938","v": "1938"},{"n": "1937","v": "1937"},{"n": "1936","v": "1936"},{"n": "1935","v": "1935"},{"n": "1934","v": "1934"},{"n": "1933","v": "1933"},{"n": "1932","v": "1932"},{"n": "1931","v": "1931"},{"n": "1930","v": "1930"},{"n": "1929","v": "1929"},{"n": "1928","v": "1928"},{"n": "1927","v": "1927"},{"n": "1926","v": "1926"},{"n": "1925","v": "1925"},{"n": "1924","v": "1924"},{"n": "1923","v": "1923"},{"n": "1922","v": "1922"},{"n": "1921","v": "1921"},{"n": "1920","v": "1920"},{"n": "1919","v": "1919"},{"n": "1918","v": "1918"},{"n": "1917","v": "1917"},{"n": "1916","v": "1916"},{"n": "1915","v": "1915"},{"n": "1914","v": "1914"}]},{"key": "letter","name": "字母","value": [{"n": "全部","v": ""},{"n": "A","v": "A"},{"n": "B","v": "B"},{"n": "C","v": "C"},{"n": "D","v": "D"},{"n": "E","v": "E"},{"n": "F","v": "F"},{"n": "G","v": "G"},{"n": "H","v": "H"},{"n": "I","v": "I"},{"n": "J","v": "J"},{"n": "K","v": "K"},{"n": "L","v": "L"},{"n": "M","v": "M"},{"n": "N","v": 
"N"},{"n": "O","v": "O"},{"n": "P","v": "P"},{"n": "Q","v": "Q"},{"n": "R","v": "R"},{"n": "S","v": "S"},{"n": "T","v": "T"},{"n": "U","v": "U"},{"n": "V","v": "V"},{"n": "W","v": "W"},{"n": "X","v": "X"},{"n": "Y","v": "Y"},{"n": "Z","v": "Z"},{"n": "0-9","v": "0-9"}]},{"key": "by","name": "排序","value": [{"n": "全部","v": ""},{"n": "时间","v": "time"},{"n": "人气","v": "hits"},{"n": "评分","v": "score"}]}],"2": [{"key": "cateId","name": "类型","value": [{"n": "全部","v": "2"},{"n": "国产剧","v": "12"},{"n": "港台泰","v": "13"},{"n": "日韩剧","v": "14"},{"n": "欧美剧","v": "15"}]},{"key": "area","name": "地区","value": [{"n": "全部","v": ""},{"n": "大陆","v": "大陆"},{"n": "香港","v": "香港"},{"n": "台湾","v": "台湾"},{"n": "美国","v": "美国"},{"n": "韩国","v": "韩国"},{"n": "日本","v": "日本"},{"n": "泰国","v": "泰国"},{"n": "新加坡","v": "新加坡"},{"n": "马来西亚","v": "马来西亚"},{"n": "印度","v": "印度"},{"n": "英国","v": "英国"},{"n": "法国","v": "法国"},{"n": "加拿大","v": "加拿大"},{"n": "西班牙","v": "西班牙"},{"n": "俄罗斯","v": "俄罗斯"},{"n": "其它","v": "其它"}]},{"key": "year","name": "时间","value": [{"n": "全部","v": ""},{"n": "2024","v": "2024"},{"n": "2023","v": "2023"},{"n": "2022","v": "2022"},{"n": "2021","v": "2021"},{"n": "2020","v": "2020"},{"n": "2019","v": "2019"},{"n": "2018","v": "2018"},{"n": "2017","v": "2017"},{"n": "2016","v": "2016"},{"n": "2015","v": "2015"},{"n": "2014","v": "2014"},{"n": "2013","v": "2013"},{"n": "2012","v": "2012"},{"n": "2011","v": "2011"},{"n": "2010","v": "2010"},{"n": "2009","v": "2009"},{"n": "2008","v": "2008"},{"n": "2007","v": "2007"},{"n": "2006","v": "2006"},{"n": "2005","v": "2005"},{"n": "2004","v": "2004"},{"n": "2003","v": "2003"},{"n": "2002","v": "2002"},{"n": "2001","v": "2001"},{"n": "2000","v": "2000"},{"n": "1999","v": "1999"},{"n": "1998","v": "1998"},{"n": "1997","v": "1997"},{"n": "1996","v": "1996"},{"n": "1995","v": "1995"},{"n": "1994","v": "1994"},{"n": "1993","v": "1993"},{"n": "1992","v": "1992"},{"n": "1991","v": "1991"},{"n": "1990","v": "1990"},{"n": "1989","v": "1989"},{"n": "1988","v": 
"1988"},{"n": "1987","v": "1987"},{"n": "1986","v": "1986"},{"n": "1985","v": "1985"},{"n": "1984","v": "1984"},{"n": "1983","v": "1983"},{"n": "1982","v": "1982"},{"n": "1981","v": "1981"},{"n": "1980","v": "1980"},{"n": "1979","v": "1979"},{"n": "1978","v": "1978"},{"n": "1977","v": "1977"},{"n": "1976","v": "1976"},{"n": "1975","v": "1975"},{"n": "1974","v": "1974"},{"n": "1973","v": "1973"},{"n": "1972","v": "1972"},{"n": "1971","v": "1971"},{"n": "1970","v": "1970"},{"n": "1969","v": "1969"},{"n": "1968","v": "1968"},{"n": "1967","v": "1967"},{"n": "1966","v": "1966"},{"n": "1965","v": "1965"},{"n": "1964","v": "1964"},{"n": "1963","v": "1963"},{"n": "1962","v": "1962"},{"n": "1961","v": "1961"},{"n": "1960","v": "1960"}]},{"key": "letter","name": "字母","value": [{"n": "全部","v": ""},{"n": "A","v": "A"},{"n": "B","v": "B"},{"n": "C","v": "C"},{"n": "D","v": "D"},{"n": "E","v": "E"},{"n": "F","v": "F"},{"n": "G","v": "G"},{"n": "H","v": "H"},{"n": "I","v": "I"},{"n": "J","v": "J"},{"n": "K","v": "K"},{"n": "L","v": "L"},{"n": "M","v": "M"},{"n": "N","v": "N"},{"n": "O","v": "O"},{"n": "P","v": "P"},{"n": "Q","v": "Q"},{"n": "R","v": "R"},{"n": "S","v": "S"},{"n": "T","v": "T"},{"n": "U","v": "U"},{"n": "V","v": "V"},{"n": "W","v": "W"},{"n": "X","v": "X"},{"n": "Y","v": "Y"},{"n": "Z","v": "Z"},{"n": "0-9","v": "0-9"}]},{"key": "by","name": "排序","value": [{"n": "全部","v": ""},{"n": "时间","v": "time"},{"n": "人气","v": "hits"},{"n": "评分","v": "score"}]}],"3": [{"key": "area","name": "地区","value": [{"n": "全部","v": ""},{"n": "大陆","v": "大陆"},{"n": "香港","v": "香港"},{"n": "台湾","v": "台湾"},{"n": "美国","v": "美国"},{"n": "韩国","v": "韩国"},{"n": "日本","v": "日本"},{"n": "泰国","v": "泰国"},{"n": "新加坡","v": "新加坡"},{"n": "马来西亚","v": "马来西亚"},{"n": "印度","v": "印度"},{"n": "英国","v": "英国"},{"n": "法国","v": "法国"},{"n": "加拿大","v": "加拿大"},{"n": "西班牙","v": "西班牙"},{"n": "俄罗斯","v": "俄罗斯"},{"n": "其它","v": "其它"}]},{"key": "year","name": "时间","value": [{"n": "全部","v": ""},{"n": "2024","v": "2024"},{"n": 
"2023","v": "2023"},{"n": "2022","v": "2022"},{"n": "2021","v": "2021"},{"n": "2020","v": "2020"},{"n": "2019","v": "2019"},{"n": "2018","v": "2018"},{"n": "2017","v": "2017"},{"n": "2016","v": "2016"},{"n": "2015","v": "2015"},{"n": "2014","v": "2014"},{"n": "2013","v": "2013"},{"n": "2012","v": "2012"},{"n": "2011","v": "2011"},{"n": "2010","v": "2010"},{"n": "2009","v": "2009"},{"n": "2008","v": "2008"},{"n": "2007","v": "2007"},{"n": "2006","v": "2006"},{"n": "2005","v": "2005"},{"n": "2004","v": "2004"},{"n": "2003","v": "2003"},{"n": "2002","v": "2002"},{"n": "2001","v": "2001"},{"n": "2000","v": "2000"},{"n": "1999","v": "1999"},{"n": "1998","v": "1998"},{"n": "1997","v": "1997"},{"n": "1996","v": "1996"},{"n": "1995","v": "1995"},{"n": "1994","v": "1994"},{"n": "1993","v": "1993"},{"n": "1992","v": "1992"},{"n": "1991","v": "1991"},{"n": "1990","v": "1990"},{"n": "1989","v": "1989"},{"n": "1988","v": "1988"},{"n": "1987","v": "1987"},{"n": "1986","v": "1986"},{"n": "1985","v": "1985"},{"n": "1984","v": "1984"},{"n": "1983","v": "1983"}]},{"key": "letter","name": "字母","value": [{"n": "全部","v": ""},{"n": "A","v": "A"},{"n": "B","v": "B"},{"n": "C","v": "C"},{"n": "D","v": "D"},{"n": "E","v": "E"},{"n": "F","v": "F"},{"n": "G","v": "G"},{"n": "H","v": "H"},{"n": "I","v": "I"},{"n": "J","v": "J"},{"n": "K","v": "K"},{"n": "L","v": "L"},{"n": "M","v": "M"},{"n": "N","v": "N"},{"n": "O","v": "O"},{"n": "P","v": "P"},{"n": "Q","v": "Q"},{"n": "R","v": "R"},{"n": "S","v": "S"},{"n": "T","v": "T"},{"n": "U","v": "U"},{"n": "V","v": "V"},{"n": "W","v": "W"},{"n": "X","v": "X"},{"n": "Y","v": "Y"},{"n": "Z","v": "Z"},{"n": "0-9","v": "0-9"}]},{"key": "by","name": "排序","value": [{"n": "全部","v": ""},{"n": "时间","v": "time"},{"n": "人气","v": "hits"},{"n": "评分","v": "score"}]}],"4": [{"key": "cateId","name": "类型","value": [{"n": "全部","v": "4"},{"n": "动漫剧","v": "18"},{"n": "动漫片","v": "19"}]},{"key": "area","name": "地区","value": [{"n": "全部","v": ""},{"n": "大陆","v": 
"大陆"},{"n": "香港","v": "香港"},{"n": "台湾","v": "台湾"},{"n": "美国","v": "美国"},{"n": "韩国","v": "韩国"},{"n": "日本","v": "日本"},{"n": "泰国","v": "泰国"},{"n": "新加坡","v": "新加坡"},{"n": "马来西亚","v": "马来西亚"},{"n": "印度","v": "印度"},{"n": "英国","v": "英国"},{"n": "法国","v": "法国"},{"n": "加拿大","v": "加拿大"},{"n": "西班牙","v": "西班牙"},{"n": "俄罗斯","v": "俄罗斯"},{"n": "其它","v": "其它"}]},{"key": "year","name": "时间","value": [{"n": "全部","v": ""},{"n": "2024","v": "2024"},{"n": "2023","v": "2023"},{"n": "2022","v": "2022"},{"n": "2021","v": "2021"},{"n": "2020","v": "2020"},{"n": "2019","v": "2019"},{"n": "2018","v": "2018"},{"n": "2017","v": "2017"},{"n": "2016","v": "2016"},{"n": "2015","v": "2015"},{"n": "2014","v": "2014"},{"n": "2013","v": "2013"},{"n": "2012","v": "2012"},{"n": "2011","v": "2011"},{"n": "2010","v": "2010"},{"n": "2009","v": "2009"},{"n": "2008","v": "2008"},{"n": "2007","v": "2007"},{"n": "2006","v": "2006"},{"n": "2005","v": "2005"},{"n": "2004","v": "2004"},{"n": "2003","v": "2003"},{"n": "2002","v": "2002"},{"n": "2001","v": "2001"},{"n": "2000","v": "2000"},{"n": "1999","v": "1999"},{"n": "1998","v": "1998"},{"n": "1997","v": "1997"},{"n": "1996","v": "1996"},{"n": "1995","v": "1995"},{"n": "1994","v": "1994"},{"n": "1993","v": "1993"},{"n": "1992","v": "1992"},{"n": "1991","v": "1991"},{"n": "1990","v": "1990"},{"n": "1989","v": "1989"},{"n": "1988","v": "1988"},{"n": "1987","v": "1987"},{"n": "1986","v": "1986"},{"n": "1985","v": "1985"},{"n": "1984","v": "1984"},{"n": "1983","v": "1983"},{"n": "1982","v": "1982"},{"n": "1981","v": "1981"},{"n": "1980","v": "1980"},{"n": "1979","v": "1979"},{"n": "1978","v": "1978"},{"n": "1977","v": "1977"},{"n": "1976","v": "1976"},{"n": "1975","v": "1975"},{"n": "1974","v": "1974"},{"n": "1973","v": "1973"},{"n": "1972","v": "1972"},{"n": "1971","v": "1971"},{"n": "1970","v": "1970"},{"n": "1969","v": "1969"},{"n": "1968","v": "1968"},{"n": "1967","v": "1967"}]},{"key": "letter","name": "字母","value": [{"n": "全部","v": ""},{"n": "A","v": 
"A"},{"n": "B","v": "B"},{"n": "C","v": "C"},{"n": "D","v": "D"},{"n": "E","v": "E"},{"n": "F","v": "F"},{"n": "G","v": "G"},{"n": "H","v": "H"},{"n": "I","v": "I"},{"n": "J","v": "J"},{"n": "K","v": "K"},{"n": "L","v": "L"},{"n": "M","v": "M"},{"n": "N","v": "N"},{"n": "O","v": "O"},{"n": "P","v": "P"},{"n": "Q","v": "Q"},{"n": "R","v": "R"},{"n": "S","v": "S"},{"n": "T","v": "T"},{"n": "U","v": "U"},{"n": "V","v": "V"},{"n": "W","v": "W"},{"n": "X","v": "X"},{"n": "Y","v": "Y"},{"n": "Z","v": "Z"},{"n": "0-9","v": "0-9"}]},{"key": "by","name": "排序","value": [{"n": "全部","v": ""},{"n": "时间","v": "time"},{"n": "人气","v": "hits"},{"n": "评分","v": "score"}]}],"26": [{"key": "area","name": "地区","value": [{"n": "全部","v": ""},{"n": "大陆","v": "大陆"},{"n": "香港","v": "香港"},{"n": "台湾","v": "台湾"},{"n": "美国","v": "美国"},{"n": "韩国","v": "韩国"},{"n": "日本","v": "日本"},{"n": "泰国","v": "泰国"},{"n": "新加坡","v": "新加坡"},{"n": "马来西亚","v": "马来西亚"},{"n": "印度","v": "印度"},{"n": "英国","v": "英国"},{"n": "法国","v": "法国"},{"n": "加拿大","v": "加拿大"},{"n": "西班牙","v": "西班牙"},{"n": "俄罗斯","v": "俄罗斯"},{"n": "其它","v": "其它"}]},{"key": "year","name": "时间","value": [{"n": "全部","v": ""},{"n": "2024","v": "2024"},{"n": "2023","v": "2023"},{"n": "2022","v": "2022"},{"n": "2021","v": "2021"},{"n": "2020","v": "2020"}]},{"key": "letter","name": "字母","value": [{"n": "全部","v": ""},{"n": "A","v": "A"},{"n": "B","v": "B"},{"n": "C","v": "C"},{"n": "D","v": "D"},{"n": "E","v": "E"},{"n": "F","v": "F"},{"n": "G","v": "G"},{"n": "H","v": "H"},{"n": "I","v": "I"},{"n": "J","v": "J"},{"n": "K","v": "K"},{"n": "L","v": "L"},{"n": "M","v": "M"},{"n": "N","v": "N"},{"n": "O","v": "O"},{"n": "P","v": "P"},{"n": "Q","v": "Q"},{"n": "R","v": "R"},{"n": "S","v": "S"},{"n": "T","v": "T"},{"n": "U","v": "U"},{"n": "V","v": "V"},{"n": "W","v": "W"},{"n": "X","v": "X"},{"n": "Y","v": "Y"},{"n": "Z","v": "Z"},{"n": "0-9","v": "0-9"}]},{"key": "by","name": "排序","value": [{"n": "全部","v": ""},{"n": "时间","v": "time"},{"n": "人气","v": "hits"},{"n": 
"评分","v": "score"}]}]}
+
+    def homeContent(self, filter):
+        """Categories from the top nav plus video lists from the home page."""
+        data = self.getpq()
+        cdata = data('#topnav .swiper-wrapper li')
+        result = {}
+        classes = []
+        videos = []
+        for k in cdata.items():
+            i = k('a').attr('href')
+            # Skip non-category links and the music channel.
+            if i and 'type' in i and '音乐' not in k.text():
+                classes.append({
+                    'type_name': k.text(),
+                    'type_id': i.split('-')[-3],
+                })
+        # The first .globalPicList block is skipped; the rest are video rows.
+        for i in list(data('.globalPicList').items())[1:]:
+            videos.extend(self.getlist(i('ul li')))
+        result['class'] = classes
+        result['filters'] = self.config
+        result['list'] = videos
+        return result
+
+    def homeVideoContent(self):
+        pass
+
+    def categoryContent(self, tid, pg, filter, extend):
+        """Fetch a filtered category listing page for *tid*/*pg*."""
+        data = self.getpq(
+            f"/vod-list-id-{extend.get('cateId', tid)}-pg-{pg}-order--by-{extend.get('by', 'time')}-class-0-year-{extend.get('year', '')}-letter-{extend.get('letter', '')}-area-{extend.get('area', '')}-lang-.html")
+        result = {}
+        result['list'] = self.getlist(data('.globalPicList .resize_list li'))
+        result['page'] = pg
+        # Totals unknown; large sentinels keep the client paging.
+        result['pagecount'] = 9999
+        result['limit'] = 90
+        result['total'] = 999999
+        return result
+
+    def detailContent(self, ids):
+        """Scrape the detail page; the play list comes from mac_from/mac_url
+        variables embedded in the first episode page's inline script."""
+        data = self.getpq(ids[0])
+        v = data('.numList ul li').eq(0)('a').attr('href')
+        html = self.getpq(v)
+        d = html('.detailPosterIntro script').eq(0).text()
+        mac_from = re.search(r"mac_from='(.*?)'", d)
+        mac_url = re.search(r"mac_url='(.*?)'", d).group(1)
+        z = data('.page-bd')
+        c = z('.desc_item')
+        # assumes desc_item order: 0=remarks, 1=actor, 2=director, 3=year
+        # — TODO confirm against the live page layout.
+        vod = {
+            'vod_name': z('h1 a').text(),
+            'vod_year': c.eq(3)('a').text(),
+            'vod_remarks': c.eq(0)('font').text(),
+            'vod_actor': c.eq(1)('a').text(),
+            'vod_director': c.eq(2)('a').text(),
+            'vod_content': data('.detail-con p').text().split(':')[-1],
+            'vod_play_from': mac_from.group(1) if mac_from else '呜呜呜',
+            'vod_play_url': mac_url
+        }
+        return {'list': [vod]}
+
+    def searchContent(self, key, quick, pg="1"):
+        """Search via the site's POST search form."""
+        data = pq(self.post(f"{self.host}/index.php?m=vod-search", data={'wd': key}, headers=self.headers).text)
+        video = []
+        for k in data('#data_list li').items():
+            video.append({
+                'vod_id': k('.pic a').attr('href'),
+                'vod_name': k('.sTit').text(),
+                'vod_pic': k('.pic img').attr('src'),
+                'vod_year': k('.sStyle').text(),
+                'vod_remarks': k('.sDes').eq(-1).text()
+            })
+        return {'list': video, 'page': pg}
+
+    def playerContent(self, flag, id, vipFlags):
+        """Resolve the play url through the flag's jx parser script.
+
+        Multiple nested parser hits are exposed as proxied '线路N' entries
+        handled by localProxy; any failure falls back to webview (parse=1).
+        """
+        try:
+            if flag == '呜呜呜': raise Exception('未找到播放地址')
+            jxdata = self.getpq(f"/player/{flag}.js").html()
+            jxurl = re.search(r'http.*?url=', jxdata).group()
+            data = self.fetch(f"{jxurl}{id}", headers=self.headers).text
+            matches = re.findall(r'http.*?url=', data)
+            if len(matches):
+                url = []
+                for i, x in enumerate(matches):
+                    js = {'jx': x, 'id': id}
+                    purl = f"{self.getProxyUrl()}&wdict={self.e64(json.dumps(js))}"
+                    url.extend([f'线路{i + 1}', purl])
+            else:
+                url = re.search(r"url='(.*?)'", data).group(1)
+            if not url: raise Exception('未找到播放地址')
+            p = 0
+        except:
+            p, url = 1, id
+        return {'parse': p, 'url': url, 'header': self.headers}
+
+    def localProxy(self, param):
+        """Follow the encoded jx request and 302-redirect to the iframe src."""
+        wdict = json.loads(self.d64(param['wdict']))
+        url = f"{wdict['jx']}{wdict['id']}"
+        data = pq(self.fetch(url, headers=self.headers).text)
+        html = data('script').eq(-1).text()
+        url = re.search(r'src="(.*?)"', html).group(1)
+        return [302, 'text/html', None, {'Location': url}]
+
+    def liveContent(self, url):
+        pass
+
+    def gethost(self):
+        """Read the mirror list from nmdvd.com and return the fastest one."""
+        data = pq(self.fetch('https://www.nmdvd.com', headers=self.headers).text)
+        hlist = data('a[rel="nofollow"] b').text().split(' ')
+        return self.host_late(hlist)
+
+    def host_late(self, urls):
+        """Race HEAD requests against all mirrors; return the lowest-latency
+        host, or the first mirror if every probe failed."""
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future_to_url = {
+                executor.submit(self.test_host, f"https://{url}"): f"https://{url}"
+                for url in urls
+            }
+            results = {}
+            for future in concurrent.futures.as_completed(future_to_url):
+                url = future_to_url[future]
+                try:
+                    results[url] = future.result()
+                except Exception as e:
+                    results[url] = float('inf')
+        min_url = min(results.items(), key=lambda x: x[1])[0] if results else None
+        if all(delay == float('inf') for delay in results.values()) or not min_url:
+            return f"https://{urls[0]}"
+        return min_url
+
+    def test_host(self, url):
+        """Return the HEAD latency of *url* in ms, or inf on any failure."""
+        try:
+            start_time = time.monotonic()
+            response = requests.head(
+                url,
+                timeout=1.0,
+                allow_redirects=False,
+                headers=self.headers
+            )
+            response.raise_for_status()
+            return (time.monotonic() - start_time) * 1000
+        except Exception as e:
+            print(f"测试{url}失败: {str(e)}")
+            return float('inf')
+
+    def getpq(self, path=''):
+        """Fetch host+path and return the parsed pyquery document."""
+        data = self.fetch(f"{self.host}{path}", headers=self.headers).text
+        return pq(data)
+
+    def getlist(self, data):
+        """Convert list cards into vod dicts; the <em> (year) is split out of
+        the remarks node before reading its text."""
+        videos = []
+        for k in data.items():
+            i = k('.sBottom')
+            j = i('em').text()
+            i.remove('em')
+            videos.append({
+                'vod_id': k('a').attr('href'),
+                'vod_name': k('.sTit').text(),
+                'vod_pic': k('.pic img').attr('src'),
+                'vod_year': j,
+                'vod_remarks': i.text(),
+            })
+        return videos
+
+    def e64(self, text):
+        """Base64-encode a utf-8 string; returns '' on failure."""
+        try:
+            text_bytes = text.encode('utf-8')
+            encoded_bytes = b64encode(text_bytes)
+            return encoded_bytes.decode('utf-8')
+        except Exception as e:
+            print(f"Base64编码错误: {str(e)}")
+            return ""
+
+    def d64(self, encoded_text):
+        """Base64-decode to a utf-8 string; returns '' on failure."""
+        try:
+            encoded_bytes = encoded_text.encode('utf-8')
+            decoded_bytes = b64decode(encoded_bytes)
+            return decoded_bytes.decode('utf-8')
+        except Exception as e:
+            print(f"Base64解码错误: {str(e)}")
+            return ""
diff --git a/PyramidStore/plugin/html/柯南影视.py b/PyramidStore/plugin/html/柯南影视.py
new file mode 100644
index 0000000..5313945
--- /dev/null
+++ b/PyramidStore/plugin/html/柯南影视.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from urllib.parse import quote, urlparse
+from Crypto.Hash import SHA256
+sys.path.append("..")
+import json
+import time
+from pyquery import PyQuery as pq
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def action(self, action):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='https://www.knvod.com'
+
+ headers = {
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'Sec-Fetch-Dest': 'document',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'Origin': host,
+ 'Referer': f"{host}/",
+ 'Cookie':'X-Robots-Tag=CDN-VERIFY'
+ }
+
+ def homeContent(self, filter):
+ data=self.getpq(self.fetch(self.host,headers=self.headers).text)
+ result = {}
+ classes = []
+ for k in data('.head-more.box a').items():
+ i=k.attr('href')
+ if i and '/show' in i:
+ classes.append({
+ 'type_name': k.text(),
+ 'type_id': re.findall(r'\d+', i)[0]
+ })
+ result['class'] = classes
+ result['list']=self.getlist(data('.border-box.public-r .public-list-div'))
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ data=self.getpq(self.fetch(f"{self.host}/show/{tid}--------{pg}---/",headers=self.headers).text)
+ result = {}
+ result['list'] = self.getlist(data('.border-box.public-r .public-list-div'))
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ data = self.getpq(self.fetch(f"{self.host}/list/{ids[0]}/", headers=self.headers).text)
+ v=data('.detail-info.lightSpeedIn .slide-info')
+ vod = {
+ 'vod_year': v.eq(-1).text().split(':',1)[-1],
+ 'vod_remarks': v.eq(0),
+ 'vod_actor': v.eq(3).text().split(':',1)[-1],
+ 'vod_director': v.eq(2).text().split(':',1)[-1],
+ 'vod_content': data('.switch-box #height_limit').text()
+ }
+ np=data('.anthology.wow.fadeInUp')
+ ndata=np('.anthology-tab .swiper-wrapper .swiper-slide')
+ pdata=np('.anthology-list .anthology-list-box ul')
+ play,names=[],[]
+ for i in range(len(ndata)):
+ n=ndata.eq(i)('a')
+ n('span').remove()
+ names.append(n.text())
+ vs=[]
+ for v in pdata.eq(i)('li').items():
+ vs.append(f"{v.text()}${v('a').attr('href')}")
+ play.append('#'.join(vs))
+ vod["vod_play_from"] = "$$$".join(names)
+ vod["vod_play_url"] = "$$$".join(play)
+ result = {"list": [vod]}
+ return result
+
+ def searchContent(self, key, quick, pg="1"):
+ data = self.fetch(f"{self.host}/index.php/ajax/suggest?mid=1&wd={key}&limit=9999×tamp={int(time.time()*1000)}", headers=self.headers).json()
+ videos=[]
+ for i in data['list']:
+ videos.append({
+ 'vod_id': i['id'],
+ 'vod_name': i['name'],
+ 'vod_pic': i['pic']
+ })
+ return {'list':videos,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ h={
+ 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
+ 'Origin': self.host
+ }
+ data = self.getpq(self.fetch(f"{self.host}{id}", headers=self.headers).text)
+ try:
+ jstr = data('.player-box .player-left script').eq(1).text()
+ jsdata = json.loads(jstr.split('=',1)[-1])
+ url = jsdata.get('url')
+ if not re.search(r'\.m3u8|\.mp4',jsdata['url']):
+ jxd=self.fetch(f"{self.host}/static/player/{jsdata['from']}.js", headers=self.headers).text
+ jx=re.search(r'http.*?url=', jxd)
+ if not jx:raise Exception('未找到jx')
+ parsed_url = urlparse(jx.group())
+ jxhost = parsed_url.scheme + "://" + parsed_url.netloc
+ title=data('head title').eq(0).text().split('-')[0]
+ next=f"{self.host.split('//')[-1]}{jsdata['link_next']}" if jsdata.get('link_next') else ''
+ cd=self.fetch(f"{jx.group()}{jsdata['url']}&next=//{next}&title={quote(title)}", headers=self.headers).text
+ match = re.search(r'var\s+config\s*=\s*(\{[\s\S]*?\})', cd)
+ if not match:raise Exception('未找到config')
+ cm=re.sub(r',\s*}(?=\s*$)', '}', match.group(1))
+ config=json.loads(cm)
+ config.update({'key':self.sha256(f"{self.gettime()}knvod")})
+ config.pop('next',None)
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
+ 'Accept': 'application/json, text/javascript, */*; q=0.01',
+ 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ 'Cache-Control': 'no-cache',
+ 'DNT': '1',
+ 'Origin': jxhost,
+ 'Pragma': 'no-cache',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Sec-Fetch-Storage-Access': 'active',
+ 'X-Requested-With': 'XMLHttpRequest',
+ }
+ h['Origin']=jxhost
+ jd=self.post(f"{jxhost}/post.php", headers=headers, data=json.dumps(config))
+ data=json.loads(jd.content.decode('utf-8-sig'))
+ url=data.get('knvod')
+ p = 0
+ if not url:raise Exception('未找到播放地址')
+ except Exception as e:
+ print('错误信息:',e)
+ p,url=1,f"{self.host}{id}"
+ return {"parse": p, "url": url, "header": h}
+
+ def localProxy(self, param):
+ pass
+
+ def getlist(self,data):
+ videos=[]
+ for i in data.items():
+ id = i('a').attr('href')
+ if id:
+ id = re.search(r'\d+', id).group(0)
+ img = i('img').attr('data-src')
+ if img and 'url=' in img and 'http' not in img: img = f'{self.host}{img}'
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': i('a').attr('title'),
+ 'vod_pic': img,
+ 'vod_remarks': i('.public-prt').text() or i('.public-list-prb').text()
+ })
+ return videos
+
+ def getpq(self, data):
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"{str(e)}")
+ return pq(data.encode('utf-8'))
+
+ def gettime(self):
+ current_time = int(time.time())
+ hourly_timestamp = current_time - (current_time % 3600)
+ return hourly_timestamp
+
+ def sha256(self, text):
+ sha = SHA256.new()
+ sha.update(text.encode())
+ return sha.hexdigest()
diff --git a/PyramidStore/plugin/html/甜圈短剧.py b/PyramidStore/plugin/html/甜圈短剧.py
new file mode 100644
index 0000000..9d1ca26
--- /dev/null
+++ b/PyramidStore/plugin/html/甜圈短剧.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Short-drama spider backed by the api.cenguigui.cn JSON API."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API host serving category, detail and play-url endpoints.
    ahost='https://api.cenguigui.cn'

    # Desktop-Chrome headers reused on every API request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'Sec-Fetch-Site': 'cross-site',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Dest': 'video',
        'Sec-Fetch-Storage-Access': 'active',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }

    def homeContent(self, filter):
        # Static category list; each type_id doubles as the API "classname" value.
        result = {'class': [{'type_id': '推荐榜', 'type_name': '🔥 推荐榜'},
                            {'type_id': '新剧', 'type_name': '🎬 新剧'},
                            {'type_id': '逆袭', 'type_name': '🎬 逆袭'},
                            {'type_id': '霸总', 'type_name': '🎬 霸总'},
                            {'type_id': '现代言情', 'type_name': '🎬 现代言情'},
                            {'type_id': '打脸虐渣', 'type_name': '🎬 打脸虐渣'},
                            {'type_id': '豪门恩怨', 'type_name': '🎬 豪门恩怨'},
                            {'type_id': '神豪', 'type_name': '🎬 神豪'},
                            {'type_id': '马甲', 'type_name': '🎬 马甲'},
                            {'type_id': '都市日常', 'type_name': '🎬 都市日常'},
                            {'type_id': '战神归来', 'type_name': '🎬 战神归来'},
                            {'type_id': '小人物', 'type_name': '🎬 小人物'},
                            {'type_id': '女性成长', 'type_name': '🎬 女性成长'},
                            {'type_id': '大女主', 'type_name': '🎬 大女主'},
                            {'type_id': '穿越', 'type_name': '🎬 穿越'},
                            {'type_id': '都市修仙', 'type_name': '🎬 都市修仙'},
                            {'type_id': '强者回归', 'type_name': '🎬 强者回归'},
                            {'type_id': '亲情', 'type_name': '🎬 亲情'},
                            {'type_id': '古装', 'type_name': '🎬 古装'},
                            {'type_id': '重生', 'type_name': '🎬 重生'},
                            {'type_id': '闪婚', 'type_name': '🎬 闪婚'},
                            {'type_id': '赘婿逆袭', 'type_name': '🎬 赘婿逆袭'},
                            {'type_id': '虐恋', 'type_name': '🎬 虐恋'},
                            {'type_id': '追妻', 'type_name': '🎬 追妻'},
                            {'type_id': '天下无敌', 'type_name': '🎬 天下无敌'},
                            {'type_id': '家庭伦理', 'type_name': '🎬 家庭伦理'},
                            {'type_id': '萌宝', 'type_name': '🎬 萌宝'},
                            {'type_id': '古风权谋', 'type_name': '🎬 古风权谋'},
                            {'type_id': '职场', 'type_name': '🎬 职场'},
                            {'type_id': '奇幻脑洞', 'type_name': '🎬 奇幻脑洞'},
                            {'type_id': '异能', 'type_name': '🎬 异能'},
                            {'type_id': '无敌神医', 'type_name': '🎬 无敌神医'},
                            {'type_id': '古风言情', 'type_name': '🎬 古风言情'},
                            {'type_id': '传承觉醒', 'type_name': '🎬 传承觉醒'},
                            {'type_id': '现言甜宠', 'type_name': '🎬 现言甜宠'},
                            {'type_id': '奇幻爱情', 'type_name': '🎬 奇幻爱情'},
                            {'type_id': '乡村', 'type_name': '🎬 乡村'},
                            {'type_id': '历史古代', 'type_name': '🎬 历史古代'},
                            {'type_id': '王妃', 'type_name': '🎬 王妃'},
                            {'type_id': '高手下山', 'type_name': '🎬 高手下山'},
                            {'type_id': '娱乐圈', 'type_name': '🎬 娱乐圈'},
                            {'type_id': '强强联合', 'type_name': '🎬 强强联合'},
                            {'type_id': '破镜重圆', 'type_name': '🎬 破镜重圆'},
                            {'type_id': '暗恋成真', 'type_name': '🎬 暗恋成真'},
                            {'type_id': '民国', 'type_name': '🎬 民国'},
                            {'type_id': '欢喜冤家', 'type_name': '🎬 欢喜冤家'},
                            {'type_id': '系统', 'type_name': '🎬 系统'},
                            {'type_id': '真假千金', 'type_name': '🎬 真假千金'},
                            {'type_id': '龙王', 'type_name': '🎬 龙王'},
                            {'type_id': '校园', 'type_name': '🎬 校园'},
                            {'type_id': '穿书', 'type_name': '🎬 穿书'},
                            {'type_id': '女帝', 'type_name': '🎬 女帝'},
                            {'type_id': '团宠', 'type_name': '🎬 团宠'},
                            {'type_id': '年代爱情', 'type_name': '🎬 年代爱情'},
                            {'type_id': '玄幻仙侠', 'type_name': '🎬 玄幻仙侠'},
                            {'type_id': '青梅竹马', 'type_name': '🎬 青梅竹马'},
                            {'type_id': '悬疑推理', 'type_name': '🎬 悬疑推理'},
                            {'type_id': '皇后', 'type_name': '🎬 皇后'},
                            {'type_id': '替身', 'type_name': '🎬 替身'},
                            {'type_id': '大叔', 'type_name': '🎬 大叔'},
                            {'type_id': '喜剧', 'type_name': '🎬 喜剧'},
                            {'type_id': '剧情', 'type_name': '🎬 剧情'}]}
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        # "offset" is the zero-based page index expected by the API.
        params = {
            'classname': tid,
            'offset': str((int(pg) - 1)),
        }
        data = self.fetch(f'{self.ahost}/api/duanju/api.php', params=params, headers=self.headers).json()
        videos = []
        for k in data['data']:
            videos.append({
                'vod_id': k.get('book_id'),
                'vod_name': k.get('title'),
                'vod_pic': k.get('cover'),
                'vod_year': k.get('score'),
                'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
            })
        result = {}
        result['list'] = videos
        result['page'] = pg
        # Real totals are unknown; oversized values keep the client paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        # Detail endpoint keyed by book_id; episodes come back in v['data'].
        v=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
        vod = {
            'type_name': v.get('category'),
            'vod_year': v.get('time'),
            'vod_remarks': v.get('duration'),
            'vod_content': v.get('desc'),
            'vod_play_from': '嗷呜爱看短剧',
            # Episode list encoded as "title$video_id" joined with '#'.
            'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
        }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        # The API treats a search keyword like a category name.
        return self.categoryContent(key, pg, True, {})

    def playerContent(self, flag, id, vipFlags):
        # Resolve the direct play url for a video_id; no web-view parsing needed.
        data=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
        return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}

    def localProxy(self, param):
        pass
diff --git a/PyramidStore/plugin/html/红果网页.py b/PyramidStore/plugin/html/红果网页.py
new file mode 100644
index 0000000..fea8d5f
--- /dev/null
+++ b/PyramidStore/plugin/html/红果网页.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """HTML-scraping spider for www.hongguodj.cc (PyQuery based)."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host='https://www.hongguodj.cc'

    # Note: 'Origin' binds the class attribute `host` at class-creation time.
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'DNT': '1',
        'Origin': host,
        'Pragma': 'no-cache',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'cross-site',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
    }

    def homeContent(self, filter):
        """Scrape the home page for categories (nav links) and featured rows."""
        result = {}
        classes = []
        vlist = []
        data = pq(self.fetch(self.host, headers=self.headers).text)
        # Skip the first nav entry (the home link itself).
        for i in list(data('.slip li').items())[1:]:
            classes.append({
                'type_name': i.text(),
                # Category id is the first number embedded in the link href.
                'type_id': re.findall(r'\d+', i('a').attr('href'))[0]
            })
        for i in data('.wrap .rows').items():
            vlist.extend(self.getlist(i('li')))
        result['class'] = classes
        result['list'] = vlist
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        data=pq(self.fetch(f'{self.host}/type/{tid}-{pg}.html', headers=self.headers).text)
        result = {}
        result['list'] = self.getlist(data('.list ul li'))
        result['page'] = pg
        # Real totals are unknown; oversized values keep the client paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        # ids[0] is a site-relative path produced by getlist().
        data=pq(self.fetch(f'{self.host}{ids[0]}', headers=self.headers).text)
        v=data('.info')
        p=v('p')
        # Positional <p> elements: 0 actors, 1 director, 2 genre, 3 year, 4 area
        # — assumed from the usages below; verify against the live page layout.
        vod = {
            'vod_name': v('h1').text(),
            'type_name': p.eq(2).text(),
            'vod_year': p.eq(3).text(),
            'vod_area': p.eq(4).text(),
            'vod_remarks': v('em').text(),
            'vod_actor': p.eq(0).text(),
            'vod_director': p.eq(1).text(),
            'vod_content': data('#desc .text').text(),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        names = [i.text() for i in data('.title.slip a').items()]
        plist=[]
        # One "name$href" episode list per play source, '#'-joined.
        for i in data('.play-list ul').items():
            plist.append('#'.join([f'{j("a").text()}${j("a").attr("href")}' for j in i('li').items()]))
        vod['vod_play_from'] = '$$$'.join(names)
        vod['vod_play_url'] = '$$$'.join(plist)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        data=pq(self.fetch(f'{self.host}/search/{key}----------{pg}---.html', headers=self.headers).text)
        return {'list': self.getlist(data('.show.rows li')),'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve the direct stream from data-play; fall back to web-view parse."""
        p=0
        uid=f'{self.host}{id}'
        data=pq(self.fetch(uid, headers=self.headers).text)
        url=data('.video.ratio').attr('data-play')
        if not url:
            # No embedded stream url: hand the page url to the client parser.
            url = uid
            p = 1
        return {'parse': p, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        pass

    def getlist(self,data):
        """Map listing <li> nodes to vod dicts (href used as vod_id)."""
        vlist = []
        for j in data.items():
            vlist.append({
                'vod_id': j('a').attr('href'),
                'vod_name': j('img').attr('alt'),
                # NOTE(review): raises TypeError if data-src is missing — confirm
                # every listing image carries the attribute.
                'vod_pic': self.host + j('img').attr('data-src'),
                'vod_year': j('.bg').text(),
                'vod_remarks': j('p').text()
            })
        return vlist
+
+
diff --git a/PyramidStore/plugin/html/绝对影视.py b/PyramidStore/plugin/html/绝对影视.py
new file mode 100644
index 0000000..820abf2
--- /dev/null
+++ b/PyramidStore/plugin/html/绝对影视.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import re
+import sys
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """HTML spider for www.jdys.art; play urls are AES-CBC encrypted in-page."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'https://www.jdys.art'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'dnt': '1',
        'sec-ch-ua-mobile': '?0',
        'origin': host,
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': f'{host}/',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'priority': 'u=1, i',
    }

    def homeContent(self, filter):
        data = self.getpq(self.fetch(self.host, headers=self.headers).text)
        result = {}
        classes = []
        # Only the first 9 nav entries are real categories.
        for k in list(data('.navtop .navlist li').items())[:9]:
            classes.append({
                'type_name': k('a').text(),
                # Full href doubles as the category id (fetched directly later).
                'type_id': k('a').attr('href'),
            })
        result['class'] = classes
        result['list'] = self.getlist(data('.mi_btcon .bt_img ul li'))
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        # Page 1 is the bare category url; later pages append "page/<pg>/".
        data = self.getpq(self.fetch(f"{tid}{'' if pg == '1' else f'page/{pg}/'}", headers=self.headers).text)
        result = {}
        result['list'] = self.getlist(data('.mi_cont .bt_img ul li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        data = self.getpq(self.fetch(ids[0], headers=self.headers).text)
        data2 = data('.moviedteail_list li')
        # Positional <li> fields assumed: 0 genre, 1 area, 2 year, 4 status,
        # 5 director, 7 actors — verify against the live detail page.
        vod = {
            'vod_name': data('.dytext h1').text(),
            'type_name': data2.eq(0).text(),
            'vod_year': data2.eq(2).text(),
            'vod_area': data2.eq(1).text(),
            'vod_remarks': data2.eq(4).text(),
            'vod_actor': data2.eq(7).text(),
            'vod_director': data2.eq(5).text(),
            'vod_content': data('.yp_context').text().strip()
        }
        vdata = data('.paly_list_btn a')
        play = []
        for i in vdata.items():
            a = i.text() + "$" + i.attr.href
            play.append(a)
        vod["vod_play_from"] = "在线播放"
        vod["vod_play_url"] = "#".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        data = self.getpq(self.fetch(f"{self.host}/page/{pg}/?s={key}", headers=self.headers).text)
        return {'list': self.getlist(data('.mi_cont .bt_img ul li')), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Decrypt the in-page AES payload to get the stream; fall back to parse=1."""
        data = self.getpq(self.fetch(id, headers=self.headers).text)
        try:
            # Last <script> in the player holds the ciphertext plus key/iv
            # passed to a JS parse(...) call.
            sc = data('.videoplay script').eq(-1).text()
            strd = re.findall(r'var\s+[^=]*=\s*"([^"]*)";', sc)
            kdata = re.findall(r'parse\((.*?)\);', sc)
            jm = self.aes(strd[0], kdata[0].replace('"', ''), kdata[1].replace('"', ''))
            url = re.search(r'url: "(.*?)"', jm).group(1)
            p = 0
        except:
            # Any scrape/decrypt failure: let the client web-view parse the page.
            p = 1
            url = id
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = self.headers
        return result

    def localProxy(self, param):
        pass

    def getpq(self, text):
        """Parse HTML with PyQuery, retrying with UTF-8 bytes on failure."""
        try:
            return pq(text)
        except Exception as e:
            print(f"{str(e)}")
            return pq(text.encode('utf-8'))

    def getlist(self, data):
        """Map listing <li> nodes to vod dicts (absolute href as vod_id)."""
        videos = []
        for i in data.items():
            videos.append({
                'vod_id': i('a').attr('href'),
                'vod_name': i('a img').attr('alt'),
                'vod_pic': i('a img').attr('src'),
                'vod_remarks': i('.dycategory').text(),
                'vod_year': i('.dyplayinfo').text() or i('.rating').text(),
            })
        return videos

    def aes(self, word, key, iv):
        """AES-128/256-CBC decrypt base64 *word* with UTF-8 *key*/*iv*, unpad PKCS#7."""
        key = key.encode('utf-8')
        iv = iv.encode('utf-8')
        encrypted_data = base64.b64decode(word)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        decrypted_data = cipher.decrypt(encrypted_data)
        decrypted_data = unpad(decrypted_data, AES.block_size)
        return decrypted_data.decode('utf-8')
diff --git a/PyramidStore/plugin/html/金牌.py b/PyramidStore/plugin/html/金牌.py
new file mode 100644
index 0000000..be3b628
--- /dev/null
+++ b/PyramidStore/plugin/html/金牌.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import threading
+import uuid
+import requests
+sys.path.append('..')
+from base.spider import Spider
+import time
+from Crypto.Hash import MD5, SHA1
+
class Spider(Spider):
    '''
    Spider for the "金牌" API family; every request is signed (see getheaders).

    Configuration example:
    {
        "key": "xxxx",
        "name": "xxxx",
        "type": 3,
        "api": ".所在路径/金牌.py",
        "searchable": 1,
        "quickSearch": 1,
        "filterable": 1,
        "changeable": 1,
        "ext": {
            "site": "https://www.jiabaide.cn,域名2,域名3"
        }
    },
    '''
    def init(self, extend=""):
        # "site" may list several mirrors; pick the fastest-responding one.
        if extend:
            hosts=json.loads(extend)['site']
            self.host = self.host_late(hosts)
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build category list and per-category filter UI from two API calls."""
        cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
        fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
        result = {}
        classes = []
        filters={}
        for k in cdata['data']:
            classes.append({
                'type_name': k['typeName'],
                'type_id': str(k['typeId']),
            })
        sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
        for tid, d in fdata['data'].items():
            current_sort_values = sort_values.copy()
            # Category "1" does not support the "最近更新" sort option.
            if tid == '1':
                del current_sort_values[0]
            filters[tid] = [
                {"key": "type", "name": "类型",
                 "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},

                # Plot filter only when the category actually provides one.
                *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
                                                "value": [{"n": i["itemText"], "v": i["itemText"]}
                                                          for i in d["plotList"]]}]),

                {"key": "area", "name": "地区",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},

                {"key": "year", "name": "年份",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},

                {"key": "lang", "name": "语言",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},

                {"key": "sort", "name": "排序", "value": current_sort_values}
            ]
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        # Merge the "all list" sections with the hot-search list for the home feed.
        data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
        data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
        data=[]
        for i in data1['data'].values():
            data.extend(i['list'])
        data.extend(data2['data'])
        vods=self.getvod(data)
        return {'list':vods}

    def categoryContent(self, tid, pg, filter, extend):

        params = {
            "area": extend.get('area', ''),
            "filterStatus": "1",
            "lang": extend.get('lang', ''),
            "pageNum": pg,
            "pageSize": "30",
            "sort": extend.get('sort', '1'),
            "sortBy": "1",
            "type": extend.get('type', ''),
            "type1": tid,
            "v_class": extend.get('v_class', ''),
            "year": extend.get('year', '')
        }
        # The same params dict feeds both the query string and the signature.
        data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
        result = {}
        result['list'] = self.getvod(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
        vod=self.getvod([data['data']])[0]
        vod['vod_play_from']='嗷呜有金牌'
        # Single-episode titles use the vod name; play id is "<vod id>@@<episode nid>".
        vod['vod_play_url'] = '#'.join(
            f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
            vod['episodelist'])
        vod.pop('episodelist', None)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        params = {
            "keyword": key,
            "pageNum": pg,
            "pageSize": "8",
            "sourceCode": "1"
        }
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
        vods=self.getvod(data['data']['result']['list'])
        return {'list':vods,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
            'sec-ch-ua-platform': '"Windows"',
            'DNT': '1',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
            'sec-ch-ua-mobile': '?0',
            'Origin': self.host,
            'Referer': f'{self.host}/'
        }
        ids=id.split('@@')
        pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
        # Flatten to [name, url, name, url, ...] — the multi-resolution format.
        vlist=[]
        for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
        return {'parse':0,'url':vlist,'header':self.header}

    def localProxy(self, param):
        pass

    def host_late(self, url_list):
        """Probe all mirror urls concurrently and return the lowest-latency one."""
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list
        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}
        threads = []

        def test_host(url):
            try:
                start_time = time.time()
                response = requests.head(url, timeout=1.0, allow_redirects=False)
                delay = (time.time() - start_time) * 1000
                results[url] = delay
            except Exception as e:
                # Unreachable hosts sort last.
                results[url] = float('inf')
        for url in urls:
            t = threading.Thread(target=test_host, args=(url,))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        return min(results.items(), key=lambda x: x[1])[0]

    def md5(self, sign_key):
        """Hex MD5 digest of *sign_key* (step one of the request signature)."""
        md5_hash = MD5.new()
        md5_hash.update(sign_key.encode('utf-8'))
        md5_result = md5_hash.hexdigest()
        return md5_result

    def js(self, param):
        """Serialize params as an unescaped query string (also the sign payload)."""
        return '&'.join(f"{k}={v}" for k, v in param.items())

    def getheaders(self, param=None):
        """Build signed request headers: sign = SHA1(MD5(query + key + t))."""
        if param is None:param = {}
        t=str(int(time.time()*1000))
        # Fixed API key appended to every signature payload.
        param['key']='cb808529bae6b6be45ecfab29a4889bc'
        param['t']=t
        sha1_hash = SHA1.new()
        sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
        sign = sha1_hash.hexdigest()
        deviceid = str(uuid.uuid4())
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'sign': sign,
            't': t,
            'deviceid':deviceid
        }
        return headers

    def convert_field_name(self, field):
        """Map API camel-ish names to vod keys: vodName -> vod_name, typeId -> type_id."""
        field = field.lower()
        if field.startswith('vod') and len(field) > 3:
            field = field.replace('vod', 'vod_')
        if field.startswith('type') and len(field) > 4:
            field = field.replace('type', 'type_')
        return field

    def getvod(self, array):
        """Normalize a list of API items into vod dicts via convert_field_name."""
        return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]
+
diff --git a/PyramidStore/plugin/html/骚火电影.py b/PyramidStore/plugin/html/骚火电影.py
new file mode 100644
index 0000000..788ae32
--- /dev/null
+++ b/PyramidStore/plugin/html/骚火电影.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import re
+import sys
+from urllib.parse import urlparse
+import base64
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the shapp.us movie mirrors; player urls come from an
    obfuscated in-page key (see hhh) posted to the host's api.php."""

    def init(self, extend=""):
        # Resolve a working mirror first; all later paths are relative to it.
        self.host=self.gethost()
        self.headers.update({'referer': f'{self.host}/'})
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
        'sec-ch-ua-platform': '"Android"',
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
    }

    def homeContent(self, filter):
        data=self.getpq()
        result = {}
        classes = []
        # Hard-coded sub-genre filters for the two main categories (1=movies, 2=tv).
        filters = {"1": {"name": "类型","key": "tid","value": [{"n": "喜剧","v": 6},{"n": "爱情","v": 7},{"n": "恐怖","v": 8},{"n": "动作","v": 9},{"n": "科幻","v": 10},{"n": "战争","v": 11},{"n": "犯罪","v": 12},{"n": "动画","v": 13},{"n": "奇幻","v": 14},{"n": "剧情","v": 15},{"n": "冒险","v": 16},{"n": "悬疑","v": 17},{"n": "惊悚","v": 18},{"n": "其它","v": 19}]},"2": {"name": "类型","key": "tid","value": [{"n": "大陆剧","v": 20},{"n": "港剧","v": 21},{"n": "韩剧","v": 22},{"n": "美剧","v": 23},{"n": "日剧","v": 24},{"n": "英剧","v": 25},{"n": "台剧","v": 26},{"n": "其它","v": 27}]}}
        for k in data('.top_bar.clearfix a').items():
            j = k.attr('href')
            # Only "/list/<id>..." nav links are real categories.
            if j and 'list' in j:
                id = re.search(r'\d+', j).group(0)
                classes.append({
                    'type_name': k.text(),
                    'type_id': id
                })
        result['class'] = classes
        result['filters'] = filters
        result['list'] = self.getlist(data('.grid_box ul li'))
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        # A selected sub-genre (extend['tid']) overrides the top-level category.
        data=self.getpq(f"/list/{extend.get('tid',tid)}-{pg}.html")
        result = {}
        result['list'] = self.getlist(data('.grid_box ul li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        data=self.getpq(ids[0])
        vod = {
            'vod_remarks': data('.grid_box.v_info_box p').text(),
            'vod_content': data('.p_txt.show_part').text().split('\n')[0],
        }
        n=list(data('.play_from ul li').items())
        p=list(data('ul.play_list li').items())
        ns,ps=[],[]
        # Episode anchors are listed newest-first; [::-1] restores play order.
        for i,j in enumerate(n):
            ns.append(j.text())
            ps.append('#'.join([f"{k.text()}${k.attr('href')}" for k in list(p[i]('a').items())[::-1]]))
        vod['vod_play_from']='$$$'.join(ns)
        vod['vod_play_url']='$$$'.join(ps)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        pass

    def playerContent(self, flag, id, vipFlags):
        """Follow the player iframe, decode its key, and POST to api.php for the url."""
        data=self.getpq(id)
        try:
            surl=data('section[style*="padding-top"] iframe').eq(0).attr('src')
            sd=pq(self.fetch(surl,headers=self.headers).text)('body script').html()
            jdata=self.extract_values(sd)
            # The page obfuscates the api key; hhh() reverses the encoding.
            jdata['key']=self.hhh(jdata['key'])
            parsed_url = urlparse(surl)
            durl = parsed_url.scheme + "://" + parsed_url.netloc
            headers = {
                'accept': 'application/json, text/javascript, */*; q=0.01',
                'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'cache-control': 'no-cache',
                'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'dnt': '1',
                'origin': durl,
                'pragma': 'no-cache',
                'priority': 'u=1, i',
                'referer': f'{surl}',
                'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
                'sec-ch-ua-mobile': '?1',
                'sec-ch-ua-platform': '"Android"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'sec-fetch-storage-access': 'active',
                'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
                'x-requested-with': 'XMLHttpRequest',
            }
            jjb=self.post(f"{durl}/api.php",headers=headers,data=jdata).json()
            url,p=jjb['url'],0
        except Exception as e:
            # Any failure: fall back to client-side web-view parsing of the page.
            self.log(f"失败: {e}")
            url,p=f'{self.host}{id}',1
        phd={
            'User-Agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua-platform': '"Android"',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="130", "Google Chrome";v="130"',
            'sec-fetch-dest': 'video',
            'referer': f'{self.host}/',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }
        return {'parse': p, 'url': url, 'header': phd}

    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def gethost(self):
        """Return the first mirror listed on shapp.us that answers HTTP 200."""
        data=pq(self.fetch("http://shapp.us",headers=self.headers).text)
        for i in data('.content-top ul li').items():
            h=i('a').attr('href')
            if h:
                data = self.fetch(h, headers=self.headers, timeout=5)
                if data.status_code == 200:
                    return h

    def extract_values(self, text):
        """Pull the url/t/key/act/play vars out of the player page's script."""
        url_match = re.search(r'var url = "([^"]+)"', text)
        url = url_match.group(1) if url_match else None
        t_match = re.search(r'var t = "([^"]+)"', text)
        t = t_match.group(1) if t_match else None
        key_match = re.search(r'var key = hhh\("([^"]+)"\)', text)
        key_param = key_match.group(1) if key_match else None
        act_match = re.search(r'var act = "([^"]+)"', text)
        act = act_match.group(1) if act_match else None
        play_match = re.search(r'var play = "([^"]+)"', text)
        play = play_match.group(1) if play_match else None
        return {
            "url": url,
            "t": t,
            "key": key_param,
            "act": act,
            "play": play
        }

    def getlist(self,data):
        """Map listing <li> nodes to vod dicts (href as vod_id)."""
        videos = []
        for i in data.items():
            videos.append({
                'vod_id': i('a').attr('href'),
                'vod_name': i('a').attr('title'),
                'vod_pic': i('a img').attr('data-original'),
                'vod_remarks': i('.v_note').text()
            })
        return videos

    def getpq(self, path=''):
        """Fetch host+path and parse with PyQuery, retrying with UTF-8 bytes."""
        data=self.fetch(f"{self.host}{path}",headers=self.headers).text
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))

    def hhh(self, t):
        """Decode the site's obfuscated key: base64-decode, then replace each
        marker token from the substitution table with its plain character."""
        ee = {
            "0Oo0o0O0": "a", "1O0bO001": "b", "2OoCcO2": "c", "3O0dO0O3": "d",
            "4OoEeO4": "e", "5O0fO0O5": "f", "6OoGgO6": "g", "7O0hO0O7": "h",
            "8OoIiO8": "i", "9O0jO0O9": "j", "0OoKkO0": "k", "1O0lO0O1": "l",
            "2OoMmO2": "m", "3O0nO0O3": "n", "4OoOoO4": "o", "5O0pO0O5": "p",
            "6OoQqO6": "q", "7O0rO0O7": "r", "8OoSsO8": "s", "9O0tO0O9": "t",
            "0OoUuO0": "u", "1O0vO0O1": "v", "2OoWwO2": "w", "3O0xO0O3": "x",
            "4OoYyO4": "y", "5O0zO0O5": "z", "0OoAAO0": "A", "1O0BBO1": "B",
            "2OoCCO2": "C", "3O0DDO3": "D", "4OoEEO4": "E", "5O0FFO5": "F",
            "6OoGGO6": "G", "7O0HHO7": "H", "8OoIIO8": "I", "9O0JJO9": "J",
            "0OoKKO0": "K", "1O0LLO1": "L", "2OoMMO2": "M", "3O0NNO3": "N",
            "4OoOOO4": "O", "5O0PPO5": "P", "6OoQQO6": "Q", "7O0RRO7": "R",
            "8OoSSO8": "S", "9O0TTO9": "T", "0OoUO0": "U", "1O0VVO1": "V",
            "2OoWWO2": "W", "3O0XXO3": "X", "4OoYYO4": "Y", "5O0ZZO5": "Z"
        }
        n = ""
        o = base64.b64decode(t).decode('utf-8', errors='replace')
        i = 0
        while i < len(o):
            l = o[i]
            found = False
            # Longest-token scan: if a marker starts here, emit its mapped
            # character and jump past the whole marker.
            for key, value in ee.items():
                if o[i:i + len(key)] == key:
                    l = value
                    i += len(key) - 1
                    found = True
                    break
            if not found:
                pass
            n += l
            i += 1
        return n
diff --git a/PyramidStore/plugin/official/优.py b/PyramidStore/plugin/official/优.py
new file mode 100644
index 0000000..399d0db
--- /dev/null
+++ b/PyramidStore/plugin/official/优.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import quote
+from Crypto.Hash import MD5
+import requests
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.session = requests.Session()
+ self.session.headers.update(self.headers)
+ self.session.cookies.update(self.cookie)
+ self.get_ctoken()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='https://www.youku.com'
+
+ shost='https://search.youku.com'
+
+ h5host='https://acs.youku.com'
+
+ ihost='https://v.youku.com'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)',
+ 'Referer': f'{host}/'
+ }
+
+ cookie={
+ "__ysuid": "17416134165380iB",
+ "__aysid": "1741613416541WbD",
+ "xlly_s": "1",
+ "isI18n": "false",
+ "cna": "bNdVIKmmsHgCAXW9W6yrQ1/s",
+ "__ayft": "1741672162330",
+ "__arpvid": "1741672162331FBKgrn-1741672162342",
+ "__ayscnt": "1",
+ "__aypstp": "1",
+ "__ayspstp": "3",
+ "tfstk": "gZbiib4JpG-6DqW-B98_2rwPuFrd1fTXQt3vHEp4YpJIBA3OgrWcwOi90RTOo9XVQ5tAM5NcK_CP6Ep97K2ce1XDc59v3KXAgGFLyzC11ET2n8U8yoyib67M3xL25e8gS8pbyzC1_ET4e8URWTsSnHv2uh8VTeJBgEuN3d-ELQAWuKWV36PHGpJ2uEWVTxvicLX1ewyUXYSekxMf-CxMEqpnoqVvshvP_pABOwvXjL5wKqeulm52np_zpkfCDGW9Ot4uKFIRwZtP7vP9_gfAr3KEpDWXSIfWRay-DHIc_Z-hAzkD1i5Ooi5LZ0O5YO_1mUc476YMI3R6xzucUnRlNe_zemKdm172xMwr2L7CTgIkbvndhFAVh3_YFV9Ng__52U4SQKIdZZjc4diE4EUxlFrfKmiXbBOHeP72v7sAahuTtWm78hRB1yV3tmg9bBOEhWVnq5KwOBL5."
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ categories = ["电视剧", "电影", "综艺", "动漫", "少儿", "纪录片", "文化", "亲子", "教育", "搞笑", "生活",
+ "体育", "音乐", "游戏"]
+ classes = [{'type_name': category, 'type_id': category} for category in categories]
+ filters = {}
+ self.typeid = {}
+ with ThreadPoolExecutor(max_workers=len(categories)) as executor:
+ tasks = {
+ executor.submit(self.cf, {'type': category}, True): category
+ for category in categories
+ }
+
+ for future in as_completed(tasks):
+ try:
+ category = tasks[future]
+ session, ft = future.result()
+ filters[category] = ft
+ self.typeid[category] = session
+ except Exception as e:
+ print(f"处理分类 {tasks[future]} 时出错: {str(e)}")
+
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ try:
+ vlist = []
+ params={"ms_codes":"2019061000","params":"{\"debug\":0,\"gray\":0,\"pageNo\":1,\"utdid\":\"ZYmGMAAAACkDAMU8hbiMmYdd\",\"userId\":\"\",\"bizKey\":\"YOUKU_WEB\",\"appPackageKey\":\"com.youku.YouKu\",\"showNodeList\":0,\"reqSubNode\":0,\"nodeKey\":\"WEBHOME\",\"bizContext\":\"{\\\"spmA\\\":\\\"a2hja\\\"}\"}","system_info":"{\"device\":\"pcweb\",\"os\":\"pcweb\",\"ver\":\"1.0.0.0\",\"userAgent\":\"Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)\",\"guid\":\"1590141704165YXe\",\"appPackageKey\":\"com.youku.pcweb\",\"young\":0,\"brand\":\"\",\"network\":\"\",\"ouid\":\"\",\"idfa\":\"\",\"scale\":\"\",\"operator\":\"\",\"resolution\":\"\",\"pid\":\"\",\"childGender\":0,\"zx\":0}"}
+ data=self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.home.query/1.0/',params)
+ okey=list(data['data'].keys())[0]
+ for i in data['data'][okey]['data']['nodes'][0]['nodes'][-1]['nodes'][0]['nodes']:
+ if i.get('nodes') and i['nodes'][0].get('data'):
+ i=i['nodes'][0]['data']
+ if i.get('assignId'):
+ vlist.append({
+ 'vod_id': i['assignId'],
+ 'vod_name': i.get('title'),
+ 'vod_pic': i.get('vImg') or i.get('img'),
+ 'vod_year': i.get('mark',{}).get('data',{}).get('text'),
+ 'vod_remarks': i.get('summary')
+ })
+ return {'list': vlist}
+ except Exception as e:
+ print(f"处理主页视频数据时出错: {str(e)}")
+ return {'list': []}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ vlist = []
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ pagecount = 9999
+ params = {'type': tid}
+ id = self.typeid[tid]
+ params.update(extend)
+ if pg == '1':
+ id=self.cf(params)
+        data=self.session.get(f'{self.host}/category/data?session={id}&params={quote(json.dumps(params))}&pageNo={pg}').json()
+ try:
+ data=data['data']['filterData']
+ for i in data['listData']:
+ if i.get('videoLink') and 's=' in i['videoLink']:
+ vlist.append({
+ 'vod_id': i.get('videoLink').split('s=')[-1],
+ 'vod_name': i.get('title'),
+ 'vod_pic': i.get('img'),
+ 'vod_year': i.get('rightTagText'),
+ 'vod_remarks': i.get('summary')
+ })
+ self.typeid[tid]=quote(json.dumps(data['session']))
+ except:
+ pagecount=pg
+ result['list'] = vlist
+ result['pagecount'] = pagecount
+ return result
+
+ def detailContent(self, ids):
+ try:
+ data=self.session.get(f'{self.ihost}/v_getvideo_info/?showId={ids[0]}').json()
+ v=data['data']
+ vod = {
+ 'type_name': v.get('showVideotype'),
+ 'vod_year': v.get('lastUpdate'),
+ 'vod_remarks': v.get('rc_title'),
+ 'vod_actor': v.get('_personNameStr'),
+ 'vod_content': v.get('showdesc'),
+ 'vod_play_from': '优酷',
+ 'vod_play_url': ''
+ }
+ params={"biz":"new_detail_web2","videoId":v.get('vid'),"scene":"web_page","componentVersion":"3","ip":data.get('ip'),"debug":0,"utdid":"ZYmGMAAAACkDAMU8hbiMmYdd","userId":0,"platform":"pc","nextSession":"","gray":0,"source":"pcNoPrev","showId":ids[0]}
+ sdata,index=self.getinfo(params)
+ pdata=sdata['nodes']
+ if index > len(pdata):
+ batch_size = len(pdata)
+ total_batches = ((index + batch_size - 1) // batch_size) - 1
+ ssj = json.loads(sdata['data']['session'])
+ with ThreadPoolExecutor(max_workers=total_batches) as executor:
+ futures = []
+ for batch in range(total_batches):
+ start = batch_size + 1 + (batch * batch_size)
+ end = start + batch_size - 1
+ next_session = ssj.copy()
+ next_session.update({
+ "itemStartStage": start,
+ "itemEndStage": min(end, index)
+ })
+ current_params = params.copy()
+ current_params['nextSession'] = json.dumps(next_session)
+ futures.append((start, executor.submit(self.getvinfo, current_params)))
+ futures.sort(key=lambda x: x[0])
+
+ for _, future in futures:
+ try:
+ result = future.result()
+ pdata.extend(result['nodes'])
+ except Exception as e:
+ print(f"Error fetching data: {str(e)}")
+ vod['vod_play_url'] = '#'.join([f"{i['data'].get('title')}${i['data']['action'].get('value')}" for i in pdata])
+ return {'list': [vod]}
+ except Exception as e:
+ print(e)
+ return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'呜呜呜${self.host}'}]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.session.get(f'{self.shost}/api/search?pg={pg}&keyword={key}').json()
+ vlist = []
+ for i in data['pageComponentList']:
+ if i.get('commonData') and (i['commonData'].get('showId') or i['commonData'].get('realShowId')):
+ i=i['commonData']
+ vlist.append({
+ 'vod_id': i.get('showId') or i.get('realShowId'),
+ 'vod_name': i['titleDTO'].get('displayName'),
+ 'vod_pic': i['posterDTO'].get('vThumbUrl'),
+ 'vod_year': i.get('feature'),
+ 'vod_remarks': i.get('updateNotice')
+ })
+ return {'list': vlist, 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'jx':1,'parse': 1, 'url': f"{self.ihost}/video?vid={id}", 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def cf(self,params,b=False):
+ response = self.session.get(f'{self.host}/category/data?params={quote(json.dumps(params))}&optionRefresh=1&pageNo=1').json()
+ data=response['data']['filterData']
+ session=quote(json.dumps(data['session']))
+ if b:
+ return session,self.get_filter_data(data['filter']['filterData'][1:])
+ return session
+
+ def process_key(self, key):
+ if '_' not in key:
+ return key
+ parts = key.split('_')
+ result = parts[0]
+ for part in parts[1:]:
+ if part:
+ result += part[0].upper() + part[1:]
+ return result
+
+ def get_filter_data(self, data):
+ result = []
+ try:
+ for item in data:
+ if not item.get('subFilter'):
+ continue
+ first_sub = item['subFilter'][0]
+ if not first_sub.get('filterType'):
+ continue
+ filter_item = {
+ 'key': self.process_key(first_sub['filterType']),
+ 'name': first_sub['title'],
+ 'value': []
+ }
+ for sub in item['subFilter']:
+ if 'value' in sub:
+ filter_item['value'].append({
+ 'n': sub['title'],
+ 'v': sub['value']
+ })
+ if filter_item['value']:
+ result.append(filter_item)
+
+ except Exception as e:
+ print(f"处理筛选数据时出错: {str(e)}")
+
+ return result
+
+ def get_ctoken(self):
+ data=self.session.get(f'{self.h5host}/h5/mtop.ykrec.recommendservice.recommend/1.0/?jsv=2.6.1&appKey=24679788')
+
+ def md5(self,t,text):
+ h = MD5.new()
+ token=self.session.cookies.get('_m_h5_tk').split('_')[0]
+ data=f"{token}&{t}&24679788&{text}"
+ h.update(data.encode('utf-8'))
+ return h.hexdigest()
+
+ def getdata(self, url, params, recursion_count=0, max_recursion=3):
+ data = json.dumps(params)
+ t = int(time.time() * 1000)
+ jsdata = {
+ 'appKey': '24679788',
+ 't': t,
+ 'sign': self.md5(t, data),
+ 'data': data
+ }
+ response = self.session.get(url, params=jsdata)
+ if '令牌过期' in response.text:
+ if recursion_count >= max_recursion:
+ raise Exception("达到最大递归次数,无法继续请求")
+ self.get_ctoken()
+ return self.getdata(url, params, recursion_count + 1, max_recursion)
+ else:
+ return response.json()
+
+ def getvinfo(self,params):
+ body = {
+ "ms_codes": "2019030100",
+ "params": json.dumps(params),
+ "system_info": "{\"os\":\"iku\",\"device\":\"iku\",\"ver\":\"9.2.9\",\"appPackageKey\":\"com.youku.iku\",\"appPackageId\":\"pcweb\"}"
+ }
+ data = self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.gateway.new.execute/1.0/', body)
+ okey = list(data['data'].keys())[0]
+ i = data['data'][okey]['data']
+ return i
+
+ def getinfo(self,params):
+ i = self.getvinfo(params)
+ jdata=i['nodes'][0]['nodes'][3]
+ info=i['data']['extra']['episodeTotal']
+ if i['data']['extra']['showCategory'] in ['电影','游戏']:
+ jdata = i['nodes'][0]['nodes'][4]
+ return jdata,info
+
diff --git a/PyramidStore/plugin/official/爱.py b/PyramidStore/plugin/official/爱.py
new file mode 100644
index 0000000..7574897
--- /dev/null
+++ b/PyramidStore/plugin/official/爱.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import quote
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.did = 'f8da348e186e6ee574d647918f5a7114'
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ rhost = 'https://www.iqiyi.com'
+
+ hhost='https://mesh.if.iqiyi.com'
+
+ dhost='https://miniapp.iqiyi.com'
+
+ headers = {
+ 'Origin': rhost,
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
+ 'Referer': f'{rhost}/',
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "全部": "1009",
+ "电影": "1",
+ "剧集": "2",
+ "综艺": "6",
+ "动漫": "4",
+ "儿童": "15",
+ "微剧": "35",
+ "纪录片": "3"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ results = executor.map(self.getf, classes)
+ for id, ft in results:
+ if len(ft):filters[id] = ft
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
+ vlist = []
+ for i in data['items'][1:]:
+ for j in i['video'][0]['data']:
+ id = j.get('firstId')
+ pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
+ if id and pic:
+ pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
+ id = f'{id}@{self.e64(pu)}'
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': j.get('display_name'),
+ 'vod_pic': pic,
+ 'vod_year': j.get('sns_score'),
+ 'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
+ })
+ return {'list':vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ if pg == "1":
+ self.sid = ''
+ new_data = {'mode':'24'}
+ for key, value in extend.items():
+ if value:
+ key_value_pairs = self.d64(value).split(',')
+ for pair in key_value_pairs:
+ k, v = pair.split('=')
+ if k in new_data:
+ new_data[k] += "," + v
+ else:
+ new_data[k] = v
+ path=f"/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.034.21571&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth=&recent_selected_tag=&ad=%5B%7B%22lm%22%3A%225%22%2C%22ai%22%3A%225%22%2C%22fp%22%3A%226%22%2C%22sei%22%3A%22S78ff51b694677e17af4b19368dadb7bd%22%2C%22position%22%3A%22library%22%7D%5D&adExt=%7B%22r%22%3A%221.2.1-ares6-pure%22%7D&dfp=a00b3c577e541c41149be7cde9320500b0a11307e61a8445448f7f4a9e895ced0f&filter={quote(json.dumps(new_data))}"
+ data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
+ self.sid = data['session']
+ videos = []
+ for i in data['data']:
+ id = i.get('firstId') or i.get('tv_id')
+ if not id:
+ id=i.get('play_url').split(';')[0].split('=')[-1]
+ if id and not i.get('h'):
+ id=f'{id}@{self.e64(i.get("page_url"))}'
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': i.get('display_name'),
+ 'vod_pic': i.get('album_image_url_hover'),
+ 'vod_year': i.get('sns_score'),
+ 'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
+ })
+ result = {}
+ result['list'] = videos
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@')
+ ids[-1] = self.d64(ids[-1])
+ data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
+ v=data['data']['playInfo']
+ vod = {
+ 'vod_name': v.get('albumName'),
+ 'type_name': v.get('tags'),
+ 'vod_year': v.get('albumYear'),
+ 'vod_remarks': v.get('updateStrategy'),
+ 'vod_actor': v.get('mainActors'),
+ 'vod_director': v.get('directors'),
+ 'vod_content': v.get('albumDesc'),
+ 'vod_play_from': '爱奇艺',
+ 'vod_play_url': ''
+ }
+ if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
+ purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
+ pg=data['data']['videoList'].get('totalPages')
+ if pg and pg > 1:
+ id = v['albumId']
+ pages = list(range(2, pg + 1))
+ page_results = {}
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_to_page = {
+ executor.submit(self.fetch_page_data, page, id): page
+ for page in pages
+ }
+ for future in as_completed(future_to_page):
+ page = future_to_page[future]
+ try:
+ result = future.result()
+ page_results[page] = result
+ except Exception as e:
+ print(f"Error fetching page {page}: {e}")
+ for page in sorted(page_results.keys()):
+ purl.extend(page_results[page])
+ vod['vod_play_url'] = '#'.join(purl)
+ else:
+ vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
+ v=vdata['data']['playInfo']
+ vod = {
+ 'vod_name': v.get('shortTitle'),
+ 'type_name': v.get('channelName'),
+ 'vod_year': v.get('year'),
+ 'vod_remarks': v.get('focus'),
+ 'vod_actor': v.get('mainActors'),
+ 'vod_director': v.get('directors'),
+ 'vod_content': v.get('desc'),
+ 'vod_play_from': '爱奇艺',
+ 'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
+ }
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+        data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
+ videos = []
+ vdata=data['data']['templates']
+ for i in data['data']['templates']:
+ if i.get('intentAlbumInfos'):
+ vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata
+
+ for i in vdata:
+ if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
+ b=i['albumInfo']
+ id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': b.get('title'),
+ 'vod_pic': b.get('img'),
+ 'vod_year': (b.get('year',{}) or {}).get('value'),
+ 'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
+ })
+ return {'list':videos,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ id=id.replace('http://m.','https://www.')
+ return {'jx':1,'parse': 1, 'url': id, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def fetch_page_data(self, page, id):
+ try:
+ url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
+ data = self.fetch(url, headers=self.headers).json()
+ return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
+ except:
+ return []
+
+ def getf(self,body):
+ data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
+ ft = []
+ # for i in data[:-1]:
+ for i in data:
+ try:
+ value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
+ value.get('tag_param')]
+ ft.append({"key": i['group'], "name": i['group'], "value": value_array})
+ except:
+ print(i)
+ return (body['type_id'], ft)
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text: str):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def random_str(self,length=16):
+ hex_chars = '0123456789abcdef'
+ return ''.join(random.choice(hex_chars) for _ in range(length))
diff --git a/PyramidStore/plugin/official/腾.py b/PyramidStore/plugin/official/腾.py
new file mode 100644
index 0000000..dfa4404
--- /dev/null
+++ b/PyramidStore/plugin/official/腾.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import uuid
+import copy
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.dbody = {
+ "page_params": {
+ "channel_id": "",
+ "filter_params": "sort=75",
+ "page_type": "channel_operation",
+ "page_id": "channel_list_second_page"
+ }
+ }
+ self.body = self.dbody
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host = 'https://v.qq.com'
+
+ apihost = 'https://pbaccess.video.qq.com'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
+ 'origin': host,
+ 'referer': f'{host}/'
+ }
+
+ def homeContent(self, filter):
+ cdata = {
+ "电视剧": "100113",
+ "电影": "100173",
+ "综艺": "100109",
+ "纪录片": "100105",
+ "动漫": "100119",
+ "少儿": "100150",
+ "短剧": "110755"
+ }
+ result = {}
+ classes = []
+ filters = {}
+ for k in cdata:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cdata[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
+ for future in futures:
+ cid, data = future.result()
+ if not data.get('data', {}).get('module_list_datas'):
+ continue
+ filter_dict = {}
+ try:
+ items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
+ for item in items:
+ if not item.get('item_params', {}).get('index_item_key'):
+ continue
+ params = item['item_params']
+ filter_key = params['index_item_key']
+ if filter_key not in filter_dict:
+ filter_dict[filter_key] = {
+ 'key': filter_key,
+ 'name': params['index_name'],
+ 'value': []
+ }
+ filter_dict[filter_key]['value'].append({
+ 'n': params['option_name'],
+ 'v': params['option_value']
+ })
+ except (IndexError, KeyError):
+ continue
+ filters[cid] = list(filter_dict.values())
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
+ data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
+ vlist = []
+ for it in data['data']['CardList'][0]['children_list']['list']['cards']:
+ if it.get('params'):
+ p = it['params']
+ tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
+ id = it.get('id') or p.get('cid')
+ name = p.get('mz_title') or p.get('title')
+ if name and 'http' not in id:
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': name,
+ 'vod_pic': p.get('image_url'),
+ 'vod_year': tag.get('tag_2', {}).get('text'),
+ 'vod_remarks': tag.get('tag_4', {}).get('text')
+ })
+ return {'list': vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ params = {
+ "sort": extend.get('sort', '75'),
+ "attr": extend.get('attr', '-1'),
+ "itype": extend.get('itype', '-1'),
+ "ipay": extend.get('ipay', '-1'),
+ "iarea": extend.get('iarea', '-1'),
+ "iyear": extend.get('iyear', '-1'),
+ "theater": extend.get('theater', '-1'),
+ "award": extend.get('award', '-1'),
+ "recommend": extend.get('recommend', '-1')
+ }
+ if pg == '1':
+ self.body = self.dbody.copy()
+ self.body['page_params']['channel_id'] = tid
+ self.body['page_params']['filter_params'] = self.josn_to_params(params)
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=self.body, headers=self.headers).json()
+ ndata = data['data']
+ if ndata['has_next_page']:
+ result['pagecount'] = 9999
+ self.body['page_context'] = ndata['next_page_context']
+ else:
+ result['pagecount'] = int(pg)
+ vlist = []
+ for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
+ id = its.get('item_params', {}).get('cid')
+ if id:
+ p = its['item_params']
+ tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
+ name = p.get('mz_title') or p.get('title')
+ pic = p.get('new_pic_hz') or p.get('new_pic_vt')
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': name,
+ 'vod_pic': pic,
+ 'vod_year': tag.get('tag_2', {}).get('text'),
+ 'vod_remarks': tag.get('tag_4', {}).get('text')
+ })
+ result['list'] = vlist
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
+ body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ future_detail = executor.submit(self.get_vdata, vbody)
+ future_episodes = executor.submit(self.get_vdata, body)
+ vdata = future_detail.result()
+ data = future_episodes.result()
+
+ pdata = self.process_tabs(data, body, ids)
+ if not pdata:
+ return self.handle_exception(None, "No pdata available")
+
+ try:
+ star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
+ 0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
+ actors = [star['item_params']['name'] for star in star_list]
+ names = ['腾讯视频', '预告片']
+ plist, ylist = self.process_pdata(pdata, ids)
+ if not plist:
+ del names[0]
+ if not ylist:
+ del names[1]
+ vod = self.build_vod(vdata, actors, plist, ylist, names)
+ return {'list': [vod]}
+ except Exception as e:
+ return self.handle_exception(e, "Error processing detail")
+
+ def searchContent(self, key, quick, pg="1"):
+ params = {
+ "query": key,
+ "appID": "3172",
+ "appKey": "lGhFIPeD3HsO9xEp",
+ "pageNum": int(pg) - 1,
+ "pageSize": "10"
+ }
+ data = self.fetch(f"{self.apihost}/trpc.videosearch.smartboxServer.HttpRountRecall/Smartbox", params=params,headers=self.headers).json()
+ vlist = []
+ for k in data['data']['smartboxItemList']:
+ if k.get('basicDoc') and k['basicDoc'].get('id'):
+ img_tag = k.get('videoInfo', {}).get('imgTag')
+ if img_tag is not None and isinstance(img_tag, str):
+ try:
+ tag = json.loads(img_tag)
+ except json.JSONDecodeError as e:
+ tag = {}
+ else:
+ tag = {}
+ vlist.append({
+ 'vod_id': k['basicDoc']['id'],
+ 'vod_name': self.removeHtmlTags(k['basicDoc']['title']),
+ 'vod_pic': k['videoInfo']['imgUrl'],
+ 'vod_year': k['videoInfo'].get('typeName') + ' ' + tag.get('tag_2', {}).get('text', ''),
+ 'vod_remarks': tag.get('tag_4', {}).get('text', '')
+ })
+ return {'list': vlist, 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ ids = id.split('@')
+ url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
+ return {'jx':1,'parse': 1, 'url': url, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def get_filter_data(self, cid):
+ hbody = self.dbody.copy()
+ hbody['page_params']['channel_id'] = cid
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=hbody, headers=self.headers).json()
+ return cid, data
+
+ def get_vdata(self, body):
+ try:
+ vdata = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
+ json=body, headers=self.headers
+ ).json()
+ return vdata
+ except Exception as e:
+ print(f"Error in get_vdata: {str(e)}")
+ return {'data': {'module_list_datas': []}}
+
+ def process_pdata(self, pdata, ids):
+ plist = []
+ ylist = []
+ for k in pdata:
+ if k.get('item_id'):
+ pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
+ if '预告' in k['item_params']['union_title']:
+ ylist.append(pid)
+ else:
+ plist.append(pid)
+ return plist, ylist
+
+ def build_vod(self, vdata, actors, plist, ylist, names):
+ d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
+ urls = []
+ if plist:
+ urls.append('#'.join(plist))
+ if ylist:
+ urls.append('#'.join(ylist))
+ vod = {
+ 'type_name': d.get('sub_genre', ''),
+ 'vod_name': d.get('title', ''),
+ 'vod_year': d.get('year', ''),
+ 'vod_area': d.get('area_name', ''),
+ 'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
+ 'vod_actor': ','.join(actors),
+ 'vod_content': d.get('cover_description', ''),
+ 'vod_play_from': '$$$'.join(names),
+ 'vod_play_url': '$$$'.join(urls)
+ }
+ return vod
+
+ def handle_exception(self, e, message):
+ print(f"{message}: {str(e)}")
+ return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}
+
+ def process_tabs(self, data, body, ids):
+ try:
+ pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
+ tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
+ if tabs and len(json.loads(tabs)):
+ tabs = json.loads(tabs)
+ remaining_tabs = tabs[1:]
+ task_queue = []
+ for tab in remaining_tabs:
+ nbody = copy.deepcopy(body)
+ nbody['page_params']['page_context'] = tab['page_context']
+ task_queue.append(nbody)
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
+ results = [None] * len(task_queue)
+ for future in as_completed(future_map.keys()):
+ idx = future_map[future]
+ results[idx] = future.result()
+ for result in results:
+ if result:
+ page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
+ 'item_datas']
+ pdata.extend(page_data)
+ return pdata
+ except Exception as e:
+ print(f"Error processing episodes: {str(e)}")
+ return []
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+
diff --git a/PyramidStore/plugin/official/芒.py b/PyramidStore/plugin/official/芒.py
new file mode 100644
index 0000000..6ba8e34
--- /dev/null
+++ b/PyramidStore/plugin/official/芒.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # No per-instance setup is needed for this site.
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Site endpoints: main web site, category/list API, PC-web detail API,
    # home recommendation API, and mobile search API respectively.
    rhost='https://www.mgtv.com'

    host='https://pianku.api.mgtv.com'

    vhost='https://pcweb.api.mgtv.com'

    mhost='https://dc.bz.mgtv.com'

    shost='https://mobileso.bz.mgtv.com'

    # Desktop Chrome UA plus matching origin/referer so the APIs accept requests.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
        'origin': rhost,
        'referer': f'{rhost}/'
    }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "电影": "3",
+ "电视剧": "2",
+ "综艺": "1",
+ "动画": "50",
+ "少儿": "10",
+ "纪录片": "51",
+ "教育": "115"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ results = executor.map(self.getf, classes)
+ for id, ft in results:
+ if len(ft):filters[id] = ft
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
    def homeVideoContent(self):
        """Fetch the home recommendation feed and flatten it into a vod list."""
        data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
        videoList = []
        # Feed shape is modules -> DSLList blocks -> data.items; guard every
        # level because empty/partial modules are common in this feed.
        for i in data['data']:
            if i.get('DSLList') and len(i['DSLList']):
                for j in i['DSLList']:
                    if j.get('data') and j['data'].get('items') and len(j['data']['items']):
                        for k in j['data']['items']:
                            videoList.append({
                                'vod_id': k["videoId"],
                                'vod_name': k['videoName'],
                                'vod_pic': k['img'],
                                'vod_year': k.get('cornerTitle'),
                                'vod_remarks': k.get('time') or k.get('desc'),
                            })
        return {'list':videoList}
+
    def categoryContent(self, tid, pg, filter, extend):
        """List videos for channel `tid`, page `pg`.

        extend: selected filter values; passed straight through as API params.
        """
        body={
            'allowedRC': '1',
            'platform': 'pcweb',
            'channelId': tid,
            'pn': pg,
            'pc': '80',
            'hudong': '1',
            '_support': '10000000'
        }
        body.update(extend)
        data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
        videoList = []
        for i in data['data']['hitDocs']:
            videoList.append({
                'vod_id': i["playPartId"],
                'vod_name': i['title'],
                'vod_pic': i['img'],
                # rightCorner may be present-but-null, hence the `or {}` guard.
                'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
                'vod_remarks': i['updateInfo']
            })
        result = {}
        result['list'] = videoList
        result['page'] = pg
        # No reliable total from the API — advertise effectively endless paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
    def detailContent(self, ids):
        """Build the detail view for video ids[0]: metadata plus the full episode list.

        Episode pages beyond the first are fetched concurrently and reassembled
        in ascending page order.
        """
        vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
        vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
        d=vdata['data']['info']['detail']
        vod = {
            'vod_name': vdata['data']['info']['title'],
            'type_name': d.get('kind'),
            'vod_year': d.get('releaseTime'),
            'vod_area': d.get('area'),
            'vod_lang': d.get('language'),
            'vod_remarks': d.get('updateInfo'),
            'vod_actor': d.get('leader'),
            'vod_director': d.get('director'),
            'vod_content': d.get('story'),
            'vod_play_from': '芒果TV',
            'vod_play_url': ''
        }
        # Page 1 also yields the total page count (b=True returns the raw response too).
        data,pdata=self.fetch_page_data('1', ids[0],True)
        pagecount=data['data'].get('total_page') or 1
        if int(pagecount)>1:
            pages = list(range(2, pagecount+1))
            page_results = {}
            with ThreadPoolExecutor(max_workers=10) as executor:
                future_to_page = {
                    executor.submit(self.fetch_page_data, page, ids[0]): page
                    for page in pages
                }
                for future in as_completed(future_to_page):
                    page = future_to_page[future]
                    try:
                        result = future.result()
                        page_results[page] = result
                    except Exception as e:
                        print(f"Error fetching page {page}: {e}")
            # Stitch the extra pages back together in page order.
            for page in sorted(page_results.keys()):
                pdata.extend(page_results[page])
        vod['vod_play_url'] = '#'.join(pdata)
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
+ videoList = []
+ for i in data['data']['contents']:
+ if i.get('data') and len(i['data']):
+ k = i['data'][0]
+ if k.get('vid') and k.get('img'):
+ try:
+ videoList.append({
+ 'vod_id': k['vid'],
+ 'vod_name': k['title'],
+ 'vod_pic': k['img'],
+ 'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
+ 'vod_remarks': '/'.join(i.get('desc',[])),
+ })
+ except:
+ print(k)
+ return {'list':videoList,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ id=f'{self.rhost}{id}'
+ return {'jx':1,'parse': 1, 'url': id, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def getf(self, body):
+ params = {
+ 'allowedRC': '1',
+ 'channelId': body['type_id'],
+ 'platform': 'pcweb',
+ '_support': '10000000',
+ }
+ data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
+ ft = []
+ for i in data['data']['listItems']:
+ try:
+ value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
+ value.get('tagName')]
+ ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
+ except:
+ print(i)
+ return body['type_id'], ft
+
    def fetch_page_data(self, page, id, b=False):
        """Fetch one page of the episode list for video `id`.

        Returns episodes formatted as "title$url" strings; when b is true,
        also returns the raw response (used by detailContent to read total_page).
        """
        body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
                'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
        data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
        # t3 is the episode title; url is the relative play-page path.
        ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
        if b:
            return data, ldata
        else:
            return ldata
diff --git a/PyramidStore/plugin/tools/wogg_wobg分类筛选生成.py b/PyramidStore/plugin/tools/wogg_wobg分类筛选生成.py
new file mode 100644
index 0000000..612647b
--- /dev/null
+++ b/PyramidStore/plugin/tools/wogg_wobg分类筛选生成.py
@@ -0,0 +1,111 @@
+import re
+import asyncio
+from urllib.parse import unquote
+import aiohttp
+from pyquery import PyQuery as pq
+
+headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'Accept-Language': 'zh-CN,zh;q=0.9'
+}
+
+async def fetch(session, url):
+ async with session.get(url, headers=headers) as response:
+ return await response.text()
+
def grtclass(data):
    """Extract category entries ({'type_name','type_id'}) from a PyQuery node list.

    Only links whose href mentions 'type' or 'show' are considered; the id is
    the first number in the href, falling back to the filename stem, and
    duplicates are dropped (first occurrence wins).
    """
    seen = []
    classes = []
    for item in data.items():
        href = item('a').attr('href')
        if not href or ('type' not in href and 'show' not in href):
            continue
        match = re.search(r'\d+', href)
        cat_id = match.group(0) if match else href.split('/')[-1].split('.')[0]
        if cat_id in seen:
            continue
        seen.append(cat_id)
        classes.append({
            'type_name': item('a').text(),
            'type_id': cat_id
        })
    return classes
+
def get_k(text, type):
    """Map a link href / label to a filter key name.

    'wobg': the key name itself appears in the href (first hit wins; breaks out),
    falling back to 'id' when nothing matched but 'id' occurs in the text.
    'wogg': matched against Chinese alias lists; a later alias hit overwrites an
    earlier one (original last-match-wins behaviour, deliberately preserved).
    """
    cates = {"class": "类型,剧情", "area": "地区", "lang": "语言", "year": "年份,时间", "letter": "字母", "by": "排序", "sort": "排序"}
    key = ''
    for name, aliases in cates.items():
        if type == 'wobg':
            if name in text:
                key = name
                break
        elif type == 'wogg':
            if any(alias in text for alias in aliases.split(',')):
                key = name
    if type == 'wobg' and not key and 'id' in text:
        key = 'id'
    return key
+
def get_v(text, key, type):
    """Pull a filter value out of a link href.

    'wobg': the value is the path segment immediately after "<key>/".
    other ('wogg'): the value is the filename stem minus its first character,
    with dashes stripped; the letter bucket '0-9' is restored afterwards.
    """
    if type == 'wobg':
        segment = text.split(f'{key}/')[-1]
        return segment.split('/')[0].split('.')[0]
    stem = text.rsplit('/', 1)[-1].split('.')[0]
    value = stem[1:].replace('-', '')
    return '0-9' if value == '09' else value
+
+async def c(session, host):
+ html = await fetch(session, host)
+ data = pq(html)
+ classes = grtclass(data('.drop-content-items li'))
+ if not len(classes): classes = grtclass(data('.nav-menu-items li'))
+ return classes
+
+async def get_ft(session, url,type):
+ print(f"请求: {url}")
+ html = await fetch(session, url)
+ data = pq(html)
+ ft = []
+ for i in list(data('div.library-box.scroll-box').items())[1:]:
+ n = i('a.library-item-first').text()
+ c = i('.library-list a')
+ if type == 'wobg':
+ key = get_k(c.eq(0).attr('href'), type)
+ else:
+ key = get_k(n,type)
+ ft.append({
+ 'name': n or key,
+ 'key': key,
+ 'value': [{'v': unquote(get_v(j.attr('href'),key,type)), 'n': j.text()} for j in c.items()]
+ })
+ return ft
+
+async def main(host,type):
+ async with aiohttp.ClientSession() as session:
+ categories = await c(session, host)
+ print(f"分类: {categories}")
+ tasks = []
+ fts = {}
+ if len(categories):
+ for i in categories:
+ path=f"/index.php/vod/show/id/{i['type_id']}.html" if type == 'wobg' else f"/vodtype/{i['type_id']}.html"
+ task = asyncio.create_task(get_ft(session, f"{host}{path}",type))
+ tasks.append((i['type_id'], task))
+ for type_id, task in tasks:
+ fts[type_id] = await task
+ return {'class': categories, 'filters': fts}
+
if __name__ == '__main__':
    # url = 'http://wogg.xxooo.cf'
    url = 'http://2xiaopan.fun'
    types = ['wobg', 'wogg']
    # Fix: asyncio.get_event_loop() outside a running loop is deprecated since
    # Python 3.10; asyncio.run() creates, runs and closes a fresh loop correctly.
    result = asyncio.run(main(url, types[0]))
    print('分类筛选生成结果:')
    print(result)
\ No newline at end of file
diff --git a/PyramidStore/plugin/小白调试示例.py b/PyramidStore/plugin/小白调试示例.py
new file mode 100644
index 0000000..aa2f1f6
--- /dev/null
+++ b/PyramidStore/plugin/小白调试示例.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+'''
+调试说明:打开Python编辑器,导入项目,在plugin目录下新建文件并编写代码一键运行
+'''
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ ahost='https://api.cenguigui.cn'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'DNT': '1',
+ 'sec-ch-ua-mobile': '?0',
+ 'Sec-Fetch-Site': 'cross-site',
+ 'Sec-Fetch-Mode': 'no-cors',
+ 'Sec-Fetch-Dest': 'video',
+ 'Sec-Fetch-Storage-Access': 'active',
+ 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ classes = [{'type_name': '穿越', 'type_id': '穿越'}]
+ result['class'] = classes
+ return result
+
+ def homeVideoContent(self):
+ pass
+
    def categoryContent(self, tid, pg, filter, extend):
        """List short-drama titles for category `tid` (also reused by searchContent,
        which passes the search keyword as `tid`)."""
        params = {
            'classname': tid,
            # This API pages via a 0-based offset rather than a page number.
            'offset': str((int(pg) - 1)),
        }
        data = self.fetch(f'{self.ahost}/api/duanju/api.php', params=params, headers=self.headers).json()
        videos = []
        for k in data['data']:
            videos.append({
                'vod_id': k.get('book_id'),
                'vod_name': k.get('title'),
                'vod_pic': k.get('cover'),
                'vod_year': k.get('score'),
                'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
            })
        result = {}
        result['list'] = videos
        result['page'] = pg
        # No total available from the API — advertise effectively endless paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
    def detailContent(self, ids):
        """Fetch metadata and the episode list for book_id ids[0]."""
        v=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
        vod = {
            'type_name': v.get('category'),
            'vod_year': v.get('time'),
            'vod_remarks': v.get('duration'),
            'vod_content': v.get('desc'),
            'vod_play_from': '嗷呜爱看短剧',
            # Episodes joined as "title$video_id" pairs separated by '#'.
            'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
        }
        return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ return self.categoryContent(key, pg, True, {})
+
+ def playerContent(self, flag, id, vipFlags):
+ data=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
+ return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}
+
+ def localProxy(self, param):
+ pass
+
+
+if __name__ == "__main__":
+ sp = Spider()
+ formatJo = sp.init([]) # 初始化
+ formatJo = sp.homeContent(False) # 筛选分类(首页 可选)
+ # formatJo = sp.homeVideoContent() # (首页 可选)
+ # formatJo = sp.searchContent("斗罗",False,'1') # 搜索
+ # formatJo = sp.categoryContent('', '1', False, {}) # 分类
+ # formatJo = sp.detailContent(['139625']) # 详情
+ # formatJo = sp.playerContent("","",{}) # 播放
+ # formatJo = sp.localProxy({"":""}) # 代理
+ print(formatJo)
diff --git a/PyramidStore/spider.md b/PyramidStore/spider.md
new file mode 100644
index 0000000..dc952de
--- /dev/null
+++ b/PyramidStore/spider.md
@@ -0,0 +1,81 @@
+
+## Pyramid爬虫写法
+
+目前所有爬虫继承[spider.py](https://github.com/JJBJJ/PyramidStore/blob/main/base/spider.py)
+
+spider提供了一些需要被实现的方法和一些公共方法,请自行查阅
+
+使用 [base/local.py](https://github.com/JJBJJ/PyramidStore/blob/main/base/local.py) 进行爬虫调试,所有待调试的爬虫需位于plugin目录下
+
+#### 快速开发
+
+参考[美帕APP.py](https://github.com/JJBJJ/PyramidStore/blob/main/plugin/app/美帕APP.py)进行快速开发
+##### 1. 爬虫方法
+
+```python
+ # 这些具体的写法和Java版本的爬虫一致
+ # 主页
+ def homeContent(self,filter):pass
+ # 推荐视频
+ def homeVideoContent(self):pass
+ # 分类
+ def categoryContent(self,tid,pg,filter,extend):pass
+ # 详情
+ def detailContent(self,ids):pass
+ # 搜索
+ def searchContent(self,key,quick):pass
+ # 翻页搜索
+ def searchContentPage(self, key, quick, page):pass
+ # 播放
+ def playerContent(self,flag,id,vipFlags):pass
+ # 视频格式
+ def isVideoFormat(self,url):pass
+ # 视频检测
+ def manualVideoCheck(self):pass
+```
+
+##### 2. 本地代理
+
+代理地址写法```http://127.0.0.1:9978/proxy?do=py&type=```,其中{key}表示配置文件中key的名称,其他参数追加到地址最后即可。样例请参考光速.py playerContent方法
+
+```python
+ # 以下代码来自py_bilibilivd.py,完整代码请自行查看
+ # 本地代理
+ def localProxy(self, params):
+ if params['type'] == "mpd":
+ return self.proxyMpd(params)
+ if params['type'] == "media":
+ return self.proxyMedia(params)
+ return None
+
+ def proxyMpd(self, params):
+ content, durlinfos, mediaType = self.getDash(params)
+ if mediaType == 'mpd':
+ return [200, "application/dash+xml", content] # 200 返回string
+ else:
+ # 略
+ if '127.0.0.1:7777' in url:
+ header["Location"] = url
+ return [302, "video/MP2T", None, header] # 302重定向到url
+ r = requests.get(url, headers=header, stream=True)
+ return [206, "application/octet-stream", r.content] # 206 返回bytes
+```
+##### 3. 配置写法
+
+* ext写extend内容
+* api写py的网络地址或者本地地址
+
+```json
+{
+ "key": "光速",
+ "name": "光速",
+ "type": 3,
+ "api": "爬虫所在位置/光速.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1
+}
+```
+
+### 问题反馈
+问题请反馈到[telegram](https://t.me/+A3SLQRmPVi9kOThl)
diff --git a/XBPQ/139影视.json b/XBPQ/139影视.json
new file mode 100644
index 0000000..a98b4d3
--- /dev/null
+++ b/XBPQ/139影视.json
@@ -0,0 +1,7 @@
+{
+ "请求头": "手机",
+ "简介": "剧情简介','&&",
+ "副标题": "HD",
+ "分类url": "https://www.139ys.com/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$lianxuju#电影$dianying#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/55影视.json b/XBPQ/55影视.json
new file mode 100644
index 0000000..3c436f1
--- /dev/null
+++ b/XBPQ/55影视.json
@@ -0,0 +1,14 @@
+{
+ "作者": "",
+ "站名": "55影视",
+ "请求头": "手机",
+ "播放请求头": "手机",
+ "主页url": "https://www.5555kan.com/",
+ "简介": "+",
+ "导演": "导演:&&
",
+ "主演": "主演:&&
",
+ "线路数组": "&&/h3>",
+ "线路标题": ">&&<",
+ "分类url": "https://www.5555kan.com/show/{cateId}-{area}-{by}------{catePg}---{year}.html;;ak",
+ "分类": "电影$1#电视剧$2#综艺$3#动漫$4"
+}
\ No newline at end of file
diff --git a/XBPQ/99影视.json b/XBPQ/99影视.json
new file mode 100644
index 0000000..7ff261e
--- /dev/null
+++ b/XBPQ/99影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "剧情介绍: +col-pd text-collapse content\">&&",
+ "分类url": "http://www.kangping99.com/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$25#电影$1#连续剧$2#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/NO视频.json b/XBPQ/NO视频.json
new file mode 100644
index 0000000..fa05794
--- /dev/null
+++ b/XBPQ/NO视频.json
@@ -0,0 +1,16 @@
+{
+ "数组": "class=\"stui-vodlist__box&&",
+ "图片": "data-original=\"&&\"",
+ "标题": "title=\"&&\"",
+ "副标题": "text-right\">&&",
+ "链接": "href=\"&&\"",
+ "搜索url": "",
+ "起始页": "2",
+ "线路标题": "NO视频",
+ "播放数组": "class=\"*-div&&",
+ "播放列表": "class=\"btn btn-primary&&",
+ "播放标题": ">&&[不包含:蓝光]",
+ "播放链接": "href=\"&&\"",
+ "分类url": "https://www.dgjiawu.com/{cateId}/0/0/0/0/{catePg}[https://www.dgjiawu.com/{cateId}]",
+ "分类": "电影$mo-1zxgt/5738/1#电视剧$mo-1zxgt/5749/1#综艺$mo-1zxgt/5758/1#动漫$mo-1zxgt/5767/1"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/AVJIA.json b/XBPQ/adult/AVJIA.json
new file mode 100644
index 0000000..c8f97cc
--- /dev/null
+++ b/XBPQ/adult/AVJIA.json
@@ -0,0 +1,13 @@
+{
+ "首页": "0",
+ "直接播放": "1",
+ "数组": "&&
",
+ "链接": "&&
",
+ "简介": "&&
",
+ "分类数组": "&&",
+ "分类标题": ">&&",
+ "分类ID": "href=\"/category/&&-1.html\">",
+ "分类url": "https://avjia.net/category/{cateId}-{catePg}.html"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/AVgle.json b/XBPQ/adult/AVgle.json
new file mode 100644
index 0000000..c1fe361
--- /dev/null
+++ b/XBPQ/adult/AVgle.json
@@ -0,0 +1,12 @@
+{
+ "请求头": "手机",
+ "搜索url": "https://av.gl/vod/search.html?wd={wd}",
+ "分类url": "https://av.gl/vod/show/by/{by}/id/{cateId}/page/{catePg}.html",
+ "排序": "最近更新$time#今日浏览数$hits_day#本周浏览数$hits_week#本月浏览数$hits_month#总浏览数$hits",
+ "分类": "日本有码$Censored#日本无码$Uncensored#FC2-PPV$FC2-PPV#无码破解$Mosaic_Removed#中文字幕$CHN_SUB#MGS动画$MGS#写真$Adult_IDOL#国产$Asian_Amateur#欧美$Western_Porn",
+ "链接": "href=\"&&\">[包含:/av/]",
+ "直接播放": "1",
+ "影片类型": "cat\">&&<",
+ "影片年代": ">發布日期 : &&<",
+ "主演": "title=\"&&\""
+}
\ No newline at end of file
diff --git a/XBPQ/adult/AV影视.json b/XBPQ/adult/AV影视.json
new file mode 100644
index 0000000..775de41
--- /dev/null
+++ b/XBPQ/adult/AV影视.json
@@ -0,0 +1,6 @@
+{
+ "图片": "data-src=\"&&\"",
+ "直接播放": "1",
+ "分类url": "https://wyaslcwgroup.cfd/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "国产视频$1#主播$2#黑料$3#中文$6#国产传媒$7#制服$12#抖音$15#三级$17#AV解说$18#日本有码$8#日本无码$9#强奸乱轮$11#动漫$13#明星换脸$14#女优明星$16"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/BuzzAV.json b/XBPQ/adult/BuzzAV.json
new file mode 100644
index 0000000..13c184b
--- /dev/null
+++ b/XBPQ/adult/BuzzAV.json
@@ -0,0 +1,11 @@
+{
+ "主页url": "https://www.buzzav.com/",
+ "直接播放": "1",
+ "数组": "",
+ "图片": "data-src=\"&&\"",
+ "标题": "alt=\"&&\"",
+ "副标题": "class=\"duration\">&&",
+ "链接": "href=\"&&\"",
+ "分类url": "https://www.buzzav.com/category/{cateId}/page/{catePg}",
+ "分类": "业余$amateur#肛门$anal#亚洲$asian#Asmr$asmr#宝贝$babe#大屁股$big-ass#大鸡巴$big-dick#大奶$big-tits#金发女郎$blonde#口交$blowjob#束缚$bondage#名人$celebrity#中文$chinese-中文#大学$college#汇编$compilation#角色扮演$cosplay#中出$creampie#绿帽$cuckold#射精$cumshot#脚$feet#恋物癖$fetish#群交$gangbang#手淫$handjob#硬核$hardcore#成人动漫$hentai#跨种族$interracial#日本人$japanese#女同性恋$lesbian#按摩$massage#手淫$masturbation#成熟$mature#熟女$milf#观点$pov#民众$public#喷出$squirt#18+青少年$teen-18#三人行$threesome#未分类$uncategorized#优质的$vintage#摄像头$webcam"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/JAVSB.json b/XBPQ/adult/JAVSB.json
new file mode 100644
index 0000000..afe57a1
--- /dev/null
+++ b/XBPQ/adult/JAVSB.json
@@ -0,0 +1,11 @@
+{
+ "主页url": "https://jav.sb/",
+ "直接播放": "1",
+ "数组": "class=\"relative aspect-w-16 aspect-h-9 rounded overflow-hidden shadow-lg\"&&",
+ "图片": "https://jav.sb/ +data-src=\"&&\"",
+ "标题": "alt=\"&&\"",
+ "链接": "href=\"&&\"",
+ "分类url": "https://jav.sb/vod/show/by/{by}/id/{cateId}/page/{catePg}.html;;!",
+ "排序": "最近更新$time#今日浏览数$hits_day#本周浏览数$hits_week#本月浏览数$hits_month#总浏览数$hits",
+ "分类": "日本有碼$Censored#日本無碼$Uncensored#FC2-PPV$FC2-PPV#無碼破解$Mosaic_Removed#中文字幕$CHN_SUB#MGS動画$MGS#寫真$Adult_IDOL#國產$Asian_Amateur#歐美成人$Western_Porn"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/Pornlulu.json b/XBPQ/adult/Pornlulu.json
new file mode 100644
index 0000000..33cb004
--- /dev/null
+++ b/XBPQ/adult/Pornlulu.json
@@ -0,0 +1,9 @@
+{
+ "直接播放": "1",
+ "主页url": "https://www.pornlulu.com",
+ "搜索url": "https://www.pornlulu.com/?q={wd}&category_id=",
+ "分类数组": "",
+ "分类标题": "&&
",
+ "分类ID": "href=\"/cat/&&\">",
+ "分类url": "https://www.pornlulu.com/cat/{cateId}?page={catePg}"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/SOAV.json b/XBPQ/adult/SOAV.json
new file mode 100644
index 0000000..b4acc5a
--- /dev/null
+++ b/XBPQ/adult/SOAV.json
@@ -0,0 +1,11 @@
+{
+ "主页url": "https://www.wantav.co/",
+ "搜索url": "https://777080.xyz/?s={wd}",
+ "数组": "data-video-uid=&&",
+ "图片": "data-src=\"&&\"",
+ "标题": "alt=\"&&\"",
+ "副标题": "&&",
+ "链接": "href=\"&&\"",
+ "分类url": "https://777080.xyz/category/{cateId}/page/{catePg}/;;z",
+ "分类": "國產精選$國產精選#探花約炮$探花約炮#日韓影片$日韓影片#無碼素人$無碼素人#歐美專區$歐美專區#中字動漫$中字動漫"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/TaiAv.json b/XBPQ/adult/TaiAv.json
new file mode 100644
index 0000000..9333279
--- /dev/null
+++ b/XBPQ/adult/TaiAv.json
@@ -0,0 +1,11 @@
+{
+ "主页url": "https://taiav.com/",
+ "搜索url": "https://taiav.com/search?q={wd}",
+ "数组": "uk-card-media-top\"&&",
+ "图片": "src=\"&&\"",
+ "标题": "alt=\"&&\"",
+ "副标题": "&&",
+ "链接": "href=\"&&\"",
+ "分类url": "https://taiav.com/cn/category/{cateId}?page={catePg};;z",
+ "分类": "国产AV$国产AV#网红主播$网红主播#有码$有码#无码$无码"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/jable.json b/XBPQ/adult/jable.json
new file mode 100644
index 0000000..cd28a1e
--- /dev/null
+++ b/XBPQ/adult/jable.json
@@ -0,0 +1,20 @@
+{
+ "直接播放": "1",
+ "搜索模式": "1",
+ "搜索url": "https://jable.tv/search/{wd}/",
+ "数组": "",
+ "播放列表": "",
+ "播放链接": "href='&&'[替换:.html>>空]",
+ "嗅探词": ".mp4#index.m3u8",
+ "搜索模式": "1",
+ "搜索url": "https://danbady70.buzz/search/page/{pg}/wd/{wd}",
+ "分类url": "https://danbady70.buzz/type/{cateId}/{catePg}.html",
+ "分类": "热点专题&国产精品&华语AV&黑料吃瓜&欧美&禁漫&学生&乱伦&探花&日本无码&日本有码&主播网红&精选传媒&传媒国产&日韩系列&欧美巨屌&步兵无码&国产传媒&日本视频&国产视频&韩国&欧美视频&成人动画&黑料仓库&亚洲无码&亚洲有码&欧美情色&中文字幕&动漫卡通&美女主播&人妻熟女&强奸乱伦&日韩伦理&国产自拍&精选口爆&同性同志&重口味&韩国主播&JAV4K&91大神&网红福利&国外热门资源&Pornhub欧美资源&另类仓库&国产情色&日本无码&日本有码&中文字幕&欧美极品&动漫精品&强奸乱伦&变态另类&国产主播&巨乳美乳&制服诱惑&熟女人妻&三级伦理&自拍偷拍&AI换脸&海外明星&福利视频&少女萝莉&国产传媒&网爆门&国产视频&中文字幕&国产传媒&日本有码&日本无码&欧美无码&强奸乱伦&制服诱惑&直播主播&激情动漫&明星换脸&抖阴视频&女优明星&网-曝-门&伦理三级&AV解说&SM调教&萝莉少女&极品媚黑&女同性恋&玩偶姐姐&人妖系列&韩国主播&VR视角&特色仓库&精品推荐&国产色情&主播直播&亚洲无码&亚洲有码&中文字幕&巨乳美乳&人妻熟女&强奸乱伦&欧美精品&萝莉少女&伦理三级&成人动漫&自拍偷拍&制服丝袜&口交颜射&日本精品&Cosplay&素人自拍&台湾辣妹&韩国御姐&唯美港姐&东南亚AV&欺辱凌辱&剧情介绍&多人多P&91探花&网红流出&野外露出&古装扮演&女优系列&可爱学生&风情旗袍&兽耳系列&瑜伽裤&闷骚护士&过膝袜&网曝门&传媒出品&女同性恋&男同性恋&恋腿狂魔&精品资源&亚洲情色&国产主播&国产自拍&无码专区&欧美性爱&熟女人妻&强奸乱伦&巨乳美乳&中文字幕&制服诱惑&女同性恋&卡通动画&视频伦理&少女萝莉&重口色情&人兽性交&中文字幕&强奸中文&巨乳中文&乱伦中文&制服中文&人妻中文&调教中文&出轨中文&精品短视频&无码中文&大众精品&日本无码&日本有码&中文字幕&网红主播&成人动漫&欧美情色&国模私拍&长腿丝袜&邻家人妻&韩国伦理&香港伦理&精品推荐&原纱央莉&柚木TINA&大桥未久&橘日向&仁科百华&天海翼&小川阿佐美&樱井莉亚&长泽梓&番号大全&制服丝袜&群交淫乱&无码专区&偷拍自拍&卡通动漫&中文字幕&欧美性爱&巨乳美乳&国产裸聊&国产自拍&国产盗摄&伦理三级&女同性恋&少女萝莉&人妖系列&虚拟VR&富二代区&女神学生&美熟少妇&娇妻素人&空姐模特&国产乱伦&自慰群交&野合车震&职场同事&国产名人&精品三级&网曝门事件&抖阴视频&主播大秀&小鸟酱专题&颜射瞬间&国模私拍&水果派&福利姬&热门视频&伦理作品&香港伦理&国产伦理&韩国伦理&欧美伦理&日本伦理",
+ "分类值": "1152&1122&1123&1124&1125&1126&1128&1129&1130&1131&1132&1133&1117&1113&1114&1115&1116&158&180&181&159&182&160&246&184&185&186&187&188&189&190&191&192&193&194&195&196&197&198&199&200&201&202&327&204&205&206&207&208&209&210&211&212&213&214&215&216&217&218&219&220&221&1087&343&328&329&330&331&332&333&334&335&336&337&338&339&340&343&345&346&347&348&349&350&351&353&355&377&316&274&275&276&277&278&279&280&281&282&283&284&285&286&287&288&289&290&291&292&293&294&295&296&297&298&299&300&301&302&303&304&305&306&307&308&309&310&311&312&313&314&315&161&6&7&8&9&10&13&16&19&22&25&28&31&34&35&36&37&176&168&169&170&171&172&173&174&175&167&164&63&65&67&69&71&73&75&77&79&80&81&82&83&84&85&86&87&88&89&90&91&165&54&55&56&57&58&59&60&62&64&66&68&70&72&74&76&78&166&118&119&120&121&122&123&124&125&126&112&127&114&113&128&117&115&116&111&177&109&1&2&4&5&3"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/伊人AV.json b/XBPQ/adult/伊人AV.json
new file mode 100644
index 0000000..c5dc3b0
--- /dev/null
+++ b/XBPQ/adult/伊人AV.json
@@ -0,0 +1,6 @@
+{
+ "请求头": "手机",
+ "编码": "UTF-8",
+ "分类url": "https://yiren96.cc/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "中文字幕$11#国产视频$4#91精选$33#无码视频$8#欧美视频$5#动漫视频$30#伦理视频$9#精品AV$34#黑料吃瓜$35#AV解说$26#素人搭讪$7#JAV字幕$29#JAV丝袜$28#国产色情$39#高清日本$53#高清国产$54#高清传媒$55#情趣综艺$57#在线短剧$58#精选福利$87"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/传媒二区.json b/XBPQ/adult/传媒二区.json
new file mode 100644
index 0000000..f52d6c6
--- /dev/null
+++ b/XBPQ/adult/传媒二区.json
@@ -0,0 +1,14 @@
+{
+ "主页url": "https://www.34gaobk.com/index.html",
+ "数组": "&&&&
",
+ "简介": "document.title='&&'",
+ "直接播放": "1",
+ "搜索模式": "1",
+ "搜索url": "https://www.34gaobk.com/gaosearch/{wd}-/page/{pg}/",
+ "分类数组": "",
+ "分类标题": ">&&<",
+ "分类ID": "/gaotype/&&.html",
+ "分类url": "https://www.34gaobk.com/gaotype/{cateId}_{catePg}.html[https://www.34gaobk.com/gaotype/{cateId}.html]"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/吃瓜爆料网.json b/XBPQ/adult/吃瓜爆料网.json
new file mode 100644
index 0000000..277aa7c
--- /dev/null
+++ b/XBPQ/adult/吃瓜爆料网.json
@@ -0,0 +1,11 @@
+{
+ "发布地址": "https://cgblw.com/",
+ "数组": "&&<",
+ "副标题": "content=\"&&\"",
+ "链接": "href=\"&&\"",
+ "搜索url": "",
+ "分类url": "https://www.cgcg7.net/category/{cateId}/{catePg}/;;zDt",
+ "分类": "今日吃瓜$jrgb#最热头条$zrtt#网红爆料$whbl#反差爆料$fcbl#聚焦时事$jjss#国产精品$gcjp#校园瓜闻$xygw#吃瓜搞笑$cggx#电影天堂$dytt#领导秘事$ldms#海角论坛$hjlt#暗网猎奇$awlq"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/哈哈影视.json b/XBPQ/adult/哈哈影视.json
new file mode 100644
index 0000000..5461998
--- /dev/null
+++ b/XBPQ/adult/哈哈影视.json
@@ -0,0 +1,9 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "主页url": "https://www.hahads.com/show/20-----------.html",
+ "跳转播放链接": "var player_*\"url\":\"&&\"",
+ "分类url": "https://www.hahads.com/show/20-{cateId}-------{catePg}---.html",
+ "分类": "大陆&香港&台湾&美国&法国&英国&日本&韩国&泰国&印度&西班牙&加拿大&俄罗斯&新加坡&其它",
+ "分类值": "*"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/四虎影院.json b/XBPQ/adult/四虎影院.json
new file mode 100644
index 0000000..d374c6c
--- /dev/null
+++ b/XBPQ/adult/四虎影院.json
@@ -0,0 +1,23 @@
+{
+ "站名": "四虎影院",
+ "主页url": "https://www.4hu.tv/Enter/home.html",
+ "简介": "名称:&&",
+ "数组": "img class=\"nature&&",
+ "二次截取": "",
+ "图片": "data-original=\"&&.txt\"+.jpg",
+ "标题": "Base64((d('&&'));",
+ "副标题": "",
+ "链接": "href=\"&&\"",
+ "线路数组": "target=\"_blank&&>]",
+ "线路标题": "Base64(d('&&'));",
+ "播放数组": "item&&",
+ "状态": "分类:&&",
+ "导演": "导演:&&",
+ "主演": "主演:&&",
+ "搜索模式": "1",
+ "搜索url": "https://www.4hu.tv/searchs/index.php?page={pg}&keyboard={wd}&classid=0",
+ "搜索数组": "&&",
+ "搜索标题": "_blank\"&&<",
+ "分类": "国产$video#自拍$video/zipai#淫妻作乐$video/fuqi#开放青年$video/kaifang#精品分享$video/jingpin#台湾辣妹$video/twmn#动漫卡通$video/dongman#经典三级$video/sanji#onlyfans主播$onlyfans#女优$av#女性向纯爱$av/nxx#GIGA女战士$av/giga#波多野结衣$av/bdyjy#深田咏美$av/stym#桥本有菜$av/qbyc#苍井空$av/cjk#三上悠亚$av/ssyy#吉泽明步$av/jzmb#电影$movie#无码中字$movie/wuma#SM系列$movie/sm#高清无码$movie/gaoqing#熟女人妻$movie/shunv#美颜巨乳$movie/meiyan#丝袜制服$movie/siwa#中文有码$movie/youma#欧美系列$movie/oumei",
+ "分类url": "https://www.4hu.tv/{cateId}/{class}/index_{catePg}.html[firstPage=https://www.4hu.tv/{cateId}/{class}/]"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/国产X站.json b/XBPQ/adult/国产X站.json
new file mode 100644
index 0000000..067eac4
--- /dev/null
+++ b/XBPQ/adult/国产X站.json
@@ -0,0 +1,6 @@
+{
+ "直接播放": "1",
+ "图片": "data-original=\"&&\"",
+ "分类url": "https://cwopdk85defj06.xyz/index.php/vod/show/by/{by}/id/{cateId}/page/{catePg}.html",
+ "分类": "麻豆传媒$21#国产视频$22#日本有码$23#中文字幕$24#无码影片$27"
+}
diff --git a/XBPQ/adult/国产情色AV.json b/XBPQ/adult/国产情色AV.json
new file mode 100644
index 0000000..97faed0
--- /dev/null
+++ b/XBPQ/adult/国产情色AV.json
@@ -0,0 +1,10 @@
+{
+ "请求头": "手机",
+ "搜索url": "https://www.xmldc4.buzz/index.php/vod/search.html?wd={wd}",
+ "分类url": "https://www.xmldc4.buzz/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "国产传媒$1#高清无码$2#精品素人$4#日本有码$5#中文字幕$26#激情动漫$27#欧美色情$28#中字剧情$29#水果派$30#日韩无码$34#AV明星$35#中文字幕$36#擂台格斗$37#辣椒GIGA$38#HEYZO$39#独家DMM$40#HEY诱惑$42#童颜巨乳$43#高潮喷吹$44#激情口交$45#首次亮相$46#知名女优$48#人妻系列$49",
+ "播放标题": "title=\"&&\"",
+ "链接": "href=\"&&\">[包含:play]",
+ "影片类型": "&& -",
+ "影片年代": "] - &&
"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/国产硬AV.json b/XBPQ/adult/国产硬AV.json
new file mode 100644
index 0000000..26e11fe
--- /dev/null
+++ b/XBPQ/adult/国产硬AV.json
@@ -0,0 +1,6 @@
+{
+ "直接播放": "1",
+ "图片": "data-original=\"&&\"",
+ "分类url": "https://mtriop86ernf587.xyz/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "麻豆传媒$21#国产视频$22#日本有码$23#中文字幕$24#无码影片$27"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/国产麻豆.json b/XBPQ/adult/国产麻豆.json
new file mode 100644
index 0000000..ece8a38
--- /dev/null
+++ b/XBPQ/adult/国产麻豆.json
@@ -0,0 +1,6 @@
+{
+ "搜索url": "http://gcmd.cc/index.php/vod/search/page/{pg}/wd/{wd}.html",
+ "分类": "日本视频$1#麻豆视频$2#国产视频$3",
+ "分类url": "http://gcmd.cc/index.php/vod/show/class/{class}/id/{cateId}/page/{catePg}.html",
+ "直接播放": "1"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/多米尼奥.json b/XBPQ/adult/多米尼奥.json
new file mode 100644
index 0000000..46cbe41
--- /dev/null
+++ b/XBPQ/adult/多米尼奥.json
@@ -0,0 +1,12 @@
+{
+ "数组": "&&
",
+ "图片": "
&&",
+ "简介": "description\" content=\"&&\"",
+ "链接": "href=\"&&\"",
+ "播放链接": "href=\"&&\"",
+ "播放标题": ">&&>在线播放]",
+ "搜索url": "https://dmnio.com/dmnsearch/{wd}----------{pg}---.html",
+ "分类url": "https://dmnio.com/dmnshow/{cateId}--{by}------{catePg}---.html",
+ "分类": "新品日韩$20#中文字幕$21#短片本土$22#大胸美女$23#剧情暴力$24#制服扮演$25#人妻御姐$26#无码专区$27#动漫卡通$28#明星幻想$29"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/天堂岛.json b/XBPQ/adult/天堂岛.json
new file mode 100644
index 0000000..9385c69
--- /dev/null
+++ b/XBPQ/adult/天堂岛.json
@@ -0,0 +1,17 @@
+{
+ "主页url": "https://www.ttdao666.buzz",
+ "数组": "&&&&<",
+ "分类ID": "/vod/type/id/&&/",
+ "分类url": "https://www.ttdao6614.buzz/vod/type/id/{cateId}/page/{catePg}/"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/奇优福利.json b/XBPQ/adult/奇优福利.json
new file mode 100644
index 0000000..c6c3d3a
--- /dev/null
+++ b/XBPQ/adult/奇优福利.json
@@ -0,0 +1,31 @@
+ {
+
+ "作者":"艾丝沐",
+
+ "站点":"奇优影院",
+
+ "请求头": "手机",
+ "主页url":"http://www.qiyoudy2.com/",
+ "简介":"&&
",
+ "数组":"&&",
+ "图片":"data-original=\"&&\"",
+ "标题":"title=\"&&\"",
+ "副标题":"text-right\">&&",
+ "链接":"href=\"&&\"",
+ "搜索url":"http://www.qiyoudy2.com/search.php;post;searchword={wd}",
+ "搜索数组":"v-thumb stui-vodlist__thumb&&",
+ "搜索图片":"data-original=\"&&\"",
+ "搜索标题":"title=\"&&\"",
+ "搜索副标题":"text-right\">&&",
+ "搜索链接":"href=\"&&\"",
+ "线路数组":"data-toggle=\"tab\"&&",
+ "线路标题":">&&",
+ "播放数组":"stui-content__playlist clearfix&&",
+ "播放列表":"
&&",
+ "播放标题":">&&",
+ "嗅探词":".m3u8#.mp4#.flv#.mp3#.m4a",
+ "分类url":"http://www.qiyoudy2.com/list/{cateId}_{catePg}.html;;ak",
+ "分类":"🔞福利推荐$6"
+
+ }
+
\ No newline at end of file
diff --git a/XBPQ/adult/女优色库.json b/XBPQ/adult/女优色库.json
new file mode 100644
index 0000000..4bdb925
--- /dev/null
+++ b/XBPQ/adult/女优色库.json
@@ -0,0 +1,8 @@
+{
+ "搜索url": "https://suwdak.nysk3.quest/cn/home/web/vodsearch/{wd}----------{pg}---.html",
+ "主页url": "https://suwdak.nysk3.quest/cn/home/web/",
+ "简介": "时间:&&<",
+ "副标题": "❤️+fa fa-heart\">&&<+🔥+\"fa fa-eye\">&&<",
+ "分类": "国产精品$20#精品三级$21#主播大秀$22#抖音视频$23#女神学生$24#国产乱伦$28",
+ "分类url": "https://suwdak.nysk3.quest/vodtype/{cateId}-{catePg}.html;;zm"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/好好J.json b/XBPQ/adult/好好J.json
new file mode 100644
index 0000000..1a72f46
--- /dev/null
+++ b/XBPQ/adult/好好J.json
@@ -0,0 +1,11 @@
+{
+ "图片": "src=\"&&\"",
+ "副标题": "video-item-badge\">&&<",
+ "链接": "href=\"&&\"",
+ "直接播放": "1",
+ "影片年代": "ms-auto\">&&",
+ "主演": "model-name mt-1\">&&<",
+ "分类url": "https://hohoj.tv/search?type={cateId}&p={catePg}&order={by}",
+ "排序": "最热门$popular#最新$latest#最多观看$views#最多好评$likes",
+ "分类": "有码$censored#中文字幕$chinese#无码$uncensored#欧美$europe"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/快乐爽片.json b/XBPQ/adult/快乐爽片.json
new file mode 100644
index 0000000..1134ff5
--- /dev/null
+++ b/XBPQ/adult/快乐爽片.json
@@ -0,0 +1,17 @@
+{
+ "发布地址": "www.gblw1.buzz",
+ "主页url": "https://www.gblw1.buzz",
+ "首页": "快乐五区",
+ "数组": "class=\"lazy&&",
+ "图片": "data-original=\"&&\"",
+ "标题": "html\">&&&&<",
+ "分类ID": "/index.php/vod/type/id/&&.html",
+ "分类url": "https://www.gblw1.buzz/index.php/vod/type/id/{cateId}/page/{catePg}.html;;z"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/成人重口.json b/XBPQ/adult/成人重口.json
new file mode 100644
index 0000000..22f540e
--- /dev/null
+++ b/XBPQ/adult/成人重口.json
@@ -0,0 +1,17 @@
+{
+ "发布地址": "1:https://www.crzk22.buzz/,2:www.crzk3.top",
+ "主页url": "https://luanlxsfbc-005.luanlxsf002.sbs",
+ "数组": " &&",
+ "简介": "description\" content=\"&&剧情",
+ "链接": "thumbnail\" href=\"&&",
+ "直接播放": "1",
+ "搜索模式": "1",
+ "搜索url": "https://luanlxsfbc-005.luanlxsf002.sbs/index.php/vod/search/page/{pg}/wd/{wd}.html",
+ "分类数组": "&&<",
+ "分类ID": "/index.php/vod/type/id/&&.html",
+ "分类url": "https://luanlxsfbc-005.luanlxsf002.sbs/index.php/vod/type/id/{cateId}/page/{catePg}.html;;z"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/撸GIAO.json b/XBPQ/adult/撸GIAO.json
new file mode 100644
index 0000000..080060c
--- /dev/null
+++ b/XBPQ/adult/撸GIAO.json
@@ -0,0 +1,15 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "防走丢": "http://djjmk.top/",
+ "二次截取": "class=\"row col5 clearfix&&class=\"wrap",
+ "数组": "&&",
+ "标题": "title=\"&&\"",
+ "图片": "data-src=\"&&\"",
+ "副标题": "&&",
+ "链接": "href=\"&&\"",
+ "跳转播放链接": "var player_*\"url\":\"&&\"",
+ "分类url": "https://xn--5f-ng7cy3vt8e.djjmk.icu/duoziyuan/{cateId}/index.php/vod/type/id/{class}/page/{catePg}.html",
+ "分类": "视频一区$yutu#视频二区$yaowuwu#视频三区$aosika#视频四区$laoya#视频五区$tantan",
+ "剧情": "精选$1#精品推荐$39#国产精品$40#剧情介绍$46#麻豆传媒$50#日韩$2#日本有码$51#日本无码$52#中文字幕$53#童颜巨乳$54#性感人妻$55#日本片商$56#国产$3#主播秀色$60#三级伦理$61#自拍偷拍$62#网曝系列$65#重口$4#强奸乱伦$66#欧美情色$67#卡通动漫$68#丝袜OL$69精选$1#麻豆传媒$20#AV解说$21#AI换脸$22#萝莉少女$23#女优明星$24#韩国主播$36#国产$2#美女主播$25#国产自拍$26#网红头条$27#网爆黑料$28#抖音视频$29#日韩$3#无码专区$30#三级伦理$31#中文字幕$32#熟女人妻$33#卡通动漫$34#美乳巨乳$35#重口$4#SM调教$37#欧美无码$38#女同性爱$39#多人群交$40#欧美系列$41#制服诱惑$42#强奸乱伦$43精选$1#映画传媒$20#精选独家$21#探花系列$22#成人动漫$23#AV解说$46#日韩$2#日本无码$24#日本有码$25#中文字幕$26#日本素人$27#高清名优$28#人妻熟女$29#口爆颜射$30#萝莉少女$31#貧乳小奶$32#国产$3#国产自拍$33#网红主播$34#三级伦理$35#短视频$36#AI换脸$37#重口$4#欧美精品$38#SM调教$39#女同性爱$40#多人群交$41#美乳巨乳$42#制服诱惑$43#丝袜美腿$44#翹臀美尻$45精选$1#明星换脸$20#中国传媒$21#AV解说$23#少女萝莉$32#海外明星$33#学生系列$46#日韩$2#日本无码$25#日本有码$26#中文字幕$27#成人动漫$28#巨乳美乳$29#熟女人妻$30#三级伦理$31#极品少妇$47#国产$3#自拍偷拍$34#主播直播$35#国产情色$36#网红黑料$37#重口$4#欧美性爱$40#强奸乱伦$41#变态另类$42#制服诱惑$43#女同性恋$45#人妖系列$48精选$1#精品推荐$20#少女萝莉$21#成人动漫$22#角色扮演$23#日韩$2#日本无码$24#日本有码$25#中文字幕$26#巨乳美乳$27#熟女人妻$28#三级伦理$29#国产$3#国产视频$30#主播直播$31#自拍偷拍$32#重口$4#强奸乱伦$33#欧美精品$34#制服丝袜$35#口交颜射$36"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/暗网色库.json b/XBPQ/adult/暗网色库.json
new file mode 100644
index 0000000..aab37e2
--- /dev/null
+++ b/XBPQ/adult/暗网色库.json
@@ -0,0 +1,16 @@
+{
+ "主页url": "https://awjqub.sbs/layout.html",
+ "数组": "class=\"myui-vodlist__box&&",
+ "图片": "style=\"background: url('&&')",
+ "标题": "title=\"&&\"",
+ "副标题": "",
+ "简介": "description\"*content=\"&&火热",
+ "链接": "href='&&'[替换:display>>detail]",
+ "搜索模式": "1",
+ "搜索url": "https://awjqub.sbs/lookup/{wd}/L/{pg}",
+ "分类二次截取": "",
+ "分类数组": "&&&&&&<",
+ "简介": "description\" content=\"&&\"",
+ "链接": "href=\"&&\"",
+ "跳转播放链接": "\"url\": \"&&\"",
+ "搜索模式": "1",
+ "搜索url": "/index.php?m=vod-search-wd-{wd}-pg-{pg}.htm",
+ "分类url": "/index.php?m=vod-type-id-{cateId}-pg-{catePg}.htm",
+ "分类": "国产视频$495#国产传媒$496#中文字幕$497#日本有码$498#日本无码$499#强奸乱伦$500#制服诱惑$501#激情欧美$502#成人动漫$503#抖阴视频$504#人妖伪娘$505#萝莉少女$1116#SM调教$1117#三级片$1118"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/百万破解AV.json b/XBPQ/adult/百万破解AV.json
new file mode 100644
index 0000000..836b96c
--- /dev/null
+++ b/XBPQ/adult/百万破解AV.json
@@ -0,0 +1,8 @@
+{
+ "数组": "",
+ "标题": "",
+ "图片": "
>接表组组表题#\">>接表组表题#$>>题接#\\#>>接表表题]",
+"播放数组": "组&&组",
+"播放列表": "表&&表",
+"播放标题": "题&&题",
+"播放链接": "接&&接",
+
+"直接播放": "0",
+"嗅探词": ".mp4#.m3u8",
+
+"搜索请求头": "User-Agent$MOBILE_UA",
+"搜索url": "/api.php/provide/search_result?video_name={wd}",
+"搜索模式": "1",
+"搜索二次截取": "search_result\":\\[&&\\]",
+"搜索数组": "{&&}",
+"搜索标题": "video_name\":\"&&\"",
+"搜索图片": "img\":\"&&\"",
+"搜索副标题": "\"\":\"&&\"",
+"搜索链接": "/api.php/provide/vod_detail?ac=vod_detail&id=+id\":&&,",
+
+"筛选": "1",
+"年份": "1949-2025",
+"年份值": "*",
+"排序": "为你推荐&最新&评分&最热",
+"排序值": "空&new&score&hits"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/色花堂.json b/XBPQ/adult/色花堂.json
new file mode 100644
index 0000000..30e4c23
--- /dev/null
+++ b/XBPQ/adult/色花堂.json
@@ -0,0 +1,20 @@
+{
+ "站名": "色花堂10",
+ "请求头": "手机",
+ "主页url": "https://15gaokk.com/",
+ "数组": "
&&",
+ "图片": "data-original=\"&&\"",
+ "标题": "
&&",
+ "链接": "href=\"&&\"",
+ "副标题": "
&&
",
+ "线路数组": "&&
[排序:线路一>线路二>线路三]",
+ "线路标题": ">&&",
+ "播放数组": "&&
",
+ "影片类型": "",
+ "简介": "
",
+ "播放列表": "
",
+ "播放标题": ">&&<",
+ "播放链接": "href=\"&&\"",
+ "分类": "国产精品$1#日韩亚洲$2#自拍偷拍$15#欧美激情$3#主播视频$16#中文字幕$4#日韩无码$11#动漫成人$14#推荐视频$5#同性人妖$6#欧美大片$7#原创国产$8#制服淫穴$9#换脸AI区$10#三级综艺$12#次元动漫$13",
+ "分类url": "https://15gaokk.com/typegaokk/{cateId}_{catePg}.html[https://15gaokk.com/typegaokk/{cateId}.html"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/花丛视频.json b/XBPQ/adult/花丛视频.json
new file mode 100644
index 0000000..13d304f
--- /dev/null
+++ b/XBPQ/adult/花丛视频.json
@@ -0,0 +1,5 @@
+{
+ "直接播放": "1",
+ "分类url": "https://hgpuqti89co63.xyz/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "国产传媒$21#国产视频$22#日本有码$23#中文字幕$24#日本无码$27"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/蜜桃屯.json b/XBPQ/adult/蜜桃屯.json
new file mode 100644
index 0000000..a7635e8
--- /dev/null
+++ b/XBPQ/adult/蜜桃屯.json
@@ -0,0 +1,13 @@
+{
+ "二次截取": "class=\"block-post&&align=\"center",
+ "数组": "target=\"_blank&&class=\"info-post",
+ "图片": "data-src=\"&&\"",
+ "标题": "title=\"&&\"",
+ "链接": "href=\"&&\"",
+ "直接播放": "1",
+ "跳转播放链接": "var player_*\"url\":\"&&\"",
+ "搜索url": "https://www.okav13.mom/index.php/vod/search/page/{pg}/wd/{wd}.html",
+ "分类url": "https://www.okav13.mom/index.php/vod/show/by/{by}/id/{cateId}/page/{catePg}.html",
+ "分类": "国产AV$1#日韩AV$2#欧美AV$3#污污动漫$4#高清字幕$5#顶级主播$9#外流泄密$11#偷窥偷拍$14#传媒剧情$15#强奸乱伦$13#三级伦理$16",
+ "排序": "热门$hits#点赞$up#评分$score"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/超级福利社区.json b/XBPQ/adult/超级福利社区.json
new file mode 100644
index 0000000..2116d2f
--- /dev/null
+++ b/XBPQ/adult/超级福利社区.json
@@ -0,0 +1,17 @@
+{
+ "请求头": "手机",
+ "主页url": "https://chaojifuli729.top",
+ "数组": "fed-lazy&&&&&&<",
+ "简介": "description\" content=\"&&\"",
+ "链接": "href=\"&&\"[替换:detail>>play]",
+ "跳转播放链接": "var playUrl = '&&'",
+ "搜索模式": "1",
+ "搜索url": "https://chaojifuli729.top/search/{wd}/{pg}.html",
+ "分类数组": "
",
+ "分类标题": ">&&<",
+ "分类ID": "/list/&&.html",
+ "分类url": "https://chaojifuli729.top/list/{cateId}/{catePg}.html"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/金陵乐园.json b/XBPQ/adult/金陵乐园.json
new file mode 100644
index 0000000..a3ab47b
--- /dev/null
+++ b/XBPQ/adult/金陵乐园.json
@@ -0,0 +1,14 @@
+{
+ "主页url": "https://jud.jllth9.motorcycles/cn/home/web/",
+ "直接播放": "1",
+ "数组": "img\">&&*_self\">&&",
+ "副标题": "&&",
+ "链接": "href=\"&&\"[替换:play#.html>>/sid/1/nid/1.html]",
+ "搜索模式": "1",
+ "搜索url": "https://jud.jllth9.motorcycles/cn/home/web/index.php/vod/search/page/{pg}/wd/{wd}.html",
+ "分类url": "https://jud.jllth9.motorcycles/cn/home/web/index.php/vod/type/id/{cateId}/page/{catePg}.html",
+ "分类": "自拍偷拍&巨乳波霸&强奸乱伦&人妻熟女&制服丝袜&花季少女&无码露毛&群P多人&人兽人妖&男同女同&韩日专区&欧美色情&成人动漫&三级剧情",
+ "分类值": "20&21&22&23&24&25&26&27&28&29&30&31&32&33"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/麻豆区.json b/XBPQ/adult/麻豆区.json
new file mode 100644
index 0000000..bc917b8
--- /dev/null
+++ b/XBPQ/adult/麻豆区.json
@@ -0,0 +1,14 @@
+{
+ "首页": "0",
+ "搜索url": "https://madouqu9.xyz/?s={wd}",
+ "分类url": "https://madouqu9.xyz/{cateId}/page/{catePg}/",
+ "主演": "麻豆女郎:&& 下載地址:",
+ "分类": "麻豆传媒$modelmedia#国产传媒$gccm",
+ "数组": "class=\"col-lg-&&&&",
+ "链接": "href=\"&&\"",
+ "更新时间": "meta-date\">&&",
+ "播放链接": "
[包含:magnet]",
+ "嗅探词": "m3u8#.m3u8#.mp4#magnet"
+}
\ No newline at end of file
diff --git a/XBPQ/adult/黑料网.json b/XBPQ/adult/黑料网.json
new file mode 100644
index 0000000..c6e202c
--- /dev/null
+++ b/XBPQ/adult/黑料网.json
@@ -0,0 +1,15 @@
+{
+ "发布地址": "https://155.fun/(T)",
+ "短视频": "1",
+ "嗅探词": "https://hls.vdtuzv.com/videos3/#.m3u8?",
+ "数组": "",
+ "图片": "https://tuapi.eees.cc/api.php?category=meinv&type=302",
+ "标题": "title\" data-v-a51695bc>&& ",
+ "副标题": "ishot\" data-v-a51695bc>&&",
+ "链接": "href=\"/archives/&&.html\"",
+ "链接前缀": "https://93rot.bkrbful.xyz/archives/",
+ "链接后缀": ".html",
+ "分类url": "https://93rot.bkrbful.xyz/category/{cateId}/{catePg}.html",
+ "分类": "独家爆料&网红明星&反差女友&校园政坛&性爱课堂&奇闻异事&热点吃瓜&今日黑料&经典大瓜&黑料历史&每日热瓜",
+ "分类值": "9&1&4&2&12&3&7&6&8&10&11"
+}
\ No newline at end of file
diff --git a/XBPQ/free影视.json b/XBPQ/free影视.json
new file mode 100644
index 0000000..5c4ed83
--- /dev/null
+++ b/XBPQ/free影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "剧情介绍:+module-info-introduction-content\">&&",
+ "分类url": "https://freeok.one/type/{cateId}.html",
+ "分类": "短剧$shuangju#电视剧$juji#电影$dianying#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/一帆影视.json b/XBPQ/一帆影视.json
new file mode 100644
index 0000000..0b8e016
--- /dev/null
+++ b/XBPQ/一帆影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "details-content-all\">&&",
+ "分类url": "https://aiyf.pages.dev/ayf_show/{cateId}-{area}--{class}-----{catePg}---{year}.html;;d0",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/七七影视.json b/XBPQ/七七影视.json
new file mode 100644
index 0000000..89036cc
--- /dev/null
+++ b/XBPQ/七七影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "detail-content\" style=*>&&",
+ "分类url": "https://www.sheyit.com/show/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$33#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/七点影视.json b/XBPQ/七点影视.json
new file mode 100644
index 0000000..f0bf629
--- /dev/null
+++ b/XBPQ/七点影视.json
@@ -0,0 +1,7 @@
+{
+ "简介": "+pt-10 pb-10\" style=*>&&",
+ "图片": "data-original=\"&&\"[替换:amp;>>空]",
+ "线路数组": "#ewave-playlist-&&",
+ "分类url": "https://www.7.movie/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$24#电视剧$21#电影$20#动漫$22#综艺$23"
+}
\ No newline at end of file
diff --git a/XBPQ/三九影视.json b/XBPQ/三九影视.json
new file mode 100644
index 0000000..d684645
--- /dev/null
+++ b/XBPQ/三九影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "+detail-content\" style=*>&&",
+ "分类url": "https://www.539539.xyz/index.php/vod/show/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$25#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/三四影视.json b/XBPQ/三四影视.json
new file mode 100644
index 0000000..ebc8d96
--- /dev/null
+++ b/XBPQ/三四影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "+col-pd\">&&",
+ "分类url": "https://ikan234.com/show/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$5#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/人人影视.json b/XBPQ/人人影视.json
new file mode 100644
index 0000000..0bec50e
--- /dev/null
+++ b/XBPQ/人人影视.json
@@ -0,0 +1,6 @@
+{
+ "请求头": "手机",
+ "简介": "剧情介绍:+module-info-introduction-content\">&&",
+ "分类url": "https://svip1.fun/index.php/vod/show/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$24#电视剧$21#电影$20#动漫$22#动漫电影$25#综艺$23"
+}
\ No newline at end of file
diff --git a/XBPQ/修罗影视.json b/XBPQ/修罗影视.json
new file mode 100644
index 0000000..b94ea54
--- /dev/null
+++ b/XBPQ/修罗影视.json
@@ -0,0 +1,15 @@
+{
+ "数组": "card card-sm card-link&&",
+ "图片": "src=\"&&\"",
+ "标题": "mb-0 card-title text-truncate\">&&<",
+ "副标题": "start-0 text-red-fg\">&&",
+ "链接": "href=\"&&\"",
+ "简介": "剧情简介:&&\"",
+ "线路标题": "磁力+>&&<",
+ "播放数组": "download-list&&",
+ "播放列表": "",
+ "播放标题": "text-muted\">&&[不包含:网盘下载]",
+ "跳转播放链接": "href=\"&&\"",
+ "分类url": "https://v.xlys.ltd.ua/s/all/{catePg}?type={cateId};;d0",
+ "分类": "电影$0#电视剧$1"
+}
\ No newline at end of file
diff --git a/XBPQ/兄弟影院.json b/XBPQ/兄弟影院.json
new file mode 100644
index 0000000..1fdb0e9
--- /dev/null
+++ b/XBPQ/兄弟影院.json
@@ -0,0 +1,9 @@
+{
+ "简介": "text cor3\">&&",
+ "图片": "data-src=\"&&\"",
+ "副标题": "public-list-prb hide ft2\">&&",
+ "线路数组": "&&",
+ "播放数组": "anthology-list-play&&",
+ "分类url": "https://www.brovods.top/show/{cateId}-{area}--{class}-----{catePg}---{year}/",
+ "分类": "电视剧$tv#电影$movie#动漫$cartoon#综艺$show#纪录片$documentary"
+}
\ No newline at end of file
diff --git a/XBPQ/光影迷.json b/XBPQ/光影迷.json
new file mode 100644
index 0000000..d961e08
--- /dev/null
+++ b/XBPQ/光影迷.json
@@ -0,0 +1,6 @@
+{
+ "简介": "&&
",
+ "副标题": "tag text-overflow\">&&<",
+ "分类url": "https://www.guangyingmi.com/pianku-{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "电影$dianying#电视剧$dianshiju#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/免费影视.json b/XBPQ/免费影视.json
new file mode 100644
index 0000000..68b8b00
--- /dev/null
+++ b/XBPQ/免费影视.json
@@ -0,0 +1,20 @@
+{
+ "站名": "",
+ "主页url": "https://www.freeok.ac",
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "数组": "",
+ "图片": "data-original=\"&&\"",
+ "标题": "",
+ "链接": "href=\"&&\"",
+ "副标题": "",
+ "线路数组": "module-tab-item&&",
+ "线路标题": "data-dropdown-value=\"&&\"",
+ "播放数组": "",
+ "播放标题": "",
+ "跳转播放链接": "urlDecode(Base64(var player_*url\":\"&&\"))",
+ "搜索url": "https://www.freeok.ac/vodsearch/-------------.html?wd={wd}",
+ "简介": "&&
",
+ "分类url": "https://www.freeok.ac/type/{cateId}.html",
+ "分类": "电影$1#连续剧$2#动漫$3#综艺$4#短剧$24"
+}
\ No newline at end of file
diff --git a/XBPQ/全网看.json b/XBPQ/全网看.json
new file mode 100644
index 0000000..cd78f2a
--- /dev/null
+++ b/XBPQ/全网看.json
@@ -0,0 +1,7 @@
+{
+ "请求头": "手机",
+ "数组": "stui-vodlist__thumb lazyload&&",
+ "简介": "剧情介绍:+stui-content__desc col-pd clearfix\">&&",
+ "分类url": "https://www.qkw1.com/qkwshow/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "短剧$duanju#电视剧$tv#电影$dy#动漫$dm#综艺$zy"
+}
\ No newline at end of file
diff --git a/XBPQ/全网看CC.json b/XBPQ/全网看CC.json
new file mode 100644
index 0000000..fa5db1f
--- /dev/null
+++ b/XBPQ/全网看CC.json
@@ -0,0 +1,6 @@
+{
+ "请求头": "手机",
+ "简介": "剧情介绍:+stui-content__desc col-pd clearfix\">&&",
+ "分类url": "https://www.qkwaa.com/vod/show/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$28#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/养生堂.json b/XBPQ/养生堂.json
new file mode 100644
index 0000000..4519779
--- /dev/null
+++ b/XBPQ/养生堂.json
@@ -0,0 +1,17 @@
+{
+ "首页": "0",
+ // "播放链接前缀": "https://www.gyf.lol",
+ "数组": "data\":{\"tit&&s_log",
+ "图片": "https://p0.ssl.cdn.btime.com/+ssl.cdn.btime.com/&&\"",
+ "副标题": "le\":\"&&\"",
+ "标题": "text\":\"&&\"",
+ "链接": "item.btime.com/&&\"",
+ "链接前缀": "https://app.api.btime.com/video/play?callback=jQuery36009651202523243325_1747927554988&id=",
+ // "线路标题": "&&",
+ "线路数组": "javascript:;\">&&",
+ "数组": "&&",
+ "图片": "data-echo=\"&&\"",
+ "分类url": "https://vip.wwgz.cn:5200/vod-list-id-{cateId}-pg-{catePg}-order--by-time-class-0-year-{year}-letter--area-{area}-lang-.html",
+ "分类": "电影$1#电视剧$2#动漫$4#综艺$3#短剧$26"
+}
\ No newline at end of file
diff --git a/XBPQ/剧白白.json b/XBPQ/剧白白.json
new file mode 100644
index 0000000..647438d
--- /dev/null
+++ b/XBPQ/剧白白.json
@@ -0,0 +1,5 @@
+{
+ "简介": "detail-content\" style=*>&&",
+ "分类url": "https://cnsende.com/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$36#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/卡卡动漫.json b/XBPQ/卡卡动漫.json
new file mode 100644
index 0000000..5a2c292
--- /dev/null
+++ b/XBPQ/卡卡动漫.json
@@ -0,0 +1,5 @@
+{
+ "简介": "简介:&&",
+ "分类url": "https://mkaka.china-mt.org/{cateId}/index_{catePg}.html[https://mkaka.china-mt.org/{cateId}/index.html]",
+ "分类": "国漫$china#日漫$japan#韩剧$korea#日剧$riju#福利$lunlipian"
+}
\ No newline at end of file
diff --git a/XBPQ/可可影视.json b/XBPQ/可可影视.json
new file mode 100644
index 0000000..91ffcb4
--- /dev/null
+++ b/XBPQ/可可影视.json
@@ -0,0 +1,11 @@
+{
+ "请求头": "手机",
+ "简介": "剧情介绍:+&&
",
+ "图片": "data-original=\"/vod&&\"[替换:1/vod>> https://vres.xzzsw.com/vod1/vod]",
+ "标题": "v-item-title\">&&",
+ "副标题": "v-item-bottom\">&&",
+ "线路数组": "swiper-slide source-swiper-slide&&[不包含:4K(高峰不卡)]",
+ "播放数组": "episode-list&&",
+ "分类url": "https://www.kkys03.com/show/{cateId}-{class}-{area}--{year}-3-{catePg}.html",
+ "分类": "短剧$6#电视剧$2#电影$1#动漫$3#综艺$4"
+}
\ No newline at end of file
diff --git a/XBPQ/哆啦新番社.json b/XBPQ/哆啦新番社.json
new file mode 100644
index 0000000..6617766
--- /dev/null
+++ b/XBPQ/哆啦新番社.json
@@ -0,0 +1,18 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "主页url": "https://dora.xiaoxinbk.com/",
+ "数组": "class=\"card-img-bili\"&&",
+ "标题": "alt=\"&&\"",
+ "图片": "data-url=\"&&\"",
+ "链接": "href=\"&&\"",
+ "线路标题": "mt-0\">&&",
+ "播放数组": "class=\"card-body button-list\"&&",
+ "播放列表": "",
+ "播放链接": "href=\"&&\"",
+ "播放标题": ">&&",
+ "分类url": "https://www.dora-video.cn/search/sy/?niandai={year}&cat={class}&tag={cateId}&gaojijiansuo=1&zhuangtai={by}",
+ "分类": "全部$0#动画$20#剧场版$21#MV$22#预告片$23#直播$27#同人$25#民间$26#目录$28#中篇$30",
+ "剧情": "哆啦A梦新番$1#新哆啦A梦 台湾配音$2#哆啦A梦剧场版$3#最新预告片$6#剧场版集合$7#哆啦A梦MV$4#哆啦A梦七小子集合$11#生日特别篇$19#哆啦A梦生日特别篇$12#哆啦美生日特别篇$14#大雄生日特别篇$13#静香生日特别篇$15#胖虎生日特别篇$16#小夫生日特别篇$17#24小时直播$24#哆啦A梦中篇附映$29#哆啦A梦大山版 - 哆啦a梦1979版国语旧版 - 旧版$31#哆啦A梦大山修复&补档区$32#藤子·F·不二雄【其它动画】$33",
+ "排序": "全部$0#完结$2#连载$1#待定$-1"
+}
\ No newline at end of file
diff --git a/XBPQ/喝茶影视.json b/XBPQ/喝茶影视.json
new file mode 100644
index 0000000..9fb59aa
--- /dev/null
+++ b/XBPQ/喝茶影视.json
@@ -0,0 +1,7 @@
+{
+ "播放标题":">&&",
+ "分类url": " https://www.shpdchysc.com/vodshow/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "电视剧&电影&综艺&动漫&动画片&短剧&香港电影",
+ "分类值": "2&1&3&4&36&40&37",
+ "链接": "href=\"&&\">[包含:voddetail]"
+}
\ No newline at end of file
diff --git a/XBPQ/天堂影视.json b/XBPQ/天堂影视.json
new file mode 100644
index 0000000..de1857f
--- /dev/null
+++ b/XBPQ/天堂影视.json
@@ -0,0 +1,7 @@
+{
+ "请求头": "手机",
+ "简介": "剧情介绍:+detail-content\" style=*>&&",
+ "副标题": "pic-text text-right\">&&",
+ "分类url": "https://www.tvdy.xyz/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$dianshiju#电影$dianying#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/奇迹影视.json b/XBPQ/奇迹影视.json
new file mode 100644
index 0000000..40b7213
--- /dev/null
+++ b/XBPQ/奇迹影视.json
@@ -0,0 +1,14 @@
+{
+ "二次截取": "module-items\"&&id=\"page[替换:module-footer>>module-item]",
+ "数组": "module-item-pic&&module-item module-item-go w16",
+ "图片": "data-src=\"&&\"",
+ "副标题": "module-item-text\">&&",
+ "线路数组": "data-dropdown-value=&&",
+ "线路标题": "&&[替换:>>【]+集】",
+ "播放数组": "scroll-content&&",
+ "播放列表": "",
+ "播放标题": "&&",
+ "简介": "剧情:&&",
+ "分类url": "https://qiji.91by.top/index.php/vod/show/area/{area}/by/{by}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "电影$1#电视剧$2#综艺$3#动漫$4#短剧$20"
+}
\ No newline at end of file
diff --git a/XBPQ/小枫影视.json b/XBPQ/小枫影视.json
new file mode 100644
index 0000000..f4a87e0
--- /dev/null
+++ b/XBPQ/小枫影视.json
@@ -0,0 +1,10 @@
+{
+ "二次截取": "module-items\"&&id=\"page[替换:module-footer>>module-item]",
+ "数组": "module-item-pic&&module-item\"",
+ "图片": "data-src=\"&&\"",
+ "副标题": "module-item-text\">&&",
+ "线路数组": "data-dropdown-value=&&[不包含:视频]",
+ "线路标题": "&&[替换:>>【]+集】",
+ "分类url": "https://kekedy.icu/vodshow/{cateId}-{area}-{by}-{class}-{lang}----{catePg}---{year}.html",
+ "分类": "电影$1#国产剧$2#综艺$6#短剧$8#纪录片$9"
+}
\ No newline at end of file
diff --git a/XBPQ/小鸭看看.json b/XBPQ/小鸭看看.json
new file mode 100644
index 0000000..7977f94
--- /dev/null
+++ b/XBPQ/小鸭看看.json
@@ -0,0 +1,15 @@
+{
+ "简介": "info\">简介:&&",
+ "链接": "href=\"&&\">",
+ "图片": "data-src=\"&&\"",
+ "标题": "alt=\"&&\">",
+ "副标题": "tag1\">&&",
+ "线路二次截取": "var pp=&&[替换:var pp=>>][替换:;>>]",
+ "线路数组": "j:lines",
+ "线路标题": "j:lines[*][1][工具:unicode]",
+ "播放数组": "j:lines[*][3]",
+ "播放链接": "j:lines[{{播放序号}}][3][*]",
+ "播放数量": "j:lines[*][2]",
+ "分类url": "https://xiaoyakankan.com/cat/{cateId}-{catePg}.html;;!",
+ "分类": "电影$10#电视剧$11#动漫$13#综艺$12#福利$15"
+}
\ No newline at end of file
diff --git a/XBPQ/尼玛影视.json b/XBPQ/尼玛影视.json
new file mode 100644
index 0000000..85c1910
--- /dev/null
+++ b/XBPQ/尼玛影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "hl-content-text\">&&",
+ "分类url": "http://nm.xxxc137.top/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$25#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/布卡影视.json b/XBPQ/布卡影视.json
new file mode 100644
index 0000000..c23ad43
--- /dev/null
+++ b/XBPQ/布卡影视.json
@@ -0,0 +1,9 @@
+{
+ "简介": "description\" content=\"*讲述的是:&&\"",
+ "主页url": "https://www.gtrgt.com/label/new.html",
+ "线路数组": "#ewave-playlist-1\"&&",
+ "播放数组": "ewave-playlist-1\">&&",
+ "副标题": "&&
&&",
+ "简介": "◎简*介&&",
+ "导演": "◎导*演&&
",
+ "主演": "◎主*演&&
",
+ "影片年代": "◎上映 日期&&
",
+ "影片地区": "◎产*地&&
",
+ "线路数组": "entry-content u-text-format u-clearfix&&\"content-template",
+ "线路标题": "磁力",
+ "播放数组": "entry-content u-text-format u-clearfix&&\"content-template",
+ "播放列表": "&&
[包含:magnet]",
+ "播放链接": "*\">&&",
+ "分类": "4K蓝光原盘$4kuhd#4K电影$4kmovie#4K剧集$4ktv#杜比视界$dolbyvision#4K纪录片$4kdocu#4K演示片$4kdemo/4ktv-4ktv#杜比演示片$4kdemo/dolbydomo#DTS演示片$4kdemo/dtsdomo#8K演示片$4kdemo/8kdemo#蓝光电影$bluraymovie/bluray-movie#蓝光剧集$bluraymovie/bluraytv#3D蓝光$bluraymovie/3dmovie#蓝光纪录片$bluraymovie/documentary#蓝光演唱会$bluraymovie/audiotest",
+ "分类url": "https://www.bugutv.org/{cateId}/page/{catePg}"
+}
\ No newline at end of file
diff --git a/XBPQ/影视森林.json b/XBPQ/影视森林.json
new file mode 100644
index 0000000..fa9d9e1
--- /dev/null
+++ b/XBPQ/影视森林.json
@@ -0,0 +1,6 @@
+{
+ "简介": "pt-10 pb-10\" style=*>&&",
+ "线路数组": "#ewave-playlist&&",
+ "分类url": "https://www.imtlink.com/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$remenduanju#电视剧$dsj#电影$dianying#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/快点播.json b/XBPQ/快点播.json
new file mode 100644
index 0000000..6b6e08e
--- /dev/null
+++ b/XBPQ/快点播.json
@@ -0,0 +1,5 @@
+{
+ "简介": "detail-content\" style=*>&&",
+ "分类url": "https://www.quickvod.cc/type/{cateId}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/悟空影视.json b/XBPQ/悟空影视.json
new file mode 100644
index 0000000..f67dbd7
--- /dev/null
+++ b/XBPQ/悟空影视.json
@@ -0,0 +1,6 @@
+{
+ "简介": "col-pd\">&&<",
+ "分类": "电影$dianying#电视剧$juji#短剧$duanju#动漫$dongman#综艺$zongyi",
+ "分类url": "https://wkvod.cc/vodshow/id/{cateId}/page/{catePg}.html[https://www.wkvod.cc/type/{cateId}-{catePg}.html]",
+ "类型": "动作片$dongzuopian#喜剧片$xijupian#爱情片$aiqingpian#科幻片$kehuanpian#恐怖片$kongbupian#剧情片$juqingpian#战争片$zhanzhengpian#记录片$jilupian#动画片$donghuapian||国产剧$guochanju#港台剧$gangtaiju#日韩剧$rihanju#欧美剧$oumeiju#泰国剧$taiguoju||空||空||空"
+}
\ No newline at end of file
diff --git a/XBPQ/明星影视.json b/XBPQ/明星影视.json
new file mode 100644
index 0000000..b1c548d
--- /dev/null
+++ b/XBPQ/明星影视.json
@@ -0,0 +1,51 @@
+{
+ "作者": "",
+ "站名": "明星影院",
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "图片代理": "",
+ "直接播放": "0",
+ "播放请求头": "",
+ "过滤词": "",
+ "主页url": "https://mxvod.com",
+ "首页": "120",
+ "起始页": "1",
+ "分类url": "https://mxvod.com/vodshow/{cateId}-{area}-{by}-{class}-{lang}-{letter}---{catePg}---{year}.html",
+ "分类": "电影$dianying#电视剧$dianshiju#综艺$zongyi#动漫$dongman#短剧$duanju",
+ "二次截取": "module-items\"&&id=\"page",
+ "数组": "lazyloaded&&/a>",
+ "标题": "title=*>&&<",
+ "图片": "data-src=\"&&\"",
+ "副标题": "class\">&&<",
+ "链接": "href=\"&&\"[替换:vodplay>>voddetail#-1-1.html>>.html]",
+ "影片年代": "-----------*.html\">&&",
+ "影片地区": "video:area\" content=\"&&\"",
+ "影片类型": "video:class\" content=\"&&\"",
+ "状态": "tag-link-red\">&&",
+ "导演": "导演:&&",
+ "主演": "主演:&&\">",
+ "简介": "vod_content\"&&",
+ "线路数组": "data-dropdown&&/small>",
+ "线路标题": "value=\"&&\"+【共+&&<+集】",
+ "播放数组": "id=\"sort-item&&",
+ "播放列表": "",
+ "播放标题": "&&<",
+ "播放链接": "href=\"&&\"",
+ "跳转播放链接": "var player_*\"url\":\"&&\"",
+ "搜索请求头": "User-Agent$MOBILE_UA",
+ "搜索url": "https://mxvod.com/vodsearch/{wd}----------{pg}---.html",
+ "搜索模式": "1",
+ "搜索数组": "lazyload\"&&/a>",
+ "搜索标题": "title=\"&&\"",
+ "搜索图片": "data-src=\"&&\"",
+ "搜索副标题": "title=*>&&<",
+ "搜索链接": "href=\"&&\"",
+ "筛选": "1",
+ "类型": "动作片$dongzuopian#喜剧片$xijupian#爱情片$aiqingpian#科幻片$kehuanpian#恐怖片$kongbupian#战争片$zhanzhengpian#剧情片$juqingpian#动画片$donghuapian#悬疑片$xuanyi#纪录片$jilupian#奇幻片$qihuanpian#灾难片$zainanpian||国产剧$guochanju#欧美剧$oumeiju#日剧$riju#韩剧$hanju#港台剧$gangtai#海外剧$haiwai||大陆综艺$daluzongyi#港台综艺$gangtaizongyi#欧美综艺$oumeizongyi#日韩综艺$rihanzongyi#海外综艺$haiwaizongyi||国产动漫$guochandongman#日韩动漫$rihandongman#欧美动漫$oumeidongman#海外动漫$haiwaidongman||空",
+ "剧情": "喜剧&爱情&恐怖&动作&科幻&剧情&战争&警匪&犯罪&动画&奇幻&武侠&冒险&枪战&恐怖&悬疑&惊悚&经典&青春&文艺&微电影&古装&历史&运动&农村&儿童&网络电影||古装&战争&青春偶像&喜剧&家庭&犯罪&动作&奇幻&剧情&历史&经典&乡村&情景&商战&网剧&其他||选秀&情感&访谈&播报&旅游&音乐&美食&纪实&曲艺&生活&游戏互动&财经&求职||情感&科幻&热血&推理&搞笑&冒险&萝莉&校园&动作&机战&运动&战争&少年&少女&社会&原创&亲子&益智&励志&其他||古装&虐恋&逆袭&神豪&重生&复仇&穿越&甜宠&强者&萌宝&其它",
+ "年份": "1990-2025",
+ "年份值": "*",
+ "语言值": "*",
+ "排序": "时间&人气&评分",
+ "排序值": "time&hits&score"
+}
\ No newline at end of file
diff --git a/XBPQ/明月影院.json b/XBPQ/明月影院.json
new file mode 100644
index 0000000..83f0607
--- /dev/null
+++ b/XBPQ/明月影院.json
@@ -0,0 +1,6 @@
+{
+ "简介": "&&",
+ "副标题": "module-item-text\">&&<",
+ "分类url": "https://cnotv.com/vodshow/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "电影$1#电视剧$2#动漫$4#短剧$51#综艺$3#体育$5"
+}
\ No newline at end of file
diff --git a/XBPQ/星辰影院.json b/XBPQ/星辰影院.json
new file mode 100644
index 0000000..a4beaed
--- /dev/null
+++ b/XBPQ/星辰影院.json
@@ -0,0 +1,8 @@
+{
+ "请求头": "手机",
+ "简介": "剧情:&&",
+ "副标题": "note text-bg-r\">&&",
+ "图片": "data-original=\"&&\"",
+ "分类url": "http://www.tjlvb.com/vodlist/{cateId}______{catePg}.html[http://www.tjlvb.com/vodlist/{cateId}_____.html]",
+ "分类": "短剧$duanju#国产剧$guocanju#香港剧$xianggangju#欧美剧$oumeiju#日本剧$ribenju#海外剧$haiwaiju#台湾剧$taiwanju#韩国剧$hanguoju#泰国剧$taiguoju#动作片$dongzuopian#喜剧片$xijupian#爱情片$aiqingpian#科幻片$kehuanpian#恐怖片$kongbupian#战争片$zhanzhengpian#剧情片$juqingpian#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/来看点播.json b/XBPQ/来看点播.json
new file mode 100644
index 0000000..d29106f
--- /dev/null
+++ b/XBPQ/来看点播.json
@@ -0,0 +1,11 @@
+{
+ "简介": "check selected\">&&",
+ "数组": "public-list-div public-list-bj\">&&",
+ "图片": "data-src=\"&&\"",
+ "标题": "time-title hide ft4\" href=*>&&",
+ "副标题": "public-list-subtitle cor5 hide ft2\">&&",
+ "线路数组": "",
+ "播放数组": "anthology-list-play&&",
+ "分类url": "https://lkvod.me/show/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/樱花动漫.json b/XBPQ/樱花动漫.json
new file mode 100644
index 0000000..317804e
--- /dev/null
+++ b/XBPQ/樱花动漫.json
@@ -0,0 +1,6 @@
+{
+ "简介": "",
+ "数组": "",
+ "分类url": "http://www.yinghuadm.cn/show_{cateId}--{by}-{class}-----{catePg}---{year}.html",
+ "分类": "日本动漫$ribendongman#国产动漫$guochandongman#动漫电影$dongmandianying#欧美动漫$oumeidongman"
+}
\ No newline at end of file
diff --git a/XBPQ/樱花影视.json b/XBPQ/樱花影视.json
new file mode 100644
index 0000000..2ca5775
--- /dev/null
+++ b/XBPQ/樱花影视.json
@@ -0,0 +1,5 @@
+{
+ "线路标题": "&&
",
+ "分类url": "https://yinghuadm.fun/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "电影$1#剧集$2#综艺$3#动漫$4"
+}
\ No newline at end of file
diff --git a/XBPQ/欧乐影视.json b/XBPQ/欧乐影视.json
new file mode 100644
index 0000000..4a32637
--- /dev/null
+++ b/XBPQ/欧乐影视.json
@@ -0,0 +1,10 @@
+{
+ "简介": "fed-part-both fed-text-muted\">&&",
+ "数组": "fed-col-xs4 fed-col-sm3 fed-col-md2\">&&",
+ "图片": "data-original=\"&&\"",
+ "标题": "fed-part-eone\" href=*>&&",
+ "副标题": "fed-text-center\">&&",
+ "线路数组": "fed-btns-info fed-rims-info fed-part-eone&&",
+ "分类url": "https://www.ifuntv.cc/f/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$32#电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/毒舌影视.json b/XBPQ/毒舌影视.json
new file mode 100644
index 0000000..d8a8472
--- /dev/null
+++ b/XBPQ/毒舌影视.json
@@ -0,0 +1,13 @@
+{
+ "数组": "/detail&&",
+ "标题": "*title\">&&<",
+ "简介": "detail-desc\">&&",
+ "图片":"https://vres.wxwoq.com/vod1/vod/cover/+vod1/vod/cover/&&\"",
+ "链接": "/detail/+/&&\"",
+ "导演": "导演:&&",
+ "演员": "演员:&&",
+ "线路数组": "source-item-label&&",
+ "播放数组": "episode-list&&",
+ "分类url": "https://www.dushe03.com/show/{cateId}-{area}-{class}--{year}-2-{catePg}.html;;d0",
+ "分类": "电视剧$2#电影$1#动漫$3#综艺$4#短剧$6"
+}
diff --git a/XBPQ/永乐影视.json b/XBPQ/永乐影视.json
new file mode 100644
index 0000000..44f7706
--- /dev/null
+++ b/XBPQ/永乐影视.json
@@ -0,0 +1,7 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "分类": "电影$1#电视剧$2#综艺$3#动漫$4",
+ "类型": "动作片$6#喜剧片$7#爱情片$8#科幻片$9#奇幻片$10#恐怖片$11#剧情片$12#战争片$20#动画片$26#悬疑片$22#冒险片$23#犯罪片$24#惊悚片$45#歌舞片$46#灾难片$47#网络片$48||国产剧$13#港台剧$14#日剧$15#韩剧$33#欧美剧$16#泰剧$34#新马剧$35#其他剧$25||内地综艺$27#港台综艺$28#日本综艺$29#韩国综艺$36#欧美综艺$30#新马泰综艺$37#其他综艺$38||国产动漫$31#日本动漫$32#韩国动漫$39#港台动漫$40#新马泰动漫$41#欧美动漫$42#其他动漫$43",
+ "分类url": "https://www.ylys.tv/vodshow/{cateId}-{area}-{by}-{class}-{lang}-{letter}---{catePg}---{year}.html"
+}
\ No newline at end of file
diff --git a/XBPQ/泥视频.json b/XBPQ/泥视频.json
new file mode 100644
index 0000000..5624382
--- /dev/null
+++ b/XBPQ/泥视频.json
@@ -0,0 +1,7 @@
+{
+ "简介": "&&
",
+ "副标题": "module-item-note\">&&<",
+ "主页url": "https://www.nivod.vip/label/new/",
+ "分类url": "https://www.nivod.vip/k/{cateId}-{area}-{by}--{lang}----{catePg}---{year}/",
+ "分类": "电影$1#电视剧$2#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/流光影视.json b/XBPQ/流光影视.json
new file mode 100644
index 0000000..b746ecf
--- /dev/null
+++ b/XBPQ/流光影视.json
@@ -0,0 +1,8 @@
+{
+ "简介": "wrapper_more_text\">&&
",
+ "副标题": "tag text-overflow\">&&",
+ "导演": "导演:&&",
+ "主演": "主演:&&",
+ "分类url": "https://www.lgys.xyz/index.php/vod/show/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$20#电视剧$2#电影$1#动漫$4#综艺$3#少儿$21"
+}
\ No newline at end of file
diff --git a/XBPQ/海纳影视.json b/XBPQ/海纳影视.json
new file mode 100644
index 0000000..4b90fec
--- /dev/null
+++ b/XBPQ/海纳影视.json
@@ -0,0 +1,6 @@
+{
+ "简介": "简介:&&",
+ "数组": "row\">&&",
+ "分类url": "https://www.hainatv.net/index.php/vod/show/area/{area}/id/{cateId}/lang/{lang}/page/{catePg}/year/{year}.html",
+ "分类": "电影$1#电视剧$2#综艺$3#动漫$4#短剧$59"
+}
\ No newline at end of file
diff --git a/XBPQ/灵犀影视.json b/XBPQ/灵犀影视.json
new file mode 100644
index 0000000..54e62c6
--- /dev/null
+++ b/XBPQ/灵犀影视.json
@@ -0,0 +1,9 @@
+{
+ "简介": "description\" content=\"&&\"",
+ "图片": "data-src=\"&&\"",
+ "副标题": "hide ft2\">&&<",
+ "线路标题": "class=\"fa ds-dianying\"> &&<",
+ "播放数组":"anthology-list-play size\">&&",
+ "分类url": "https://www.lxyingshi.com/index.php/vod/show/area/{area}/by/{by}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$38#电影$1#电视剧$2#动漫$4#综艺$3#体育$62"
+}
\ No newline at end of file
diff --git a/XBPQ/热播之家.json b/XBPQ/热播之家.json
new file mode 100644
index 0000000..3c07410
--- /dev/null
+++ b/XBPQ/热播之家.json
@@ -0,0 +1,14 @@
+{
+ "简介": "简介:&&",
+ "数组": "myui-vodbox-content&&",
+ "副标题": "评分:+class=\"score\">&&<",
+ "标题": "title\">&&<",
+ "图片": "src=\"&&\"",
+ "线路数组": "#playlist&&",
+ "链接": "href=\"/index.php/vod/detail/&&.html\"",
+ "链接前缀": "https://www.rebovod.com/index.php/vod/play/",
+ "链接后缀": "/sid/1/nid/1.html",
+ "搜索url": "https://www.rebovod.com/index.php/vod/search.html?wd={wd}",
+ "分类url": "https://www.rebovod.com/index.php/vod/show/area/{area}/by/{by}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "电影$1#电视剧$2#综艺$3#动漫$4#短剧$5"
+}
diff --git a/XBPQ/熊猫影视.json b/XBPQ/熊猫影视.json
new file mode 100644
index 0000000..e7f1794
--- /dev/null
+++ b/XBPQ/熊猫影视.json
@@ -0,0 +1,9 @@
+{
+ "请求头":"手机",
+ "简介":"剧情介绍:+description\">&&",
+ "数组":"module-item\">&&module-item-text",
+ "图片":"data-src=\"&&\"",
+ "副标题":"video-class\">&&",
+ "分类url":"https://xmys1.com/index.php/vod/show/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类":"短剧$99#电视剧$79#电影$61#动漫$93#综艺$88"
+}
\ No newline at end of file
diff --git a/XBPQ/爱壹帆.json b/XBPQ/爱壹帆.json
new file mode 100644
index 0000000..e5dfb10
--- /dev/null
+++ b/XBPQ/爱壹帆.json
@@ -0,0 +1,7 @@
+{
+ "简介":"讲述&&<",
+ "主页url": "https://www.iyf.lv/label/hot/",
+ "分类url":"https://www.iyf.lv/k/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类":"电影$1#电视剧$2#动漫$4#综艺$3",
+ "副标题": "class=\"module-item-note\">&&<"
+}
\ No newline at end of file
diff --git a/XBPQ/爱我短剧.json b/XBPQ/爱我短剧.json
new file mode 100644
index 0000000..cf3bd09
--- /dev/null
+++ b/XBPQ/爱我短剧.json
@@ -0,0 +1,25 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "主页url": "https://www.aiwodj.com",
+ "数组": "lazy lazyloaded&&>]",
+ "图片": "data-src=\"&&\"",
+ "标题": "title=\"&&\"",
+ "链接": "href=\"&&\"",
+ "副标题": "+module-item-caption\">&&&&",
+ "播放列表": "",
+ "播放标题": "span>&&",
+ "播放链接": "href=\"&&\"",
+ "跳转播放链接": "urlDecode(var player_*\"url\":\"&&\")",
+ "搜索url": "https://www.aiwodj.com/vodsearch/{wd}----------{pg}---.html",
+ "搜索数组": "lazy lazyload&&>]",
+ "搜索图片": "data-src=\"&&\"",
+ "搜索标题": "+title=\"&&\"",
+ "搜索链接": "href=\"&&\"",
+ "分类url": "https://www.aiwodj.com/vodshow/{cateId}--{by}-{class}-{lang}----{catePg}---{year}.html",
+ "分类": "穿越&战神&重生&爱情&萌娃&神医&古代&玄幻&言情",
+ "分类值": "fenle&fenlei2&fenlei3&fenlei4&guda&shenyi&gudai&xuanhuan&yanqing"
+}
\ No newline at end of file
diff --git a/XBPQ/狐狸君.json b/XBPQ/狐狸君.json
new file mode 100644
index 0000000..1219930
--- /dev/null
+++ b/XBPQ/狐狸君.json
@@ -0,0 +1,21 @@
+{
+ "搜索模式": "1",
+ "搜索url": "https://www.foxjun.com/s/?q={wd}",
+ "搜索数组": "class=\"media\">&&/div>",
+ "搜索图片": "src=\"&&\"",
+ "搜索标题": "《&&》",
+ "搜索副标题": "》&&\"",
+ "搜索链接": "href=\"&&\"",
+ "标题": "《&&》",
+ "副标题": "》&&\"",
+ "影片年代": "上映日期:&&
",
+ "影片类型": "类型:&&",
+ "主演": "主演:&&",
+ "简介": "简介:&&",
+ "播放数组": "
&&",
+ "播放列表": "[包含:magnet]",
+ "播放标题": ">&&<",
+ "播放链接": "href=\"&&\"",
+    "分类url": "https://www.foxjun.com/channel/{cateId}.html?apage1={catePg}",
+ "分类": "国产剧$guochanju#电影$dianying#动画$donghua#美欧剧$meiouju#日韩剧$rihanju"
+}
\ No newline at end of file
diff --git a/XBPQ/瓜子影院.json b/XBPQ/瓜子影院.json
new file mode 100644
index 0000000..6a65726
--- /dev/null
+++ b/XBPQ/瓜子影院.json
@@ -0,0 +1,6 @@
+{
+ "简介": "text-indent: 28px;margin-top: 10px;\">&&",
+ "播放链接": "https://www.guaziys.com/Play/+/Play/&&/",
+ "分类url": "https://www.guaziys.com/Show/{cateId}-{area}--{class}-----{catePg}---{year}/",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/电视剧网.json b/XBPQ/电视剧网.json
new file mode 100644
index 0000000..f160f34
--- /dev/null
+++ b/XBPQ/电视剧网.json
@@ -0,0 +1,6 @@
+{
+ "请求头": "手机",
+ "简介": "剧情介绍:+detail-content\" style=*>&&",
+ "分类url": "https://www.dswenda.com/video/{cateId}/area/{area}/class/{class}/page/{catePg}/year/{year}/",
+ "分类": "短剧$shuangwenduanju#电视剧$lianxuju#电影$dianying#动漫$dongman#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/番茄短剧.json b/XBPQ/番茄短剧.json
new file mode 100644
index 0000000..647622a
--- /dev/null
+++ b/XBPQ/番茄短剧.json
@@ -0,0 +1,37 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "主页url": "https://www.dzwhs.com/zwhstp/5.html",
+ "首页": "120",
+ "起始页": "1",
+ "分类url": "/zwhssw/5--{by}-{cateId}-{lang}-{letter}---{catePg}---{year}.html;",
+ "分类": "古装&反转&穿越&总裁&言情&爽文&女恋&都市",
+ "分类值": "*",
+ "数组": "默认--lazyload\"&&&&",
+ "链接": "href=\"&&\"",
+ "影片年代": "年份:&&&&<",
+ "影片类型": "类型:&&<",
+ "状态": "状态:&&",
+ "线路标题": "(>&&<)",
+ "播放二次截取": "",
+ "播放数组": "playlist clearfix&&",
+ "倒序": "0",
+ "播放列表": "",
+ "播放标题": ">&&<",
+ "播放链接": "href=\"&&\"",
+ "跳转播放链接": "var player_*\"url\":\"&&\"",
+ "搜索请求头": "User-Agent$MOBILE_UA",
+ "搜索url": "/zwhssc/{wd}----------{pg}---.html",
+ "搜索模式": "1",
+ "排序": "最新上映&超高人气&全网热播&高分好评",
+ "排序值": "time&hits&up&score",
+ "筛选": "1"
+}
\ No newline at end of file
diff --git a/XBPQ/白嫖影视.json b/XBPQ/白嫖影视.json
new file mode 100644
index 0000000..eb5b6c5
--- /dev/null
+++ b/XBPQ/白嫖影视.json
@@ -0,0 +1,8 @@
+{
+ "简介": "video-info-item video-info-content vod_content\">&&",
+ "数组": "&&",
+ "播放数组": "module-blocklist\">&&",
+ "分类url": "https://ys.51baipiao.net/index.php/vod/show/area/{area}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "电视剧$2#电影$1#动漫$29#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/看片狂人.json b/XBPQ/看片狂人.json
new file mode 100644
index 0000000..54fa385
--- /dev/null
+++ b/XBPQ/看片狂人.json
@@ -0,0 +1,11 @@
+{
+ "简介": "fed-padding fed-part-both fed-text-muted\">&&",
+ "副标题": "uk-overlay-primary uk-position-bottom\">&&",
+ "线路数组": "fed-btns-info fed-rims-info fed-part-eone&&",
+ "线路标题": "data-linename=\"&&\"[不包含:VIP解析]",
+ "播放列表": "fed-padding fed-col-xs3 fed-col-md2 fed-col-lg1\">&&",
+ "播放标题": "title=\"在线观看《*》&&\">",
+ "分类url": "https://kpkuang.one/vodshow/{cateId}-{area}-{by}-{class}-----{catePg}---{year}--.html",
+ "分类": "短剧$37#国产剧$13#电视剧$2#电影$1#动漫$4#综艺$3",
+ "排序": "按更新$time#按上映$pubdate#按人气$hits#按评分$douban_score,score"
+}
\ No newline at end of file
diff --git a/XBPQ/短剧网.json b/XBPQ/短剧网.json
new file mode 100644
index 0000000..1d5f5a7
--- /dev/null
+++ b/XBPQ/短剧网.json
@@ -0,0 +1,4 @@
+{
+ "分类": "短剧$duanju#电影$dianying#电视剧$dianshiju#动漫$dongman#综艺$zongyi",
+ "分类url": "https://www.duanjuwang.cc/vodtype/{cateId}/page/{catePg}.html"
+}
\ No newline at end of file
diff --git a/XBPQ/秀儿影视.json b/XBPQ/秀儿影视.json
new file mode 100644
index 0000000..4a5a879
--- /dev/null
+++ b/XBPQ/秀儿影视.json
@@ -0,0 +1,12 @@
+{
+ "请求头": "手机",
+ "简介": "tt\" style=*>&&",
+ "数组": "&&",
+ "影片状态": "集数:&&",
+ "线路数组": "module-tab-item tab-item&&",
+ "播放数组": "sort-item-&&",
+ "搜索url": "https://www.xiuer.pro/vod/search/?wd={wd}",
+ "分类url": "https://www.xiuer.pro/show/{cateId}/area/{area}/class/{class}/page/{catePg}/year/{year}/",
+ "分类": "电视剧$dianshiju#电影$dianying#动漫$dongman#短剧$duanju#综艺$zongyi"
+}
\ No newline at end of file
diff --git a/XBPQ/策弛影视.json b/XBPQ/策弛影视.json
new file mode 100644
index 0000000..5edc224
--- /dev/null
+++ b/XBPQ/策弛影视.json
@@ -0,0 +1,13 @@
+{
+ "请求头": "手机",
+ "简介": "剧情:*> &&",
+ "数组": "TPostMv\">&&",
+ "图片": "
&&",
+ "导演": "导演:*\">&&",
+ "主演": "主演:*\">&&",
+ "分类url": "https://www.cizelain.com/tags/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#短剧$5#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/红果短剧.json b/XBPQ/红果短剧.json
new file mode 100644
index 0000000..4d513f4
--- /dev/null
+++ b/XBPQ/红果短剧.json
@@ -0,0 +1,8 @@
+{
+ "请求头": "手机",
+ "简介": "text\">&&",
+ "线路数组": "javascript:;\">&&",
+ "跳转播放链接": "src=\"blob:&&\"",
+ "分类url": "https://www.hongguodj.cc/type/{cateId}-{catePg}.html",
+ "分类": "现代$6#穿越$2#反转$7#总裁$8#都市$10#古装$11"
+}
\ No newline at end of file
diff --git a/XBPQ/统一影视.json b/XBPQ/统一影视.json
new file mode 100644
index 0000000..a9afed6
--- /dev/null
+++ b/XBPQ/统一影视.json
@@ -0,0 +1,10 @@
+{
+ "数组": "public-list-exp&&",
+ "图片": "data-src=\"&&\"",
+ "标题": "alt=\"&&\"[替换:封面图>>]",
+ "简介": "text cor3\">&&",
+ "线路数组": "fa ds-dianying&&[替换:(点击切换1080P)>>蓝光1080P]",
+ "线路标题": " &&<",
+ "分类url": "https://www.tyys2.com/index.php/vod/show/area/{area}/by/{by}/class/{class}/id/{cateId}/page/{catePg}/year/{year}.html",
+ "分类": "短剧$41#电影$2#电视剧$1#动漫$3#综艺$4"
+}
\ No newline at end of file
diff --git a/XBPQ/茶杯狐.json b/XBPQ/茶杯狐.json
new file mode 100644
index 0000000..4613985
--- /dev/null
+++ b/XBPQ/茶杯狐.json
@@ -0,0 +1,9 @@
+{
+ "简介": "简介:&&",
+ "副标题": "vtitle text-right\">&&",
+ "影片年代": "上映时间:&&",
+ "线路数组": "play-list-toggle\">&&",
+ "播放数组": "play-list fade-in&&",
+ "分类url": "https://www.youmidian.com/mv_type/{cateId}-{area}-{by}-{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$40#纪录片$41"
+}
\ No newline at end of file
diff --git a/XBPQ/酷猫影视.json b/XBPQ/酷猫影视.json
new file mode 100644
index 0000000..9e2a69e
--- /dev/null
+++ b/XBPQ/酷猫影视.json
@@ -0,0 +1,5 @@
+{
+ "简介": "剧情介绍:+detail-sketch\">&&",
+ "分类url": "https://www.kmvod.cc/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/雪糕影视.json b/XBPQ/雪糕影视.json
new file mode 100644
index 0000000..d90a276
--- /dev/null
+++ b/XBPQ/雪糕影视.json
@@ -0,0 +1,5 @@
+{
+ "分类": "电影$1#剧集$2#综艺$3#动漫$4",
+ "类型": "动作片$1#喜剧片$2#爱情片$21#海外动漫$3#科幻片$4#恐怖片$5#剧情片$6#战争片$7#纪录片$8#其他$9",
+ "分类url": "https://www.xgitv.com/vshow/{cateId}-----------.html"
+}
\ No newline at end of file
diff --git a/XBPQ/面包影视.json b/XBPQ/面包影视.json
new file mode 100644
index 0000000..d943372
--- /dev/null
+++ b/XBPQ/面包影视.json
@@ -0,0 +1,7 @@
+{
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "分类url": "https://v.aiwule.com/vodshow/{cateId}-{area}-{by}-{class}-{lang}-{letter}---{catePg}---{year}.html",
+ "分类": "电影$20#电视剧$21#动漫$23#综艺$22#短剧$47",
+ "简介": "简介:&&"
+}
\ No newline at end of file
diff --git a/XBPQ/飞飞影视.json b/XBPQ/飞飞影视.json
new file mode 100644
index 0000000..218f953
--- /dev/null
+++ b/XBPQ/飞飞影视.json
@@ -0,0 +1,11 @@
+{
+ "请求头": "手机",
+ "简介": "简介:&&",
+ "数组": "fed-col-sm3 fed-col-md2\">&&",
+ "图片": "src=\"&&\"",
+ "标题": "fed-part-eone\" href=*>&&",
+ "副标题": "fed-text-center\">&&",
+ "线路数组": "fed-btns-info fed-rims-info&&",
+ "分类url": "https://www.ffys2.cc/vodshow/{cateId}-{area}--{class}-----{catePg}---{year}.html",
+ "分类": "短剧$20#电视剧$2#电影$1#动漫$4#综艺$3#午夜$22"
+}
\ No newline at end of file
diff --git a/XBPQ/饭团影视.json b/XBPQ/饭团影视.json
new file mode 100644
index 0000000..53ad195
--- /dev/null
+++ b/XBPQ/饭团影视.json
@@ -0,0 +1,6 @@
+{
+ "简介": "&&
",
+ "副标题": "module-item-note\">&&",
+ "分类url": "https://www.kankanqu.vip/type/{cateId}.html",
+ "分类": "电视剧$2#电影$1#动漫$4#综艺$3"
+}
\ No newline at end of file
diff --git a/XBPQ/骚火影视.json b/XBPQ/骚火影视.json
new file mode 100644
index 0000000..4ab6048
--- /dev/null
+++ b/XBPQ/骚火影视.json
@@ -0,0 +1,16 @@
+{
+ "首页": "0",
+ "请求头": "User-Agent$MOBILE_UA#Accept$text/html,application/xhtml+xml,application/xml;;q=0.9,image/avif,image/webp,image/apng,*/*;;q=0.8,application/signed-exchange;;v=b3;;q=0.7&&Accept-Language@zh-CN,zh;;q=0.9",
+ "分类url": "https://shdy2.com/list/{cateId}-{catePg}.html",
+ "分类": "剧集$2#电影$1#港剧$21#台剧$26#日剧$24#韩剧$22#美剧$23",
+ "数组": "class=\"v_img&&/li>",
+ "标题": "alt=\"&&\"",
+ "图片": "original=\"&&\"",
+ "链接": "href=\"&&\"",
+ "简介": "show_part\">&&",
+ "播放数组": "",
+ "倒序": "1"
+}
\ No newline at end of file
diff --git a/XBPQ/麦田影视.json b/XBPQ/麦田影视.json
new file mode 100644
index 0000000..bd2404a
--- /dev/null
+++ b/XBPQ/麦田影视.json
@@ -0,0 +1,38 @@
+{
+ "站名": "麦田appXBPQ",
+ "规则作者": "啦啦啦",
+ "请求头": "User-Agent$MOBILE_UA",
+ "编码": "UTF-8",
+ "主页url": "http://172.247.31.147:25321/mtys.php/v6/index_video",
+ "分类url": "http://172.247.31.147:25321/mtys.php/v6/video?pg={catePg}&tid={cateId}&class={class}&area={area}&lang={lang}&year={year}&order={by};;mr",
+ "分类": "电影&电视剧&综艺&动漫&少儿&短剧&直播",
+ "分类值": "1&2&3&4&25&26&30",
+ "数组": "{&&}",
+ "图片": "vod_pic\":\"&&\"",
+ "标题": "vod_name\":\"&&\"",
+ "副标题": "vod_remarks\":\"&&\"",
+ "链接": "http://172.247.31.147:25321/mtys.php/v6/video_detail?id=+vod_id\":&&,",
+ "线路二次截取": "vod_url_with_player\":\\[{&&}\\][替换:\"name\":\">>\"name\":\"题]",
+ "线路数组": "\"name\":\"&&,",
+ "线路标题": "题&&\"",
+ "播放二次截取": "vod_url_with_player\":\\[{&&}\\]",
+ "播放数组": "url\":&&,[替换:\">>接表题#$>>题接#\\#>>接表表题]",
+ "播放列表": "表&&表",
+ "播放标题": "题&&题",
+ "播放链接": "urlDecode(接&&接)",
+ "解析": "PD源$http://172.247.31.148:25320/jx.php?url=#NB源$https://api.nbyjson.top:7788/api/?key=ws9Lz1EtqfU09AzZKl&url=#ZB源$http://27.25.159.14:6699/api/mgapp.php?url=",
+ "影片类型": "vod_class\":\"&&\"",
+ "影片年代": "vod_year\":\"&&\"",
+ "影片地区": "vod_area\":\"&&\"",
+ "导演": "vod_director\":\"&&\"",
+ "主演": "vod_actor\":\"&&\"",
+ "简介": "vod_content\":\"&&\"",
+ "搜索url": "http://172.247.31.147:25321/mtys.php/v6/search?pg={pg}&tid=0&text={wd}",
+ "搜索模式": "1",
+ "搜索二次截取": "data\":\\[&&\\]",
+ "搜索数组": "{&&}",
+ "搜索图片": "vod_pic\":\"&&\"",
+ "搜索标题": "vod_name\":\"&&\"",
+ "搜索副标题": "vod_remarks\":\"&&\"",
+ "搜索链接": "http://172.247.31.147:25321/mtys.php/v6/video_detail?id=+vod_id\":&&,"
+}
\ No newline at end of file
diff --git a/XBPQ/黑木耳影视.json b/XBPQ/黑木耳影视.json
new file mode 100644
index 0000000..8e95f41
--- /dev/null
+++ b/XBPQ/黑木耳影视.json
@@ -0,0 +1,36 @@
+{
+ "作者": "",
+ "站名": "采集",
+ "头部集合": "User-Agent$Mozilla/5.0 (iPad; CPU OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1#authority$heimuer.tv#referer$https://heimuer.tv/",
+ "分类": "国产剧&喜剧片&爱情片&恐怖片&战争片&科幻片&动作片&冒险片&奇幻片&惊悚片&欧美剧&港剧&台剧&韩剧&日剧&泰剧&国产综艺&港台综艺&韩国综艺&日本综艺&欧美综艺&欧美动漫&日本动漫&国产动漫&古装短剧&虐恋短剧&逆袭短剧&神豪短剧&重生短剧&甜宠短剧&其他短剧",
+ "分类值": "13&10&23&12&34&25&7&8&11&21&30&14&29&15&16&28&38&39&40&41&42&57&58&60&45&46&47&49&50&53&56",
+ "分类url": "https://heimuer.tv/api.php/provide/vod/?ac=list&ac=detail&t={cateId}&pg={catePg}",
+ "数组二次截取": "list\":[&&]",
+ "数组": "{&&}",
+ "图片": "vod_pic\"*\"&&\"",
+ "标题": "vod_name\"*\"&&\"",
+ "副标题": "vod_remarks\"*\"&&\"",
+ "链接": "https://heimuer.tv/api.php/provide/vod/?ac=list&ac=detail&ids=+vod_id\":&&,",
+ "搜索url": "https://heimuer.tv/api.php/provide/vod/?ac=detail&wd={wd}",
+ "搜索模式": "1",
+ "搜索二次截取": "list\":[&&]",
+ "搜索数组": "{&&}",
+ "搜索图片": "vod_pic\"*\"&&\"",
+ "搜索标题": "vod_name\"*\"&&\"",
+ "搜索副标题": "vod_remarks\"*\"&&\"",
+ "搜索链接": "https://heimuer.tv/api.php/provide/vod/?ac=list&ac=detail&ids=+vod_id\":&&,",
+ "影片类型": "vod_class\"*\"&&\"",
+ "导演": "vod_director\"*\"&&\"",
+ "主演": "vod_actor\"*\"&&\"",
+ "简介": "vod_content\"*\"&&\"",
+ "线路二次截取": "\"list\":[&&]",
+ "线路数组": "\"vod_play_from\":&&,",
+ "线路标题": "\"&&\"",
+ "播放数组": "vod_play_url\":&&,[替换:\">>链表题#$>>题链#\\#>>链表表题]",
+ "播放二次截取": "",
+ "播放列表": "表&&表",
+ "播放标题": "题&&题",
+ "播放链接": "链&&链+?sku=OWY3ZDA4ZjVjYzY3YmRhYjM5NTUwYzEyZWRjNjUyZWM1NjQ2ZGRjYTVhMGVkM2Nh&p=1&sign=9a69d1563936ead3677623722660c4d9",
+ "播放请求头": "User-Agent$Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36#Referer$https://heimuer.tv/#Cookie$PHPSESSID=t9a0fl2ngh4t175j3995v2669d",
+ "嗅探词": ".m3u8#.mp4#video_mp4#feiyunNB.mp4#.mp4#cdn.123pan.cn#huoshanvod.com"
+}
\ No newline at end of file
diff --git a/XYQHiker/BT天堂.json b/XYQHiker/BT天堂.json
new file mode 100644
index 0000000..b49de57
--- /dev/null
+++ b/XYQHiker/BT天堂.json
@@ -0,0 +1,71 @@
+{
+ "规则名": "BT天堂",
+ "规则作者": "",
+ "请求头参数": "PC_UA",
+ "网页编码格式": "UTF-8",
+ "图片是否需要代理": "0",
+ "是否开启获取首页数据": "1",
+ "首页推荐链接": "https://www.bttt11.com",
+ "首页列表数组规则": "body&&.ul-imgtxt1",
+ "首页片单列表数组规则": "li",
+ "首页片单是否Jsoup写法": "1",
+ "首页片单标题": "h3&&Text",
+ "首页片单链接": "a&&href",
+ "首页片单图片": "img&&src",
+ "首页片单副标题": "span,-1&&Text",
+ "首页片单链接加前缀": "https://www.bttt11.com",
+ "首页片单链接加后缀": "",
+ "分类起始页码": "0",
+ "分类链接": "https://www.bttt11.com/html/{cateId}-{catePg}.html",
+ //"分类链接": "https://www.clgod.xyz/list/{catePg}-{cateId}-0-0.html",
+ "分类名称": "欧美电影&日韩电影&港台电影&大陆电影&亚太电影&欧美剧&日韩剧&港台剧&国产剧&亚太剧&动漫&综艺&纪录片",
+ "分类名称替换词": "1&2&3&4&5&6&7&8&9&10&11&12&13",
+ //"分类名称": "电影&动作&灵异&奇幻&宗教&励志&犯罪&功夫&喜剧&黑色&幽默&爱情&香港&纪录片&灾难&亲情&暴力&僵尸&丧尸&悬疑&人性&惊悚&血腥&武侠&剧情&历史&战争&经典&漫画&改编&恐怖&穿越&青春&黑帮&文艺&浪漫&同志&冒险&动画&演唱会",
+ //"分类名称替换词": "0&1&2&3&4&5&6&7&8&10&11&12&1315&16&17&18&19&20&21&22&23&24&25&27&28&29&30&31&32&33&34&35&36&37&38&39&40&41&42",
+ "筛选数据": {},
+ "分类截取模式": "1",
+ "分类列表数组规则": ".ul-imgtxt2&&li",
+ "分类片单是否Jsoup写法": "1",
+ "分类片单标题": "h3&&Text",
+ "分类片单链接": "a&&href",
+ "分类片单图片": "img&&src",
+ "分类片单副标题": "span,-1--a&&Text!更新时间:",
+ "分类片单链接加前缀": "https://www.bttt11.com",
+ "分类片单链接加后缀": "",
+ "搜索请求头参数": "User-Agent$PC_UA",
+ "搜索链接": "https://www.bttt11.com/e/search/;post",
+ "POST请求数据": "show=title,newstext&keyboard={wd}&searchtype=影视搜索",
+ "搜索截取模式": "1",
+ "搜索列表数组规则": ".ul-imgtxt2&&li",
+ "搜索片单是否Jsoup写法": "1",
+ "搜索片单图片": "img&&src",
+ "搜索片单标题": "h3&&Text",
+ "搜索片单链接": "a&&href",
+ "搜索片单副标题": "span,-1--a&&Text!更新时间:",
+ "搜索片单链接加前缀": "https://www.bttt11.com",
+ "搜索片单链接加后缀": "",
+ "链接是否直接播放": "0",
+ "直接播放链接加前缀": "",
+ "直接播放链接加后缀": "",
+ "直接播放直链视频请求头": "",
+ "详情是否Jsoup写法": "0",
+ "类型详情": "◎类 别&&
",
+ "年代详情": "◎年 代&&
",
+ "地区详情": "◎产 地&&
",
+ "演员详情": "",
+ "简介详情": "",
+ "线路列表数组规则": "",
+ "线路标题": "",
+ "播放列表数组规则": "body&&.container",
+ "选集列表数组规则": "a[href^=magnet]||a[href^=ed2K]",
+ "选集标题链接是否Jsoup写法": "1",
+ "选集标题": "Text",
+ "选集链接": "a&&href",
+ "是否反转选集序列": "0",
+ "选集链接加前缀": "",
+ "选集链接加后缀": "",
+ "分析MacPlayer": "0",
+ "是否开启手动嗅探": "0",
+ "手动嗅探视频链接关键词": ".mp4#.m3u8#.flv",
+ "手动嗅探视频链接过滤词": ".html"
+}
\ No newline at end of file
diff --git a/XYQHiker/adult/badnews.json b/XYQHiker/adult/badnews.json
new file mode 100644
index 0000000..f60f68d
--- /dev/null
+++ b/XYQHiker/adult/badnews.json
@@ -0,0 +1,246 @@
+{
+ "规则名": "BAD.news-XYQHiker",
+ "规则作者": "飞鱼",
+ "请求头参数":"手机",
+ "网页编码格式":"UTF-8",
+ "图片是否需要代理":"0",
+ "是否开启获取首页数据": "0",
+ //首页推荐数据获取链接
+ "首页推荐链接": "https://bad.news/search/q-学生",
+ //首页推荐列表数组截取。
+ "首页列表数组规则": "#item-container",
+ //首页推荐片单列表数组定位。
+ "首页片单列表数组规则": "table",
+ //首页推荐片单信息jsoup与正则截取写法切换,只作用于html网页,1为jsoup写法(默认),0为正则截取写法
+ "首页片单是否Jsoup写法":"1",
+ //下面这六个首页数据如果不填将调用分类那截取的配置(片单写法需一致且取值也得一致)。
+ //首页片单标题
+ "首页片单标题": "h3&&a&&Text",
+ //首页推荐片单链接
+ "首页片单链接": "h3&&a&&href",
+ //首页推荐片单图片,支持自定义图片链接
+ "首页片单图片": ".my-videos&&poster",
+ //首页推荐片单副标题
+ "首页片单副标题":".ct-time&&Text",
+ //首页推荐片单链接补前缀
+ "首页片单链接加前缀": "https://bad.news",
+ //首页推荐片单链接补后缀
+ "首页片单链接加后缀": "",
+ "分类起始页码": "1",
+ //"分类链接": "https://www.l0l.tv/index.php/vod/show/id/{cateId}/page/{catePg}.html",
+ "分类链接": "https://bad.news{cateId}{by}/page-{catePg}",
+ "分类名称": "热门&无码&中字&周榜&月榜&主题分类&短视频&长视频&动漫",
+ "分类名称替换词": "/av&/av/search/type-tags/q-無碼流出&/av/search/q-中文字幕/via-log&/av/rank-week&/av/rank-month&/av/search/type-tags/q-黑丝&/tag/porn&/tag/long-porn&/dm",
+ //"筛选数据":{},
+ "分类截取模式": "1",
+ "分类列表数组规则": ".stui-vodlist.clearfix||#item-container&&table:not(:contains(ActivityPub))||li:not(:contains(ActivityPub))",
+ "分类片单是否Jsoup写法":"1",
+ "分类片单标题": "h3||h4&&a&&Text",
+ "分类片单链接": "h3||h4&&a&&href",
+ "分类片单图片": ".my-videos||a&&poster||data-echo-background",
+ "分类片单副标题":".ct-time&&Text",
+ "分类片单链接加前缀":"https://bad.news",
+ "分类片单链接加后缀": "",
+ "搜索请求头参数":"User-Agent$手机",
+ "搜索链接": "https://bad.news/dm/search/q-{wd}",
+ "POST请求数据":"0",
+ "搜索截取模式": "1",
+ "搜索列表数组规则": ".stui-vodlist.clearfix||#item-container&&table:not(:contains(ActivityPub))||li:not(:contains(ActivityPub))",
+ "搜索片单是否Jsoup写法":"1",
+ "搜索片单图片": ".my-videos||a&&poster||data-echo-background",
+ "搜索片单标题": "h3||h4&&a&&Text",
+ "搜索片单链接": "h3||h4&&a&&href",
+ "搜索片单副标题":".ct-time&&Text",
+ "搜索片单链接加前缀": "https://bad.news",
+ "搜索片单链接加后缀": "",
+ "链接是否直接播放": "0",
+"直接播放链接加前缀": "",
+"直接播放链接加后缀": "",
+"直接播放直链视频请求头": "",
+"详情是否Jsoup写法":"1",
+ "类型详情": ".detail-content&&p:has(:contains(标签))&&Text!标签:",
+ "年代详情": "",
+ "地区详情": "",
+ "演员详情": ".detail-content&&a,0&&title",
+ "简介详情": ".detail-content&&.desc.margin-0&&Text",
+ "线路列表数组规则": "",
+ "线路标题": "",
+ "播放列表数组规则": ".col-lg-12.clearfix||.coverimg",
+ "选集列表数组规则": "video",
+ "选集标题链接是否Jsoup写法":"1",
+ "选集标题": "'播放'",
+ "选集链接": "video&&data-source",
+ "是否反转选集序列": "0",
+ "选集链接加前缀": "",
+ "选集链接加后缀": "",
+
+
+ "分析MacPlayer":"0",
+ "是否开启手动嗅探":"0",
+ "手动嗅探视频链接关键词":".mp4#.m3u8#.flv",
+ "手动嗅探视频链接过滤词":".html#=http",
+
+ "筛选数据":{
+"/av/search/type-tags/q-黑丝":[
+ {"key":"cateId","name":"分类","value":[
+{"v":"/av/search/type-tags/q-黑丝","n":"黑丝"},
+{"v":"/av/search/type-tags/q-过膝袜","n":"过膝袜"},
+{"v":"/av/search/type-tags/q-运动裝","n":"运动裝"},
+{"v":"/av/search/type-tags/q-肉丝","n":"肉丝"},
+{"v":"/av/search/type-tags/q-丝袜","n":"丝袜"},
+{"v":"/av/search/type-tags/q-眼镜娘","n":"眼镜娘"},
+{"v":"/av/search/type-tags/q-兽耳","n":"兽耳"},
+{"v":"/av/search/type-tags/q-渔网","n":"渔网"},
+{"v":"/av/search/type-tags/q-水着","n":"水着"},
+{"v":"/av/search/type-tags/q-校服","n":"校服"},
+{"v":"/av/search/type-tags/q-旗袍","n":"旗袍"},
+{"v":"/av/search/type-tags/q-婚纱","n":"婚纱"},
+{"v":"/av/search/type-tags/q-女仆","n":"女仆"},
+{"v":"/av/search/type-tags/q-和服","n":"和服"},
+{"v":"/av/search/type-tags/q-吊带袜","n":"吊带袜"},
+{"v":"/av/search/type-tags/q-兔女郎","n":"兔女郎"},
+{"v":"/av/search/type-tags/q-Cosplay","n":"Cosplay"},
+{"v":"/av/search/type-tags/q-黑肉","n":"黑肉"},
+{"v":"/av/search/type-tags/q-长身","n":"长身"},
+{"v":"/av/search/type-tags/q-软体","n":"软体"},
+{"v":"/av/search/type-tags/q-贫乳","n":"贫乳"},
+{"v":"/av/search/type-tags/q-萝莉","n":"萝莉"},
+{"v":"/av/search/type-tags/q-美腿","n":"美腿"},
+{"v":"/av/search/type-tags/q-美尻","n":"美尻"},
+{"v":"/av/search/type-tags/q-纹身","n":"纹身"},
+{"v":"/av/search/type-tags/q-短发","n":"短发"},
+{"v":"/av/search/type-tags/q-白虎","n":"白虎"},
+{"v":"/av/search/type-tags/q-熟女","n":"熟女"},
+{"v":"/av/search/type-tags/q-巨乳","n":"巨乳"},
+{"v":"/av/search/type-tags/q-少女","n":"少女"},
+{"v":"/av/search/type-tags/q-颜射","n":"颜射"},
+{"v":"/av/search/type-tags/q-脚交","n":"脚交"},
+{"v":"/av/search/type-tags/q-肛交","n":"肛交"},
+{"v":"/av/search/type-tags/q-痉挛","n":"痉挛"},
+{"v":"/av/search/type-tags/q-潮吹","n":"潮吹"},
+{"v":"/av/search/type-tags/q-深喉","n":"深喉"},
+{"v":"/av/search/type-tags/q-接吻","n":"接吻"},
+{"v":"/av/search/type-tags/q-口爆","n":"口爆"},
+{"v":"/av/search/type-tags/q-口交","n":"口交"},
+{"v":"/av/search/type-tags/q-乳交","n":"乳交"},
+{"v":"/av/search/type-tags/q-中出","n":"中出"},
+{"v":"/av/search/type-tags/q-露出","n":"露出"},
+{"v":"/av/search/type-tags/q-轮奸","n":"轮奸"},
+{"v":"/av/search/type-tags/q-调教","n":"调教"},
+{"v":"/av/search/type-tags/q-捆绑","n":"捆绑"},
+{"v":"/av/search/type-tags/q-瞬间插入","n":"瞬间插入"},
+{"v":"/av/search/type-tags/q-痴汉","n":"痴汉"},
+{"v":"/av/search/type-tags/q-痴女","n":"痴女"},
+{"v":"/av/search/type-tags/q-男M","n":"男M"},
+{"v":"/av/search/type-tags/q-泥醉","n":"泥醉"},
+{"v":"/av/search/type-tags/q-泡姬","n":"泡姬"},
+{"v":"/av/search/type-tags/q-母乳","n":"母乳"},
+{"v":"/av/search/type-tags/q-放尿","n":"放尿"},
+{"v":"/av/search/type-tags/q-按摩","n":"按摩"},
+{"v":"/av/search/type-tags/q-强奸","n":"强奸"},
+{"v":"/av/search/type-tags/q-多P","n":"多P"},
+{"v":"/av/search/type-tags/q-刑具","n":"刑具"},
+{"v":"/av/search/type-tags/q-凌辱","n":"凌辱"},
+{"v":"/av/search/type-tags/q-一日十回","n":"一日十回"},
+{"v":"/av/search/type-tags/q-3P","n":"3P"},
+{"v":"/av/search/type-tags/q-黑人","n":"黑人"},
+{"v":"/av/search/type-tags/q-丑男","n":"丑男"},
+{"v":"/av/search/type-tags/q-诱惑","n":"诱惑"},
+{"v":"/av/search/type-tags/q-童贞","n":"童贞"},
+{"v":"/av/search/type-tags/q-时间停止","n":"时间停止"},
+{"v":"/av/search/type-tags/q-复仇","n":"复仇"},
+{"v":"/av/search/type-tags/q-年龄差","n":"年龄差"},
+{"v":"/av/search/type-tags/q-巨汉","n":"巨汉"},
+{"v":"/av/search/type-tags/q-媚药","n":"媚药"},
+{"v":"/av/search/type-tags/q-夫目前犯","n":"夫目前犯"},
+{"v":"/av/search/type-tags/q-出轨","n":"出轨"},
+{"v":"/av/search/type-tags/q-催眠","n":"催眠"},
+{"v":"/av/search/type-tags/q-偷拍","n":"偷拍"},
+{"v":"/av/search/type-tags/q-不伦","n":"不伦"},
+{"v":"/av/search/type-tags/q-下雨天","n":"下雨天"},
+{"v":"/av/search/type-tags/q-NTR","n":"NTR"},
+{"v":"/av/search/type-tags/q-风俗娘","n":"风俗娘"},
+{"v":"/av/search/type-tags/q-医生","n":"医生"},
+{"v":"/av/search/type-tags/q-逃犯","n":"逃犯"},
+{"v":"/av/search/type-tags/q-护士","n":"护士"},
+{"v":"/av/search/type-tags/q-老师","n":"老师"},
+{"v":"/av/search/type-tags/q-空姐","n":"空姐"},
+{"v":"/av/search/type-tags/q-球队经理","n":"球队经理"},
+{"v":"/av/search/type-tags/q-未亡人","n":"未亡人"},
+{"v":"/av/search/type-tags/q-搜查官","n":"搜查官"},
+{"v":"/av/search/type-tags/q-情侣","n":"情侣"},
+{"v":"/av/search/type-tags/q-家政妇","n":"家政妇"},
+{"v":"/av/search/type-tags/q-家庭教師","n":"家庭教師"},
+{"v":"/av/search/type-tags/q-偶像","n":"偶像"},
+{"v":"/av/search/type-tags/q-人妻","n":"人妻"},
+{"v":"/av/search/type-tags/q-主播","n":"主播"},
+{"v":"/av/search/type-tags/q-OL","n":"OL"},
+{"v":"/av/search/type-tags/q-魔镜号","n":"魔镜号"},
+{"v":"/av/search/type-tags/q-电车","n":"电车"},
+{"v":"/av/search/type-tags/q-处女","n":"处女"},
+{"v":"/av/search/type-tags/q-监狱","n":"监狱"},
+{"v":"/av/search/type-tags/q-温泉","n":"温泉"},
+{"v":"/av/search/type-tags/q-洗浴场","n":"洗浴场"},
+{"v":"/av/search/type-tags/q-泳池","n":"泳池"},
+{"v":"/av/search/type-tags/q-汽车","n":"汽车"},
+{"v":"/av/search/type-tags/q-厕所","n":"厕所"},
+{"v":"/av/search/type-tags/q-学校","n":"学校"},
+{"v":"/av/search/type-tags/q-图书馆","n":"图书馆"},
+{"v":"/av/search/type-tags/q-健身房","n":"健身房"},
+{"v":"/av/search/type-tags/q-便利店","n":"便利店"},
+{"v":"/av/search/type-tags/q-录像","n":"录像"},
+{"v":"/av/search/type-tags/q-处女作/引退作","n":"处女作/引退作"},
+{"v":"/av/search/type-tags/q-综艺","n":"综艺"},
+{"v":"/av/search/type-tags/q-节日主题","n":"节日主题"},
+{"v":"/av/search/type-tags/q-感谢祭","n":"感谢祭"},
+{"v":"/av/search/type-tags/q-4小时以上","n":"4小时以上"}
+
+]
+}
+ ],
+"/dm":[
+ {"key":"cateId","name":"分类","value":[
+{"v":"/dm","n":"动漫"},
+{"v":"/dm/type/q-3D","n":"3d动漫"},
+{"v":"/dm/type/q-%E5%90%8C%E4%BA%BA","n":"同人作品"},
+{"v":"/dm/type/q-Cosplay","n":"Cosplay"}
+]
+}],
+"sort/id/295":[
+ {"key":"cateId","name":"分类","value":[
+{"v":"sort/id/295","n":"无码专区"},
+{"v":"sort/id/297","n":"制服诱惑"},
+{"v":"sort/id/298","n":"三级伦理"},
+{"v":"sort/id/299","n":"AI换脸"},
+{"v":"sort/id/300","n":"中文字幕"},
+{"v":"sort/id/301","n":"卡通动漫"},
+{"v":"sort/id/302","n":"欧美系列"},
+{"v":"sort/id/303","n":"美女主播"},
+{"v":"sort/id/304","n":"国产自拍"},
+{"v":"sort/id/305","n":"熟女人妻"},
+{"v":"sort/id/306","n":"萝莉少女"},
+{"v":"sort/id/307","n":"女同性爱"},
+{"v":"sort/id/308","n":"多人群交"},
+{"v":"sort/id/309","n":"美乳巨乳"},
+{"v":"sort/id/310","n":"强奸乱伦"},
+{"v":"sort/id/311","n":"抖阴视频"},
+{"v":"sort/id/312","n":"韩国主播"},
+{"v":"sort/id/313","n":"网红头条"},
+{"v":"sort/id/314","n":"网爆黑料"},
+{"v":"sort/id/315","n":"欧美无码"},
+{"v":"sort/id/316","n":"女优明星"},
+{"v":"sort/id/317","n":"SM调教"},
+{"v":"sort/id/326","n":"AV解说"}
+]}],
+
+"/tag/porn":[
+
+{"key":"by",
+ "name":"排序",
+ "value":[
+ {"v":"/sort-new","n":"最新"},
+ {"v":"/sort-hot","n":"热门"},
+ {"v":"/sort-score","n":"最高分"},
+ {"v":"/sort-better","n":"精选"}]}
+]}
+}
\ No newline at end of file
diff --git a/XYQHiker/农民影视.json b/XYQHiker/农民影视.json
new file mode 100644
index 0000000..32f2acb
--- /dev/null
+++ b/XYQHiker/农民影视.json
@@ -0,0 +1,71 @@
+{
+ "规则名": "农民影视",
+ "规则作者": "香雅情",
+ "请求头参数": "User-Agent$手机#accept$text/html#Referer$https://vip.wwgz.cn:5200/",
+ "网页编码格式": "UTF-8",
+ "图片是否需要代理": "0",
+ "是否开启获取首页数据": "1",
+ "首页推荐链接": "https://vip.wwgz.cn:5200/",
+ "首页列表数组规则": "body&&.globalPicList",
+ "首页片单列表数组规则": "li:has(img)",
+ "首页片单是否Jsoup写法": "1",
+ "首页片单标题": ".sTit&&Text",
+ "首页片单链接": "a&&href",
+ "首页片单图片": "img&&data-echo||data-src||src",
+ "首页片单副标题": ".sBottom&&Text",
+ "首页片单链接加前缀": "https://vip.wwgz.cn:5200/",
+ "首页片单链接加后缀": "",
+ "分类起始页码": "1",
+ "分类链接": "https://vip.wwgz.cn:5200/vod-list-id-{cateId}-pg-{catePg}-order--by-{by}-class--year-{year}-letter--area-{area}-lang-.html",
+ "分类名称": "电影&电视剧&综艺&动漫&短剧",
+ "分类名称替换词": "1&2&3&4&26",
+ "筛选数据": "ext",
+ "筛选子分类名称": "动作片&喜剧片&爱情片&科幻片&恐怖片&剧情片&战争片&惊悚片&奇幻片||国产剧&港台泰&日韩剧&欧美剧||空||动漫剧&动漫片",
+ "筛选子分类替换词": "5&6&7&8&9&10&11&16&17||12&13&14&15||空||18&19",
+ "筛选地区名称": "大陆&香港&台湾&美国&韩国&日本&泰国&新加坡&马来西亚&印度&英国&法国&加拿大&西班牙&俄罗斯&其它",
+ "筛选地区替换词": "*",
+ "分类截取模式": "1",
+ "分类列表数组规则": ".globalPicList&&li",
+ "分类片单是否Jsoup写法": "1",
+ "分类片单标题": ".sTit&&Text",
+ "分类片单链接": "a&&href",
+ "分类片单图片": "img&&data-echo||data-src||src",
+ "分类片单副标题": ".sBottom&&Text",
+ "分类片单链接加前缀": "https://vip.wwgz.cn:5200/",
+ "分类片单链接加后缀": "",
+ "搜索请求头参数": "User-Agent$手机#Referer$https://vip.wwgz.cn:5200/",
+ "search_url": "https://vip.wwgz.cn:5200/index.php?m=vod-search;post",
+ "sea_PtBody": "wd={wd}",
+ "search_mode": "1",
+ "sea_arr_rule": "#data_list&&li",
+ "sea_is_jsoup": "1",
+ "sea_pic": ".lazyload&&data-src",
+ "sea_title": ".sTit&&Text",
+ "sea_url": "a&&href",
+ "搜索片单副标题": ".sDes,-1&&Text",
+ "搜索片单链接加前缀": "https://vip.wwgz.cn:5200/",
+ "搜索片单链接加后缀": "",
+ "链接是否直接播放": "0",
+ "直接播放链接加前缀": "https://live.52sf.ga/huya/",
+ "直接播放链接加后缀": "#isVideo=true#",
+ "直接播放直链视频请求头": "authority$ku.peizq.online#Referer$https://play.peizq.online",
+ "详情是否Jsoup写法": "1",
+ "类型详情": ".type-title&&Text",
+ "年代详情": "body&&span:contains(年代:)&&Text!年代:",
+ "地区详情": "",
+ "演员详情": "body&&.sDes:contains(主演:)&&Text!主演:",
+ "简介详情": ".detail-con&&p&&Text!简介:",
+ "线路列表数组规则": "#leftTabBox&&ul&&li",
+ "播放列表数组规则": "#leftTabBox&&.numList",
+ "选集列表数组规则": "li",
+ "选集标题链接是否Jsoup写法": "1",
+ "选集标题": "a&&Text",
+ "选集链接": "a&&href",
+ "是否反转选集序列": "1",
+ "选集链接加前缀": "https://vip.wwgz.cn:5200/",
+ "选集链接加后缀": "",
+ "分析MacPlayer": "0",
+ "是否开启手动嗅探": "1",
+ "手动嗅探视频链接关键词": ".mp4#.m3u8#item/video#video_mp4#video/tos",
+ "手动嗅探视频链接过滤词": ".html#=http"
+}
\ No newline at end of file
diff --git a/XYQHiker/巴士动漫.json b/XYQHiker/巴士动漫.json
new file mode 100644
index 0000000..d2132d8
--- /dev/null
+++ b/XYQHiker/巴士动漫.json
@@ -0,0 +1,81 @@
+{
+ "规则名": "动漫巴士",
+ "规则作者": "",
+ "请求头参数": "User-Agent$MOBILE_UA#Accept$text/html#accept-language$zh-CN,zh;q=0.8",
+ "网页编码格式": "UTF-8",
+ "图片是否需要代理": "0",
+ "是否开启获取首页数据": "1",
+ "首页推荐链接": "https://dm84.net",
+ "首页列表数组规则": "body&&.v_list",
+ "首页片单列表数组规则": "li",
+ "首页片单是否Jsoup写法": "1",
+ "分类起始页码": "1",
+ "分类链接": "https://dm84.net/list-{cateId}-{catePg}.html[firstPage=https://dm84.net/list-{cateId}.html]",
+ "分类名称": "国产动漫&日本动漫&欧美动漫&动漫电影",
+ "分类名称替换词": "1&2&3&4",
+ "筛选数据": {},
+ //"筛选数据": "ext",
+ //{cateId}
+ "筛选子分类名称": "",
+ "筛选子分类替换词": "",
+ //{class}
+ "筛选类型名称": "",
+ "筛选类型替换词": "*",
+ //{area}
+ "筛选地区名称": "",
+ "筛选地区替换词": "*",
+ //{year}
+ "筛选年份名称": "",
+ "筛选年份替换词": "*",
+ //{lang}
+ "筛选语言名称": "",
+ "筛选语言替换词": "*",
+ //{by}
+ "筛选排序名称": "时间&人气&评分",
+ "筛选排序替换词": "time&hits&score",
+ "分类截取模式": "1",
+ "分类列表数组规则": ".v_list&&li",
+ "分类片单是否Jsoup写法": "1",
+ "分类片单标题": "a&&title!在线观看",
+ "分类片单链接": "a&&href",
+ "分类片单图片": ".lazy&&data-bg",
+ "分类片单副标题": ".desc&&Text",
+ "分类片单链接加前缀": "https://dm84.net",
+ "分类片单链接加后缀": "",
+ "搜索请求头参数": "User-Agent$手机#Accept$text/html#accept-language$zh-CN,zh;q=0.8",
+ "搜索链接": "https://dm84.net/s-{wd}---------{SearchPg}.html",
+ "POST请求数据": "",
+ "搜索截取模式": "1",
+ "搜索列表数组规则": ".v_list&&li",
+ "搜索片单是否Jsoup写法": "1",
+ "搜索片单图片": ".lazy&&data-bg",
+ "搜索片单标题": "a&&title!在线观看",
+ "搜索片单链接": "a&&href",
+ "搜索片单副标题": ".desc&&Text",
+ "搜索片单链接加前缀": "https://dm84.net",
+ "搜索片单链接加后缀": "",
+ "链接是否直接播放": "0",
+ "直接播放链接加前缀": "",
+ "直接播放链接加后缀": "",
+ "直接播放直链视频请求头": "",
+ "详情是否Jsoup写法": "1",
+ "类型详情": "",
+ "年代详情": "",
+ "地区详情": "",
+ "演员详情": "",
+ "简介详情": ".intro&&-p&&Text",
+ "线路列表数组规则": ".play_from&&li",
+ "线路标题": "Text",
+ "播放列表数组规则": ".tab_content&&.play_list",
+ "选集列表数组规则": "a",
+ "选集标题链接是否Jsoup写法": "1",
+ "选集标题": "a&&Text",
+ "选集链接": "a&&href",
+ "是否反转选集序列": "1",
+ "选集链接加前缀": "https://dm84.net",
+ "选集链接加后缀": "",
+ "分析MacPlayer": "0",
+ "是否开启手动嗅探": "0",
+ "手动嗅探视频链接关键词": ".mp4#.m3u8#.flv#video/tos",
+ "手动嗅探视频链接过滤词": ".html#=http"
+}
\ No newline at end of file
diff --git a/XYQHiker/电影港.json b/XYQHiker/电影港.json
new file mode 100644
index 0000000..85fda74
--- /dev/null
+++ b/XYQHiker/电影港.json
@@ -0,0 +1,131 @@
+//写法思路来海阔视界,xpath筛选。本人是海阔用户,所以搬了海阔的jsoup写法过来。2022年9月17日
+//jsoup规则写法请查阅海阔视界或者海阔影视相关教程。不支持js写法
+//本文档为完整模板,请不要去无中生有添加多余的键值参数。
+{
+ //规则名
+ "title": "电影港",
+ //作者
+ "author": "香雅情",
+ //请求头UA,键名$键值,每一组用#分开,不填则默认okhttp/3.12.11,可填MOBILE_UA或PC_UA使用内置的手机版或电脑版UA
+ //多个请求头参数写法示例,"User-Agent$PC_UA#Referer$http://www.baidu.com#Cookie$ser=ok",每一组用#分开。
+ //习惯查看手机源码写建议用手机版UA,习惯查看PC版源码写建议用电脑版UA
+ "Headers":"PC_UA",
+ //网页编码格式默认UTF-8编码,UTF-8,GBK,GB2312
+ "Coding_format":"gb2312",
+ //图片是否需要代理
+ "PicNeedProxy":"0",
+ //是否开启获取首页数据,0关闭,1开启
+ "homeContent":"0",
+ //分类链接起始页码,禁止负数和含小数点。
+ "firstpage": "1",
+ //分类链接,{cateId}是分类,{catePg}是页码,第一页没有页码的可以这样写 第二页链接[firstPage=第一页的链接]
+ "class_url": "https://www.dygang.tv/{cateId}/index_{catePg}.htm[firstPage=https://www.dygang.cc/{cateId}/index.htm]",
+ //分类名,分类1&分类2&分类3
+ "class_name": "最新电影&经典高清&国配电影&经典港片&国剧&日韩剧&美剧&综艺&动漫&纪录片&高清原盘&4K高清区&3D电影&电影专题",
+ //分类名替换词,替换词1&替换词2&替换词3,替换词包含英文&的用两个中文&&代替,示例:&&id=0&&&id=1
+ "class_value": "ys&bd&gy&gp&dsj&dsj1&yx&zy&dmq&jilupian&1080p&4K&3d&dyzt",
+ //筛选数据,json格式,参考xpath的筛选写法
+ "filterdata":{},
+
+ //分类页面截取数据模式,0为json,其它数字为普通网页。
+ "cat_mode": "1",
+ //分类列表数组定位,最多支持3层,能力有限,不是所有页面都能支持
+ "cat_arr_rule": "body&&table[width=388]",
+ //分类片单信息jsoup与xb截取写法切换,只作用于html网页,1为jsoup写法(默认),0为xb写法
+ "cat_is_jsoup":"1",
+ //分类片单标题
+ "cat_title": "img&&alt",
+ //分类片单链接
+ "cat_url": "a&&href",
+ //分类片单图片,支持自定义图片链接
+ "cat_pic": "img&&src",
+ //分类片单副标题
+ "cat_subtitle":"[align=center]&&Text",
+ //分类片单链接补前缀
+ "cat_prefix": "https://www.dygang.tv",
+ //分类片单链接补后缀
+ "cat_suffix": "",
+
+ //搜索请求头参数,不填则默认okhttp/3.12.11,可填MOBILE_UA或PC_UA使用内置的手机版或电脑版UA
+ //多个请求头参数写法示例,键名$键值,每一组用#分开。"User-Agent$PC_UA#Referer$http://www.baidu.com#Cookie$ser=ok"。
+ "SHeaders":"User-Agent$PC_UA#Content-Type$charset=gb2312",
+ //搜索链接,搜索关键字用{wd}表示,post请求的最后面加;post
+ //POST链接示例 http://www.lezhutv.com/index.php?m=vod-search;post
+ "search_url": "https://www.dygang.tv/e/search/index123.php;post",
+ //POST搜索body,填写搜索关键字的键值,一般常见的是searchword和wd,不是POST搜索的可留空或删除。
+ "sea_PtBody":"keyboard={wd}&submit=搜+索&show=title,smalltext&tempid=1&tbname=article",
+
+ //搜索截取模式,0为json搜索,只支持列表在list数组里的,其它数字为网页截取。
+ "search_mode": "1",
+ //搜索列表数组定位,不填默认内置list,最多支持3层,能力有限,不是所有页面都能支持。
+ "sea_arr_rule": "body&&table[width=388]",
+ //搜索片单信息jsoup与xb截取写法切换,只作用于html网页,1为jsoup写法(默认),0为xb写法
+ "sea_is_jsoup":"1",
+ //搜索片单图片,支持自定义图片链接
+ "sea_pic": "img&&src",
+ //搜索片单标题
+ "sea_title": "img&&alt",
+ //搜索片单链接
+ "sea_url": "a&&href",
+ //搜索片单副标题
+ "sea_subtitle":"",
+ //搜索片单链接补前缀
+ "search_prefix": "https://www.dygang.tv",
+ //搜索片单链接补后缀,这个一般json搜索的需要
+ "search_suffix": "",
+
+ //片单链接是否直接播放,0否,1分类片单链接直接播放,2详情选集链接直接播放。
+ //设置成直接播放后,后面3个参数请注意该留空的请务必留空。
+ "force_play": "0",
+ //直接播放链接补前缀
+ "play_prefix": "",
+ //直接播放链接补后缀,设置为#isVideo=true#可强制识别为视频链接
+ "play_suffix": "",
+ //直接播放链接设置请求头,只对直链视频有效,每一组用#分开
+ "play_header": "",
+
+ //项目信息jsoup与xb截取写法切换,1为jsoup写法(默认),0为xb写法
+ "proj_is_jsoup":"0",
+ //类型数据,截取前缀&&截取后缀
+ "proj_cate": "",
+ //年代数据,截取前缀&&截取后缀
+ "proj_year": "",
+ //地区数据,截取前缀&&截取后缀
+ "proj_area": "",
+ //演员数据,截取前缀&&截取后缀
+ "proj_actor": "演 员&&",
+ //简介内容,截取前缀&&截取后缀
+ "proj_plot": "简 介&&",
+
+ //线路截取区域,如果不需要请把tab_title或tab_arr_rule置空或者全部不要填。
+ //线路截取数组
+ "tab_arr_rule": "#dede_content",
+ //线路标题,截取前缀&&截取后缀
+ "tab_title": "strong&&Text",
+
+ //列表数组截取,必须
+ "list_arr_rule": "#dede_content",
+ //集数数组截取,必须
+ "epi_arr_rule": "table&&[href*=magnet]",
+ //集数标题,截取前缀&&截取后缀
+ "epi_title": "a&&Text",
+ //集数链接,截取前缀&&截取后缀
+ "epi_url": "a&&href",
+ //选集是否反转显示
+ "epi_reverse": "0",
+ //集数链接补前缀
+ "epiurl_prefix": "",
+ //集数链接补后缀
+ "epiurl_suffix": "",
+
+ //下面几个参数请勿乱用。否则可能会有副作用。
+ //分析网页源码中有'
+ next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
+ if not next_data_match:
+ return {'list': []}
+
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+
+ # 处理轮播图数据
+ if "bannerList" in page_props:
+ for banner in page_props["bannerList"]:
+ if banner.get("bookId"):
+ videos.append({
+ "vod_id": f"/drama/{banner['bookId']}",
+ "vod_name": banner.get("bookName", ""),
+ "vod_pic": banner.get("coverWap", ""),
+ "vod_remarks": f"{banner.get('statusDesc', '')} {banner.get('totalChapterNum', '')}集".strip()
+ })
+
+ # 处理SEO分类推荐
+ if "seoColumnVos" in page_props:
+ for column in page_props["seoColumnVos"]:
+ for book in column.get("bookInfos", []):
+ if book.get("bookId"):
+ videos.append({
+ "vod_id": f"/drama/{book['bookId']}",
+ "vod_name": book.get("bookName", ""),
+ "vod_pic": book.get("coverWap", ""),
+ "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
+ })
+
+ # 去重处理
+ seen = set()
+ unique_videos = []
+ for video in videos:
+ key = (video["vod_id"], video["vod_name"])
+ if key not in seen:
+ seen.add(key)
+ unique_videos.append(video)
+
+ except Exception as e:
+ print(f"获取首页推荐内容出错: {e}")
+ unique_videos = []
+
+ return {'list': unique_videos}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
+ url = f"{self.siteUrl}/browse/{tid}/{pg}"
+
+ response = self.fetch(url)
+ if not response:
+ return result
+
+ html_content = response.text
+ next_data_match = re.search(r'', html_content, re.DOTALL)
+ if not next_data_match:
+ return result
+
+ try:
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+
+ current_page = page_props.get("page", 1)
+ total_pages = page_props.get("pages", 1)
+ book_list = page_props.get("bookList", [])
+
+ videos = []
+ for book in book_list:
+ if book.get("bookId"):
+ videos.append({
+ "vod_id": f"/drama/{book['bookId']}",
+ "vod_name": book.get("bookName", ""),
+ "vod_pic": book.get("coverWap", ""),
+ "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
+ })
+
+ result.update({
+ 'list': videos,
+ 'page': int(current_page),
+ 'pagecount': total_pages,
+ 'limit': len(videos),
+ 'total': len(videos) * total_pages if videos else 0
+ })
+
+ except Exception as e:
+ print(f"分类内容获取出错: {e}")
+
+ return result
+
+ def searchContent(self, key, quick, pg=1):
+ return self.searchContentPage(key, quick, pg)
+
+ def searchContentPage(self, key, quick, pg=1):
+ result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
+ search_url = f"{self.siteUrl}/search?searchValue={quote(key)}&page={pg}"
+
+ response = self.fetch(search_url)
+ if not response:
+ return result
+
+ html_content = response.text
+ next_data_match = re.search(r'', html_content, re.DOTALL)
+ if not next_data_match:
+ return result
+
+ try:
+ next_data_json = json.loads(next_data_match.group(1))
+ page_props = next_data_json.get("props", {}).get("pageProps", {})
+
+ total_pages = page_props.get("pages", 1)
+ book_list = page_props.get("bookList", [])
+
+ videos = []
+ for book in book_list:
+ if book.get("bookId"):
+ videos.append({
+ "vod_id": f"/drama/{book['bookId']}",
+ "vod_name": book.get("bookName", ""),
+ "vod_pic": book.get("coverWap", ""),
+ "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
+ })
+
+ result.update({
+ 'list': videos,
+ 'pagecount': total_pages,
+ 'total': len(videos) * total_pages if videos else 0
+ })
+
+ except Exception as e:
+ print(f"搜索内容出错: {e}")
+
+ return result
+
+ def detailContent(self, ids):
+ result = {'list': []}
+ if not ids:
+ return result
+
+ vod_id = ids[0]
+ if not vod_id.startswith('/drama/'):
+ vod_id = f'/drama/{vod_id}'
+
+ drama_url = f"{self.siteUrl}{vod_id}"
+ response = self.fetch(drama_url)
+ if not response:
+ return result
+
+ html = response.text
+ next_data_match = re.search(r'', html, re.DOTALL)
+ if not next_data_match:
+ return result
+
+ try:
+ next_data = json.loads(next_data_match.group(1))
+ page_props = next_data.get("props", {}).get("pageProps", {})
+ book_info = page_props.get("bookInfoVo", {})
+ chapter_list = page_props.get("chapterList", [])
+
+ if not book_info.get("bookId"):
+ return result
+
+ # 基本信息
+ categories = [c.get("name", "") for c in book_info.get("categoryList", [])]
+ performers = [p.get("name", "") for p in book_info.get("performerList", [])]
+
+ vod = {
+ "vod_id": vod_id,
+ "vod_name": book_info.get("title", ""),
+ "vod_pic": book_info.get("coverWap", ""),
+ "type_name": ",".join(categories),
+ "vod_year": "",
+ "vod_area": book_info.get("countryName", ""),
+ "vod_remarks": f"{book_info.get('statusDesc', '')} {book_info.get('totalChapterNum', '')}集".strip(),
+ "vod_actor": ", ".join(performers),
+ "vod_director": "",
+ "vod_content": book_info.get("introduction", "")
+ }
+
+ # 处理剧集
+ play_urls = self.processEpisodes(vod_id, chapter_list)
+ if play_urls:
+ vod['vod_play_from'] = '河马剧场'
+ vod['vod_play_url'] = '$$$'.join(play_urls)
+
+ result['list'] = [vod]
+
+ except Exception as e:
+ print(f"详情页解析出错: {e}")
+ traceback.print_exc()
+
+ return result
+
+ def processEpisodes(self, vod_id, chapter_list):
+ play_urls = []
+ episodes = []
+
+ for chapter in chapter_list:
+ chapter_id = chapter.get("chapterId", "")
+ chapter_name = chapter.get("chapterName", "")
+
+ if not chapter_id or not chapter_name:
+ continue
+
+ # 尝试获取直接视频链接
+ video_url = self.getDirectVideoUrl(chapter)
+ if video_url:
+ episodes.append(f"{chapter_name}${video_url}")
+ continue
+
+ # 回退方案
+ episodes.append(f"{chapter_name}${vod_id}${chapter_id}${chapter_name}")
+
+ if episodes:
+ play_urls.append("#".join(episodes))
+
+ return play_urls
+
+ def getDirectVideoUrl(self, chapter):
+ if "chapterVideoVo" not in chapter or not chapter["chapterVideoVo"]:
+ return None
+
+ video_info = chapter["chapterVideoVo"]
+ for key in ["mp4", "mp4720p", "vodMp4Url"]:
+ if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
+ return video_info[key]
+ return None
+
+ def playerContent(self, flag, id, vipFlags):
+ result = {
+ "parse": 0,
+ "url": id,
+ "header": json.dumps(self.headers)
+ }
+
+ # 如果已经是视频链接直接返回
+ if 'http' in id and ('.mp4' in id or '.m3u8' in id):
+ return result
+
+ # 解析参数
+ parts = id.split('$')
+ if len(parts) < 2:
+ return result
+
+ drama_id = parts[0].replace('/drama/', '')
+ chapter_id = parts[1]
+
+ # 尝试获取视频链接
+ video_url = self.getEpisodeVideoUrl(drama_id, chapter_id)
+ if video_url:
+ result["url"] = video_url
+
+ return result
+
+ def getEpisodeVideoUrl(self, drama_id, chapter_id):
+ episode_url = f"{self.siteUrl}/episode/{drama_id}/{chapter_id}"
+ response = self.fetch(episode_url)
+ if not response:
+ return None
+
+ html = response.text
+
+ # 方法1: 从NEXT_DATA提取
+ next_data_match = re.search(r'', html, re.DOTALL)
+ if next_data_match:
+ try:
+ next_data = json.loads(next_data_match.group(1))
+ page_props = next_data.get("props", {}).get("pageProps", {})
+ chapter_info = page_props.get("chapterInfo", {})
+
+ if chapter_info and "chapterVideoVo" in chapter_info:
+ video_info = chapter_info["chapterVideoVo"]
+ for key in ["mp4", "mp4720p", "vodMp4Url"]:
+ if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
+ return video_info[key]
+ except:
+ pass
+
+ # 方法2: 直接从HTML提取
+ mp4_matches = re.findall(r'(https?://[^"\']+\.mp4)', html)
+ if mp4_matches:
+ for url in mp4_matches:
+ if chapter_id in url or drama_id in url:
+ return url
+ return mp4_matches[0]
+
+ return None
+
+ def localProxy(self, param):
+ return [200, "video/MP2T", {}, param]
+
+ def destroy(self):
+ pass
\ No newline at end of file
diff --git a/py/火车影视.py b/py/火车影视.py
new file mode 100644
index 0000000..6a80179
--- /dev/null
+++ b/py/火车影视.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+from urllib.parse import urlparse
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the "火车影视" app API.

    Every API request carries a `pack` (RSA-PKCS1-v1.5-encrypted, URL-safe
    base64 JSON body) and a `signature` (HMAC-MD5 of the pack), both produced
    by url(); the config response is AES-CBC encrypted and decoded by aes().
    NOTE: the class intentionally shadows the imported base `Spider`; this is
    the convention used by the other spiders in this repo.
    """

    def init(self, extend=""):
        # Per-session random device id and API host (resolved via DoH).
        self.device = self.device_id()
        self.host = self.gethost()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build the category list and per-category filter metadata from the
        app-config endpoint."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        # Maps API filter keys to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        # Drop the first sort entry (placeholder), then rename fields to the
        # n/v shape the host app expects.
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            # Only emit a filter group for categories that define at least one
            # non-empty filter dimension.
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        # First entry is a placeholder ("all"); drop it.
                        item[dkey].pop(0)
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Flatten the home-page recommendation rails into one list."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        videos.append(self.voides(it))
                    except Exception as e:
                        # Skip malformed entries rather than failing the page.
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Return one page of a category listing, applying `extend` filters."""
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        list = []
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            list.append(self.voides(item))
        result["list"] = list
        result["page"] = pg
        # The API does not report totals; advertise "endless" paging.
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Fetch a movie's detail; sources without inlined episode lists are
        fetched concurrently via playlist()."""
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        # NOTE(review): 'dynami' looks like a typo for 'dynamic' (the key
        # voides() reads) — confirm against the live API payload.
        video = {'vod_name': data.get('name'), 'type_name': data.get('type_name'), 'vod_year': data.get('year'),
                 'vod_area': data.get('area'), 'vod_remarks': data.get('dynami'), 'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            a = []  # NOTE(review): unused.
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                # Episode list not inlined: fetch it in a worker thread below.
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        """Keyword search; returns one page of matches."""
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        for it in data:
            try:
                videos.append(self.voides(it))
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Turn a play token into a direct URL.

        `id` is either a direct m3u8/mp4 URL or base64-encoded JSON resolved
        through the parse_url endpoint. Image-disguised streams are routed
        through the local proxy.
        """
        url = id
        if not re.search(r"\.m3u8|\.mp4", url):
            try:
                data = json.loads(b64decode(id.encode('utf-8')).decode('utf-8'))
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                url = data2.get('play_url') or data2.get('download_url')
            except Exception as e:
                # Resolution failed: fall through with the original token.
                pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def Mproxy(self, url):
        """Wrap a URL so the host app routes it through localProxy."""
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Fetch an m3u8 playlist (following one redirect) and absolutize
        relative segment URIs against the final host."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): this keeps only the directory part (`lpath`)
                # and drops the segment filename — it looks like the full
                # `string` should be appended instead; confirm against a live
                # playlist before changing.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # NOTE(review): "mpegur" looks like a typo for "mpegurl"; most players
        # ignore the MIME type, so it is left as-is here.
        return [200, "application/vnd.apple.mpegur", data]

    def device_id(self):
        """Random 32-char [a-z0-9] device identifier."""
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the API host via DNS-over-HTTPS (the query is pre-encoded
        in the `dns` parameter); falls back to a hard-coded host on any error."""
        try:
            url = 'https://dns.alidns.com/dns-query'
            headers = {
                'User-Agent': 'okhttp/4.9.2',
                'Accept': 'application/dns-message'
            }
            params = {
                'dns': 'AAABAAABAAAAAAAACWJmbTExYXM5ZgdmdXFpeXVuAmNuAAAcAAE'
            }
            response = self.fetch(url, headers=headers, params=params)
            host=self.parse_dns_name(response.content, 12)
            return f"https://{host}"
        except:
            return "https://bfm11as9f.fuqiyun.cn"

    def parse_dns_name(self, data, offset):
        """Decode a DNS name (length-prefixed labels) from a raw DNS message.

        `offset` 12 skips the fixed DNS header, landing on the QNAME.
        Assumes no compression pointers — TODO confirm the resolver never
        compresses this field.
        """
        parts = []
        while True:
            length = data[offset]
            if length == 0:
                break
            offset += 1
            parts.append(data[offset:offset + length].decode('utf-8'))
            offset += length
        return '.'.join(parts)

    def header(self):
        """Common API request headers, including the per-session device id."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': str(int(time.time())),
            'x-client-setting': '{"pure-mode":0}',
            'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """Encrypt and sign a request body.

        Returns (pack, signature): `pack` is the RSA-PKCS1-v1.5-encrypted JSON
        body as URL-safe base64 without padding; `signature` is its HMAC-MD5
        under a fixed key.
        """
        if not id:
            id = {}
        id["timestamp"] = str(int(time.time()))
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one source's episode list; returns the playeach() string, or
        [] on failure (treated as falsy by detailContent)."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """Format episodes as 'name$url' pairs joined by '#'.

        Non-direct URLs are packed as base64 JSON so playerContent can resolve
        them later via the parse_url endpoint.
        """
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                vd={"from_code": it['from_code'], "play_url": it['play_url'], "episode_id": it['episode_id'], "type": "play"}
                play_urls.append(
                    f"{it['episode_name']}${b64encode(json.dumps(vd).encode('utf-8')).decode('utf-8')}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Map an API movie record to the host app's vod dict.

        NOTE(review): returns None implicitly when both 'name' and 'title'
        are falsy, and raises KeyError when 'name' (or, if 'name' is falsy,
        'title') is missing — callers wrap this in try/except.
        """
        if item['name'] or item['title']:
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide

    def aes(self, text):
        """Decrypt an AES-128-CBC, URL-safe-base64 API response into JSON."""
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
diff --git a/py/爱奇艺.py b/py/爱奇艺.py
new file mode 100644
index 0000000..2b0af3d
--- /dev/null
+++ b/py/爱奇艺.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import urlencode
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for iqiyi.com (爱奇艺) built on its mesh and mini-app APIs.

    vod ids are '<qipu_id>@<base64(page_url)>' so detailContent can hit the
    id-keyed mini-app API and still recover the original page URL.
    Fix: the search URL contained '¤t_page' — HTML-entity mangling of
    '&current_page' (the '&curren' entity rendered as '¤') — restored below.
    NOTE: shadowing the imported base `Spider` is this repo's convention.
    """

    def init(self, extend=""):
        # Random hex device id sent to the library/search endpoints.
        self.did = self.random_str(32)
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Site root (also used as Origin/Referer), mesh API host, mini-app host.
    rhost = 'https://www.iqiyi.com'

    hhost='https://mesh.if.iqiyi.com'

    dhost='https://miniapp.iqiyi.com'

    headers = {
        'Origin': rhost,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
        'Referer': f'{rhost}/',
    }

    def homeContent(self, filter):
        """Return the fixed channel list plus per-channel filter tags
        (fetched concurrently via getf)."""
        result = {}
        cateManual = {
            "全部": "1009",
            "电影": "1",
            "剧集": "2",
            "综艺": "6",
            "动漫": "4",
            "儿童": "15",
            "微剧": "35",
            "纪录片": "3"
        }
        classes = []
        filters = {}
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            for id, ft in results:
                if len(ft):filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Collect home-page recommendation rails (skipping the first rail)."""
        data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
        vlist = []
        for i in data['items'][1:]:
            for j in i['video'][0]['data']:
                id = j.get('firstId')
                pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
                if id and pic:
                    pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
                    id = f'{id}@{self.e64(pu)}'
                    vlist.append({
                        'vod_id': id,
                        'vod_name': j.get('display_name'),
                        'vod_pic': pic,
                        'vod_year': j.get('sns_score'),
                        'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
                    })
        return {'list':vlist}

    def categoryContent(self, tid, pg, filter, extend):
        """One page of the channel library.

        `extend` values are base64-encoded 'k=v[,k=v]' lists merged into the
        filter query. Paging uses an opaque session token returned by each
        response (reset when pg == "1").
        """
        if pg == "1":
            self.sid = ''
        new_data = {'mode':'24'}
        for key, value in extend.items():
            if value:
                key_value_pairs = self.d64(value).split(',')
                for pair in key_value_pairs:
                    k, v = pair.split('=')
                    if k in new_data:
                        new_data[k] += "," + v
                    else:
                        new_data[k] = v
        path=f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
        data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
        self.sid = data['session']
        videos = []
        for i in data['data']:
            id = i.get('firstId') or i.get('tv_id')
            if not id:
                # Fall back to the id embedded in the play URL query string.
                id=i.get('play_url').split(';')[0].split('=')[-1]
            if id and not i.get('h'):
                id=f'{id}@{self.e64(i.get("page_url"))}'
                videos.append({
                    'vod_id': id,
                    'vod_name': i.get('display_name'),
                    'vod_pic': i.get('album_image_url_hover'),
                    'vod_year': i.get('sns_score'),
                    'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
                })
        result = {}
        result['list'] = videos
        result['page'] = pg
        # The API does not report totals; advertise "endless" paging.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch album detail and the full episode list.

        ids[0] is '<qipu_id>@<base64 page_url>'. Albums page their episode
        lists; extra pages are fetched concurrently. Single videos (no
        videoList) fall back to the play/head endpoint.
        """
        ids = ids[0].split('@')
        ids[-1] = self.d64(ids[-1])
        data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
        v=data['data']['playInfo']
        vod = {
            'vod_name': v.get('albumName'),
            'type_name': v.get('tags'),
            'vod_year': v.get('albumYear'),
            'vod_remarks': v.get('updateStrategy'),
            'vod_actor': v.get('mainActors'),
            'vod_director': v.get('directors'),
            'vod_content': v.get('albumDesc'),
            'vod_play_from': '爱奇艺',
            'vod_play_url': ''
        }
        if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
            purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
            pg=data['data']['videoList'].get('totalPages')
            if pg and pg > 1:
                id = v['albumId']
                pages = list(range(2, pg + 1))
                page_results = {}
                with ThreadPoolExecutor(max_workers=10) as executor:
                    future_to_page = {
                        executor.submit(self.fetch_page_data, page, id): page
                        for page in pages
                    }
                    for future in as_completed(future_to_page):
                        page = future_to_page[future]
                        try:
                            result = future.result()
                            page_results[page] = result
                        except Exception as e:
                            print(f"Error fetching page {page}: {e}")
                # Reassemble in page order: as_completed yields out of order.
                for page in sorted(page_results.keys()):
                    purl.extend(page_results[page])
            vod['vod_play_url'] = '#'.join(purl)
        else:
            vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
            v=vdata['data']['playInfo']
            vod = {
                'vod_name': v.get('shortTitle'),
                'type_name': v.get('channelName'),
                'vod_year': v.get('year'),
                'vod_remarks': v.get('focus'),
                'vod_actor': v.get('mainActors'),
                'vod_director': v.get('directors'),
                'vod_content': v.get('desc'),
                'vod_play_from': '爱奇艺',
                'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
            }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search; merges 'intent' album hits ahead of normal rows."""
        data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
        videos = []
        vdata=data['data']['templates']
        for i in data['data']['templates']:
            if i.get('intentAlbumInfos'):
                vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata

        for i in vdata:
            if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
                b=i['albumInfo']
                id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
                videos.append({
                    'vod_id': id,
                    'vod_name': b.get('title'),
                    'vod_pic': b.get('img'),
                    'vod_year': (b.get('year',{}) or {}).get('value'),
                    'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
                })
        return {'list':videos,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Hand the episode page URL to the host app's external parser (jx)."""
        return {'jx':1,'parse': 1, 'url': id, 'header': ''}

    def localProxy(self, param):
        pass

    def fetch_page_data(self, page, id):
        """Fetch one extra episode-list page; empty list on any error."""
        try:
            url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
            data = self.fetch(url, headers=self.headers).json()
            return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
        except Exception:
            return []

    def getf(self,body):
        """Fetch filter tags for one channel; returns (type_id, filter list)."""
        data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
        ft = []
        for i in data:
            try:
                value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
                               value.get('tag_param')]
                ft.append({"key": i['group'], "name": i['group'], "value": value_array})
            except Exception:
                # Malformed tag group: log it and keep the rest.
                print(i)
        return (body['type_id'], ft)

    def e64(self, text):
        """UTF-8 → base64 string; '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self,encoded_text: str):
        """base64 string → UTF-8; '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def random_str(self,length=16):
        """Random lowercase-hex string of the given length."""
        hex_chars = '0123456789abcdef'
        return ''.join(random.choice(hex_chars) for _ in range(length))
diff --git a/py/爱看短剧.py b/py/爱看短剧.py
new file mode 100644
index 0000000..b447431
--- /dev/null
+++ b/py/爱看短剧.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """Spider for www.toule.top (爱看短剧), scraped with PyQuery CSS selectors.

    NOTE: the class intentionally shadows the imported base `Spider`; this is
    the convention used by the other spiders in this repo.
    """

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host='http://www.toule.top'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
        'Referer':f'{host}/',
        'Origin':host
    }

    def homeContent(self, filter):
        """Home page: categories come from the nav swiper slides, the list
        from the items grid. Category ids are the display names themselves."""
        data=self.getpq()
        result = {}
        classes = []
        for k in data('.swiper-wrapper .swiper-slide').items():
            classes.append({
                'type_name': k.text(),
                'type_id': k.text()
            })
        result['class'] = classes
        result['list'] = self.getlist(data('.container.items ul li'))
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """One page of a category; totals are faked because the site exposes
        no count."""
        data=self.getpq(f"/index.php/vod/show/class/{tid}/id/1/page/{pg}.html")
        result = {}
        result['list'] = self.getlist(data('.container.items ul li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: ids[0] is the page path; episodes are the swiper
        slides."""
        data=self.getpq(ids[0])
        v=data('.container.detail-content')
        # NOTE(review): vod_name/vod_pic are omitted here — presumably the
        # host app keeps them from the list entry; confirm.
        vod = {
            'vod_remarks': v('.items-tags a').text(),
            'vod_content': v('.text-content .detail').text(),
            'vod_play_from': '爱看短剧',
            'vod_play_url': '#'.join([f"{i.text()}${i('a').attr('href')}" for i in data('.swiper-wrapper .swiper-slide').items()])
        }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search via the site's path-style search URL."""
        data=self.getpq(f"/index.php/vod/search/page/{pg}/wd/{key}.html")
        return {'list':self.getlist(data('.container.items ul li')),'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Extract the direct play URL from the player page's inline script
        (JSON after the first '='); on failure, hand the page URL to the host
        app's sniffer (parse=1)."""
        data=self.getpq(id)
        try:
            jstr=data('.player-content script').eq(0).text()
            jt=json.loads(jstr.split('=',1)[-1])
            p,url=0,jt['url']
        except Exception as e:
            print(f"获取播放地址失败: {e}")
            p,url=1,f'{self.host}{id}'
        return {'parse': p, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def getpq(self, path=''):
        """GET host+path and parse with PyQuery; if the text parse fails,
        retry on the UTF-8-encoded bytes."""
        data=self.fetch(f"{self.host}{path}",headers=self.headers).text
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))

    def getlist(self,data):
        """Map item <li> nodes to vod dicts."""
        videos = []
        for i in data.items():
            videos.append({
                'vod_id': i('.image-line').attr('href'),
                'vod_name': i('img').attr('alt'),
                'vod_pic': i('img').attr('src'),
                'vod_remarks': i('.remarks.light').text()
            })
        return videos
\ No newline at end of file
diff --git a/py/猎手影视.py b/py/猎手影视.py
new file mode 100644
index 0000000..1a6a4d7
--- /dev/null
+++ b/py/猎手影视.py
@@ -0,0 +1,279 @@
+# coding=utf-8
+# !/usr/bin/python
+# by嗷呜(finally)
+import sys
+import os
+sys.path.append("..")
+import re
+import hashlib
+import hmac
+import random
+import string
+from Crypto.Util.Padding import unpad
+from concurrent.futures import ThreadPoolExecutor
+from Crypto.PublicKey import RSA
+from Crypto.Cipher import PKCS1_v1_5, AES
+from base64 import b64encode, b64decode
+import json
+import time
+from base.spider import Spider
+
class Spider(Spider):
    """"电影猎手" app source.

    Every API call carries a pack/signature pair built by url(): the request
    payload is RSA-encrypted and then HMAC-MD5 signed.  The /app/config
    response is additionally AES-CBC encoded and decoded by aes().

    Fixes vs. the original:
    - voides() used item['name']/item['title'] direct indexing, raising
      KeyError whenever one key variant was absent; callers' try/except
      loops then silently dropped valid rows.  Now uses .get() and callers
      skip the None it returns for unnamed items.
    - localProxy() returned the misspelled MIME type
      "application/vnd.apple.mpegur" (missing trailing "l").
    - inner bare `except:` in playerContent narrowed to `except Exception`.
    """

    def getName(self):
        return "电影猎手"

    def init(self, extend=""):
        # Per-session random device id; API host resolved through the remote
        # domain dispatcher (gethost()).
        self.device = self.device_id()
        self.host = self.gethost()
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    # NOTE(review): evaluated once at import time, so every request reuses
    # the same timestamp; confirm the server does not enforce freshness.
    t = str(int(time.time()))

    def homeContent(self, filter):
        """Build category tabs and per-category filter groups from /app/config."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        # API filter keys -> display names.
        dy = {"class":"类型","area":"地区","lang":"语言","year":"年份","letter":"字母","by":"排序","sort":"排序"}
        # First sort entry is a placeholder; the rest are renamed to n/v.
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        item[dkey].pop(0)  # drop the "all" placeholder
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Landing-page recommendation rows flattened into one list."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        v = self.voides(it)
                        if v:  # voides() returns None for unnamed items
                            videos.append(v)
                    except Exception as e:
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """One page of a category listing, honouring the active filters."""
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        list = []
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            v = self.voides(item)
            if v:
                list.append(v)
        result["list"] = list
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Movie detail; sources whose play list comes back empty are
        resolved concurrently through playlist()."""
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        # NOTE(review): 'dynami' looks like a typo for 'dynamic' (cf.
        # voides()), but is kept until verified against the live API.
        video = {'vod_name': data.get('name'),'type_name': data.get('type_name'),'vod_year': data.get('year'),'vod_area': data.get('area'),'vod_remarks': data.get('dynami'),'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                # Empty list: queue a lazy fetch keyed by source code.
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        """Keyword search, one page of results."""
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        for it in data:
            try:
                v = self.voides(it)
                if v:
                    videos.append(v)
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode token (from playeach()) to a direct stream url."""
        url = id
        if "m3u8" not in url and "mp4" not in url:
            try:
                add = id.split('|||')
                data = {"from_code": add[0], "play_url": add[1], "episode_id": add[2], "type": "play"}
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                url = data2.get('play_url') or data2.get('download_url')
                try:
                    # Follow one redirect manually to reach the final CDN url.
                    url1 = self.fetch(url, headers=self.header(), allow_redirects=False).headers['Location']
                    if url1 and "http" in url1:
                        url = url1
                except Exception:
                    pass
            except Exception as e:
                pass
        if '.jpg' in url or '.jpeg' in url or '.png' in url:
            # Playlists disguised with image extensions go through localProxy.
            url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        """Rewrite a remote m3u8 so relative segment urls become absolute."""
        url = b64decode(param["url"]).decode('utf-8')
        durl = url[:url.rfind('/')]
        data = self.fetch(url, headers=self.header()).content.decode("utf-8")
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            # if 'URI="' in string and 'http' not in string:
            #     lines[index] = index
            # (reserved for key URIs; apparently unused)
            if '#EXT' not in string and 'http' not in string:
                lines[index] = durl + ('' if string.startswith('/') else '/') + string
        data = '\n'.join(lines)
        # Fixed content type: was "application/vnd.apple.mpegur" (missing the
        # trailing "l" of the registered HLS type).
        return [200, "application/vnd.apple.mpegurl", data]

    def device_id(self):
        """Random 32-char [a-z0-9] device identifier."""
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the current API host from the remote domain dispatcher."""
        headers = {
            'User-Agent': 'okhttp/4.9.2',
            'Connection': 'Keep-Alive',
        }
        response = self.fetch('https://app-site.ecoliving168.com/domain_v5.json', headers=headers).json()
        url = response['api_service'].replace('/api/', '')
        return url

    def header(self):
        """Common request headers; timestamp must match the one signed in url()."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': self.t,
            'x-client-setting': '{"pure-mode":1}',
            # NOTE(review): this value is not valid JSON (unquoted id, stray
            # brace) but is sent verbatim and apparently accepted; left as-is.
            'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """RSA-encrypt the payload, then HMAC-MD5 sign it.

        Returns (pack, signature) for the query string.
        """
        if not id:
            id = {}
        id["timestamp"] = self.t
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        # URL-safe base64 without padding.
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one source's episode list; falsy [] on any failure so the
        caller can substitute an empty string."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """"name$url" pairs; non-direct urls are packed as
        from|||play_url|||episode_id for playerContent() to resolve later."""
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                play_urls.append(
                    f"{it['episode_name']}${it['from_code']}|||{it['play_url']}|||{it['episode_id']}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Map an API item to a vod dict; None when it has no usable name.

        Items carry either name/cover/year/dynamic or title/image/label/
        sub_title depending on the endpoint, so every field uses .get().
        """
        if item.get('name') or item.get('title'):
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide

    def aes(self, text):
        """Decode a url-safe-base64, AES-128-CBC response body into JSON."""
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
diff --git a/py/甜圈短剧.py b/py/甜圈短剧.py
new file mode 100644
index 0000000..40cac38
--- /dev/null
+++ b/py/甜圈短剧.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """"甜圈短剧" short-drama source backed by the cenguigui duanju API."""

    # API host (updated domain).
    ahost = 'https://mov.cenguigui.cn'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'Sec-Fetch-Site': 'cross-site',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Dest': 'video',
        'Sec-Fetch-Storage-Access': 'active',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }

    # Category names double as their type_ids; the leading ranking entry gets
    # a different emoji prefix in homeContent().
    _cate_names = [
        '推荐榜', '新剧', '逆袭', '霸总', '现代言情', '打脸虐渣', '豪门恩怨',
        '神豪', '马甲', '都市日常', '战神归来', '小人物', '女性成长', '大女主',
        '穿越', '都市修仙', '强者回归', '亲情', '古装', '重生', '闪婚',
        '赘婿逆袭', '虐恋', '追妻', '天下无敌', '家庭伦理', '萌宝', '古风权谋',
        '职场', '奇幻脑洞', '异能', '无敌神医', '古风言情', '传承觉醒',
        '现言甜宠', '奇幻爱情', '乡村', '历史古代', '王妃', '高手下山',
        '娱乐圈', '强强联合', '破镜重圆', '暗恋成真', '民国', '欢喜冤家',
        '系统', '真假千金', '龙王', '校园', '穿书', '女帝', '团宠',
        '年代爱情', '玄幻仙侠', '青梅竹马', '悬疑推理', '皇后', '替身',
        '大叔', '喜剧', '剧情',
    ]

    def init(self, extend=""):
        pass

    def getName(self):
        return "甜圈短剧"

    def isVideoFormat(self, url):
        return True

    def manualVideoCheck(self):
        return False

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Static category tabs built from _cate_names."""
        classes = [
            {'type_id': name,
             'type_name': f"{'🔥' if name == '推荐榜' else '🎬'} {name}"}
            for name in self._cate_names
        ]
        return {'class': classes}

    def homeVideoContent(self):
        return []

    def categoryContent(self, tid, pg, filter, extend):
        """One page of a category (offset is zero-based page index)."""
        query = {
            'classname': tid,
            'offset': str(int(pg) - 1),
        }
        # Request path is /duanju/api.php on the new host.
        payload = self.fetch(f'{self.ahost}/duanju/api.php', params=query, headers=self.headers).json()
        videos = [
            {
                'vod_id': item.get('book_id'),
                'vod_name': item.get('title'),
                'vod_pic': item.get('cover'),
                'vod_year': item.get('score'),
                'vod_remarks': f"{item.get('sub_title')}|{item.get('episode_cnt')}",
            }
            for item in payload['data']
        ]
        return {
            'list': videos,
            'page': pg,
            'pagecount': 9999,
            'limit': 90,
            'total': 999999,
        }

    def detailContent(self, ids):
        """Series detail plus the "title$video_id" episode list."""
        info = self.fetch(f'{self.ahost}/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
        episodes = [f"{ep['title']}${ep['video_id']}" for ep in info['data']]
        vod = {
            'vod_id': ids[0],
            'vod_name': info.get('title'),
            'type_name': info.get('category'),
            'vod_year': info.get('time'),
            'vod_remarks': info.get('duration'),
            'vod_content': info.get('desc'),
            'vod_play_from': '爱看短剧',
            'vod_play_url': '#'.join(episodes)
        }
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        # The API treats the keyword like a classname; reuse category paging.
        return self.categoryContent(key, pg, True, {})

    def playerContent(self, flag, id, vipFlags):
        """Resolve a video_id to its direct url."""
        payload = self.fetch(f'{self.ahost}/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
        return {'parse': 0, 'url': payload['data']['url'], 'header': self.headers}

    def localProxy(self, param):
        pass
\ No newline at end of file
diff --git a/py/界影视.py b/py/界影视.py
new file mode 100644
index 0000000..e4820bf
--- /dev/null
+++ b/py/界影视.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+# @Author : Doubebly
+# @Time : 2025/1/21 23:07
+
+import hashlib
+import re
+import sys
+import time
+import requests
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """"界影视" (JieYingShi) source for hkybqufgh.com.

    Listing pages are scraped with regexes over JSON-escaped page source;
    detail and play urls come from the signed mw-movie JSON API
    (see get_headers()).

    Fix vs. the original: get_data() indexed four parallel findall() lists
    by position; when one field was missing for an item the lists diverged
    and the loop raised an uncaught IndexError (only RequestException was
    handled).  zip() now pairs the lists safely.
    """

    def getName(self):
        return "JieYingShi"

    def init(self, extend):
        self.home_url = 'https://www.hkybqufgh.com'
        # Served in place of a real stream when resolution fails.
        self.error_url = 'https://json.doube.eu.org/error/4gtv/index.m3u8'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
        }

    def getDependence(self):
        return []

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def homeContent(self, filter):
        # Static categories; ids match the site's /vod/show/id/N routes.
        return {'class': [
            {
                'type_id': '1',
                'type_name': '电影'
            },
            {
                'type_id': '2',
                'type_name': '电视剧'
            },
            {
                'type_id': '4',
                'type_name': '动漫'
            },
            {
                'type_id': '3',
                'type_name': '综艺'
            }
        ]}

    def homeVideoContent(self):
        a = self.get_data(self.home_url)
        return {'list': a, 'parse': 0, 'jx': 0}

    def categoryContent(self, cid, page, filter, ext):
        url = self.home_url + f'/vod/show/id/{cid}/page/{page}'
        data = self.get_data(url)
        return {'list': data, 'parse': 0, 'jx': 0}

    def detailContent(self, did):
        ids = did[0]
        data = self.get_detail_data(ids)
        return {"list": data, 'parse': 0, 'jx': 0}

    def searchContent(self, key, quick, page='1'):
        # Site search is single-page; report later pages as empty.
        if int(page) > 1:
            return {'list': [], 'parse': 0, 'jx': 0}
        url = self.home_url + f'/vod/search/{key}'
        data = self.get_data(url)
        return {'list': data, 'parse': 0, 'jx': 0}

    def playerContent(self, flag, pid, vipFlags):
        url = self.get_play_data(pid)
        return {"url": url, "header": self.headers, "parse": 1, "jx": 0}

    def localProxy(self, params):
        pass

    def destroy(self):
        return '正在Destroy'

    def get_data(self, url):
        """Scrape vod cards (JSON-escaped fields) from a listing page."""
        data = []
        try:
            res = requests.get(url, headers=self.headers)
            if res.status_code != 200:
                return data
            vod_id_s = re.findall(r'\\"vodId\\":(.*?),', res.text)
            vod_name_s = re.findall(r'\\"vodName\\":\\"(.*?)\\"', res.text)
            vod_pic_s = re.findall(r'\\"vodPic\\":\\"(.*?)\\"', res.text)
            vod_remarks_s = re.findall(r'\\"vodRemarks\\":\\"(.*?)\\"', res.text)
            # zip() tolerates unequal list lengths (the old index loop crashed).
            for vid, vname, vpic, vremarks in zip(vod_id_s, vod_name_s, vod_pic_s, vod_remarks_s):
                data.append(
                    {
                        'vod_id': vid,
                        'vod_name': vname,
                        'vod_pic': vpic,
                        'vod_remarks': vremarks,
                    }
                )
        except requests.RequestException as e:
            print(e)
        return data

    def get_detail_data(self, ids):
        """Detail record plus "name$id-nid" episode list; [] on failure."""
        url = self.home_url + f'/api/mw-movie/anonymous/video/detail?id={ids}'
        t = str(int(time.time() * 1000))
        headers = self.get_headers(t, f'id={ids}&key=cb808529bae6b6be45ecfab29a4889bc&t={t}')
        try:
            res = requests.get(url, headers=headers)
            if res.status_code != 200:
                return []
            i = res.json()['data']
            urls = []
            for ii in res.json()['data']['episodeList']:
                name = ii['name']
                url = ii['nid']
                urls.append(f'{name}${ids}-{url}')
            data = {
                'type_name': i['vodClass'],
                'vod_id': i['vodId'],
                'vod_name': i['vodName'],
                'vod_remarks': i['vodRemarks'],
                'vod_year': i['vodYear'],
                'vod_area': i['vodArea'],
                'vod_actor': i['vodActor'],
                'vod_director': i['vodDirector'],
                'vod_content': i['vodContent'],
                'vod_play_from': '默认',
                'vod_play_url': '#'.join(urls),
            }
            return [data]

        except requests.RequestException as e:
            print(e)
            return []

    def get_play_data(self, play):
        """Resolve "<id>-<nid>" to a stream url; error_url on failure."""
        info = play.split('-')
        _id = info[0]
        _pid = info[1]
        url = self.home_url + f'/api/mw-movie/anonymous/v2/video/episode/url?id={_id}&nid={_pid}'
        t = str(int(time.time() * 1000))
        headers = self.get_headers(t, f'id={_id}&nid={_pid}&key=cb808529bae6b6be45ecfab29a4889bc&t={t}')
        try:
            res = requests.get(url, headers=headers)
            if res.status_code != 200:
                return self.error_url
            return res.json()['data']['list'][0]['url']
        except requests.RequestException as e:
            print(e)
            return self.error_url

    @staticmethod
    def get_headers(t, e):
        """API headers with sign = sha1(hex(md5(query-string)))."""
        sign = hashlib.sha1(hashlib.md5(e.encode()).hexdigest().encode()).hexdigest()
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'sign': sign,
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            't': t,
            'referer': 'https://www.hkybqufgh.com/',
        }
        return headers
+
# Manual-debug entry point; nothing runs when imported by the app.
if __name__ == '__main__':
    pass
+
+
+
+
diff --git a/py/绝对影视.py b/py/绝对影视.py
new file mode 100644
index 0000000..820abf2
--- /dev/null
+++ b/py/绝对影视.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import base64
+import re
+import sys
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """"绝对影视" source for jdys.art.

    Play urls are AES-CBC encrypted inside an inline <script>; aes()
    decodes them with the key/iv scraped from the same script.

    Fix vs. the original: playerContent used a bare `except:` which also
    swallows SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
    """

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'https://www.jdys.art'

    # Browser-like headers sent with every request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'dnt': '1',
        'sec-ch-ua-mobile': '?0',
        'origin': host,
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': f'{host}/',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'priority': 'u=1, i',
    }

    def homeContent(self, filter):
        """Top-nav categories (first 9 entries) plus the landing-page list."""
        data = self.getpq(self.fetch(self.host, headers=self.headers).text)
        result = {}
        classes = []
        for k in list(data('.navtop .navlist li').items())[:9]:
            classes.append({
                'type_name': k('a').text(),
                'type_id': k('a').attr('href'),
            })
        result['class'] = classes
        result['list'] = self.getlist(data('.mi_btcon .bt_img ul li'))
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category page; tid is the category url, paging via /page/N/."""
        data = self.getpq(self.fetch(f"{tid}{'' if pg == '1' else f'page/{pg}/'}", headers=self.headers).text)
        result = {}
        result['list'] = self.getlist(data('.mi_cont .bt_img ul li'))
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: metadata list items plus the play-button links."""
        data = self.getpq(self.fetch(ids[0], headers=self.headers).text)
        data2 = data('.moviedteail_list li')
        # Positional metadata: 0=type 1=area 2=year 4=remarks 5=director 7=actor.
        vod = {
            'vod_name': data('.dytext h1').text(),
            'type_name': data2.eq(0).text(),
            'vod_year': data2.eq(2).text(),
            'vod_area': data2.eq(1).text(),
            'vod_remarks': data2.eq(4).text(),
            'vod_actor': data2.eq(7).text(),
            'vod_director': data2.eq(5).text(),
            'vod_content': data('.yp_context').text().strip()
        }
        vdata = data('.paly_list_btn a')
        play = []
        for i in vdata.items():
            a = i.text() + "$" + i.attr.href
            play.append(a)
        vod["vod_play_from"] = "在线播放"
        vod["vod_play_url"] = "#".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        data = self.getpq(self.fetch(f"{self.host}/page/{pg}/?s={key}", headers=self.headers).text)
        return {'list': self.getlist(data('.mi_cont .bt_img ul li')), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Decrypt the inline player config to a direct url; on failure fall
        back to parse=1 with the page url."""
        data = self.getpq(self.fetch(id, headers=self.headers).text)
        try:
            sc = data('.videoplay script').eq(-1).text()
            # var <name> = "<ciphertext>";  then  parse("<key>", "<iv>");
            strd = re.findall(r'var\s+[^=]*=\s*"([^"]*)";', sc)
            kdata = re.findall(r'parse\((.*?)\);', sc)
            jm = self.aes(strd[0], kdata[0].replace('"', ''), kdata[1].replace('"', ''))
            url = re.search(r'url: "(.*?)"', jm).group(1)
            p = 0
        except Exception:  # was a bare except:; keep the same fallback
            p = 1
            url = id
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = self.headers
        return result

    def localProxy(self, param):
        pass

    def getpq(self, text):
        """Parse html with pyquery, retrying on utf-8 bytes if needed."""
        try:
            return pq(text)
        except Exception as e:
            print(f"{str(e)}")
            return pq(text.encode('utf-8'))

    def getlist(self, data):
        """Map listing cards to vod dicts."""
        videos = []
        for i in data.items():
            videos.append({
                'vod_id': i('a').attr('href'),
                'vod_name': i('a img').attr('alt'),
                'vod_pic': i('a img').attr('src'),
                'vod_remarks': i('.dycategory').text(),
                'vod_year': i('.dyplayinfo').text() or i('.rating').text(),
            })
        return videos

    def aes(self, word, key, iv):
        """AES-128-CBC decrypt a base64 ciphertext with text key/iv."""
        key = key.encode('utf-8')
        iv = iv.encode('utf-8')
        encrypted_data = base64.b64decode(word)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        decrypted_data = cipher.decrypt(encrypted_data)
        decrypted_data = unpad(decrypted_data, AES.block_size)
        return decrypted_data.decode('utf-8')
diff --git a/py/网络直播.py b/py/网络直播.py
new file mode 100644
index 0000000..4e54c02
--- /dev/null
+++ b/py/网络直播.py
@@ -0,0 +1,767 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import parse_qs
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ tid = 'douyin'
+ headers = self.gethr(0, tid)
+ response = requests.head(self.hosts[tid], headers=headers)
+ ttwid = response.cookies.get('ttwid')
+ headers.update({
+ 'authority': self.hosts[tid].split('//')[-1],
+ 'cookie': f'ttwid={ttwid}' if ttwid else ''
+ })
+ self.dyheaders = headers
+ pass
+
    def getName(self):
        # Interface stubs required by the base Spider contract.
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass
+
    # [0] = desktop browser UA, [1] = Dart client UA; chosen per call site.
    headers = [
        {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
        },
        {
            "User-Agent": "Dart/3.4 (dart:io)"
        }
    ]

    # Fallback/probe url; not referenced by the methods visible in this chunk.
    excepturl = 'https://www.baidu.com'

    # Entry points per platform (lists where web and api hosts differ).
    hosts = {
        "huya": ["https://www.huya.com","https://mp.huya.com"],
        "douyin": "https://live.douyin.com",
        "douyu": "https://www.douyu.com",
        "wangyi": "https://cc.163.com",
        "bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
    }

    # Referer hosts used for each platform's API calls.
    referers = {
        "huya": "https://live.cdn.huya.com",
        "douyin": "https://live.douyin.com",
        "douyu": "https://m.douyu.com",
        "bili": "https://live.bilibili.com"
    }

    # Headers handed to the player for each platform's stream.
    playheaders = {
        "wangyi": {
            "User-Agent": "ExoPlayer",
            "Connection": "Keep-Alive",
            "Icy-MetaData": "1"
        },
        "bili": {
            'Accept': '*/*',
            'Icy-MetaData': '1',
            'referer': referers['bili'],
            'user-agent': headers[0]['User-Agent']
        },
        'douyin': {
            'User-Agent': 'libmpv',
            'Icy-MetaData': '1'
        },
        'huya': {
            'User-Agent': 'ExoPlayer',
            'Connection': 'Keep-Alive',
            'Icy-MetaData': '1'
        },
        'douyu': {
            'User-Agent': 'libmpv',
            'Icy-MetaData': '1'
        }
    }
+
+ def process_bili(self):
+ try:
+ self.blfdata = self.fetch(
+ f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
+ headers=self.gethr(0, 'bili')
+ ).json()
+ return ('bili', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['name'], 'v': str(i['id'])}
+ for i in self.blfdata['data']]}])
+ except Exception as e:
+ print(f"bili处理错误: {e}")
+ return 'bili', None
+
    def process_douyin(self):
        # Douyin exposes no public category API: scrape the home page's
        # <script> blocks for the embedded categoryData JSON blob.
        try:
            data = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
            for i in data.items():
                if 'categoryData' in i.text():
                    content = i.text()
                    # Crude brace matching: everything between the first '{'
                    # and the last '}'.
                    start = content.find('{')
                    end = content.rfind('}') + 1
                    if start != -1 and end != -1:
                        json_str = content[start:end]
                        # Unescape the \" sequences embedded in the page.
                        json_str = json_str.replace('\\"', '"')
                        try:
                            self.dyifdata = json.loads(json_str)
                            # v packs "id@@title" so douyinContent() can
                            # recover both halves later.
                            return ('douyin', [{'key': 'cate', 'name': '分类',
                                                'value': [{'n': i['partition']['title'],
                                                           'v': f"{i['partition']['id_str']}@@{i['partition']['title']}"}
                                                          for i in self.dyifdata['categoryData']]}])
                        except json.JSONDecodeError as e:
                            print(f"douyin解析错误: {e}")
                            return 'douyin', None
        except Exception as e:
            print(f"douyin请求或处理错误: {e}")
            return 'douyin', None
+
+ def process_douyu(self):
+ try:
+ self.dyufdata = self.fetch(
+ f'{self.referers["douyu"]}/api/cate/list',
+ headers=self.headers[1]
+ ).json()
+ return ('douyu', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['cate1Name'], 'v': str(i['cate1Id'])}
+ for i in self.dyufdata['data']['cate1Info']]}])
+ except Exception as e:
+ print(f"douyu错误: {e}")
+ return 'douyu', None
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "虎牙": "huya",
+ "抖音": "douyin",
+ "斗鱼": "douyu",
+ "网易": "wangyi"
+ }
+ classes = []
+ filters = {
+ 'huya': [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
+ {'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
+ }
+
+ with ThreadPoolExecutor(max_workers=3) as executor:
+ futures = {
+ executor.submit(self.process_bili): 'bili',
+ executor.submit(self.process_douyin): 'douyin',
+ executor.submit(self.process_douyu): 'douyu'
+ }
+
+ for future in futures:
+ platform, filter_data = future.result()
+ if filter_data:
+ filters[platform] = filter_data
+
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
    def homeVideoContent(self):
        # No aggregated home feed; each platform is browsed via its tab.
        pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ pagecount = 9999
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid == 'wangyi':
+ vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
+ elif 'bili' in tid:
+ vdata, pagecount = self.biliContent(tid, pg, filter, extend, vdata)
+ elif 'huya' in tid:
+ vdata, pagecount = self.huyaContent(tid, pg, filter, extend, vdata)
+ elif 'douyin' in tid:
+ vdata, pagecount = self.douyinContent(tid, pg, filter, extend, vdata)
+ elif 'douyu' in tid:
+ vdata, pagecount = self.douyuContent(tid, pg, filter, extend, vdata)
+ result['list'] = vdata
+ result['pagecount'] = pagecount
+ return result
+
+ def wyccContent(self, tid, pg, filter, extend, vdata):
+ params = {
+ 'format': 'json',
+ 'start': (int(pg) - 1) * 20,
+ 'size': '20',
+ }
+ response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
+ for i in response['lives']:
+ if i.get('cuteid'):
+ bvdata = self.buildvod(
+ vod_id=f"{tid}@@{i['cuteid']}",
+ vod_name=i.get('title'),
+ vod_pic=i.get('cover'),
+ vod_remarks=i.get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(bvdata)
+ return vdata, 9999
+
    def biliContent(self, tid, pg, filter, extend, vdata):
        # Two modes: with a 'cate' filter on page 1, return the sub-area grid
        # (oval tiles tagged click_... for drill-down); otherwise list rooms.
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            # blfdata was cached by process_bili() during homeContent().
            for i in self.blfdata['data']:
                if str(i['id']) == extend['cate']:
                    for j in i['list']:
                        v = self.buildvod(
                            vod_id=f"click_{tid}@@{i['id']}@@{j['id']}",
                            vod_name=j.get('name'),
                            vod_pic=j.get('pic'),
                            vod_tag=1,
                            style={"type": "oval", "ratio": 1}
                        )
                        vdata.append(v)
            return vdata, 1
        else:
            path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
            if 'click' in tid:
                # tid looks like click_bili@@<parent_area>@@<area>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
            data = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
            for i in data['data']['list']:
                if i.get('roomid'):
                    # NOTE(review): rebinding `data` shadows the response dict;
                    # harmless only because the loop's iterator was captured
                    # before the first rebind.
                    data = self.buildvod(
                        f"{tid}@@{i['roomid']}",
                        i.get('title'),
                        i.get('cover'),
                        i.get('watched_show', {}).get('text_large'),
                        0,
                        i.get('uname'),
                        style={"type": "rect", "ratio": 1.33}
                    )
                    vdata.append(data)
            return vdata, 9999
+
    def huyaContent(self, tid, pg, filter, extend, vdata):
        # Filter on page 1: show the game grid for the chosen business type
        # (oval tiles tagged click_... for drill-down); otherwise list rooms.
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            id = extend.get('cate')
            data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={id}',
                              headers=self.headers[1]).json()
            for i in data['data']:
                v = self.buildvod(
                    vod_id=f"click_{tid}@@{int(i['gid'])}",
                    vod_name=i.get('gameFullName'),
                    vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(i["gid"])}-MS.jpg',
                    vod_tag=1,
                    style={"type": "oval", "ratio": 1}
                )
                vdata.append(v)
            return vdata, 1
        else:
            gid = ''
            if 'click' in tid:
                # tid looks like click_huya@@<gameId>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                gid = f'&gameId={ids[1]}'
            data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
                              headers=self.headers[1]).json()
            for i in data['data']['datas']:
                if i.get('profileRoom'):
                    v = self.buildvod(
                        f"{tid}@@{i['profileRoom']}",
                        i.get('introduction'),
                        i.get('screenshot'),
                        # viewer count rendered in units of 10k (万).
                        str(int(i.get('totalCount', '1')) / 10000) + '万',
                        0,
                        i.get('nick'),
                        style={"type": "rect", "ratio": 1.33}

                    )
                    vdata.append(v)
            return vdata, 9999
+
    def douyinContent(self, tid, pg, filter, extend, vdata):
        # Filter on page 1: expand the chosen partition into itself plus its
        # sub-partitions (dyifdata was cached by process_douyin()).
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            # cate packs "id@@title" (see process_douyin).
            ids = extend.get('cate').split('@@')
            for i in self.dyifdata['categoryData']:
                c = i['partition']
                if c['id_str'] == ids[0] and c['title'] == ids[1]:
                    vlist = i['sub_partition'].copy()
                    # Put the parent partition first.
                    vlist.insert(0, {'partition': c})
                    for j in vlist:
                        j = j['partition']
                        v = self.buildvod(
                            vod_id=f"click_{tid}@@{j['id_str']}@@{j['type']}",
                            vod_name=j.get('title'),
                            vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
                            vod_tag=1,
                            style={"type": "oval", "ratio": 1}
                        )
                        vdata.append(v)
            return vdata, 1
        else:
            path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
            if 'click' in tid:
                # tid looks like click_douyin@@<partition>@@<partition_type>.
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
            data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
            for i in data['data']['data']:
                v = self.buildvod(
                    vod_id=f"{tid}@@{i['web_rid']}",
                    vod_name=i['room'].get('title'),
                    vod_pic=i['room']['cover'].get('url_list')[0],
                    vod_year=i.get('user_count_str'),
                    vod_remarks=i['room']['owner'].get('nickname'),
                    style={"type": "rect", "ratio": 1.33}
                )
                vdata.append(v)
            return vdata, 9999
+
    def douyuContent(self, tid, pg, filter, extend, vdata):
        """Build the Douyu category listing.

        With a 'cate' filter on page 1 this lists level-2 categories under
        the chosen level-1 category as folders; otherwise it pages through
        live rooms. Returns (vdata, pagecount).
        """
        if extend.get('cate') and pg == '1' and 'click' not in tid:
            for i in self.dyufdata['data']['cate2Info']:
                if str(i['cate1Id']) == extend['cate']:
                    v = self.buildvod(
                        vod_id=f"click_{tid}@@{i['cate2Id']}",
                        vod_name=i.get('cate2Name'),
                        vod_pic=i.get('icon'),
                        vod_remarks=i.get('count'),
                        vod_tag=1,
                        style={"type": "oval", "ratio": 1}
                    )
                    vdata.append(v)
            return vdata, 1
        else:
            # Default: the "all rooms" feed; folder clicks switch to the
            # per-category mixList endpoint.
            path = f'/japi/weblist/apinc/allpage/6/{pg}'
            if 'click' in tid:
                ids = tid.split('_')[1].split('@@')
                tid = ids[0]
                path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
            url = f'{self.hosts[tid]}{path}'
            data = self.fetch(url, headers=self.headers[1]).json()
            for i in data['data']['rl']:
                v = self.buildvod(
                    vod_id=f"{tid}@@{i['rid']}",
                    vod_name=i.get('rn'),
                    vod_pic=i.get('rs16'),
                    # 'ol' is the online viewer count, shown in 万 units
                    vod_year=str(int(i.get('ol', 1)) / 10000) + '万',
                    vod_remarks=i.get('nn'),
                    style={"type": "rect", "ratio": 1.33}
                )
                vdata.append(v)
            return vdata, 9999
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@@')
+ if ids[0] == 'wangyi':
+ vod = self.wyccDetail(ids)
+ elif ids[0] == 'bili':
+ vod = self.biliDetail(ids)
+ elif ids[0] == 'huya':
+ vod = self.huyaDetail(ids)
+ elif ids[0] == 'douyin':
+ vod = self.douyinDetail(ids)
+ elif ids[0] == 'douyu':
+ vod = self.douyuDetail(ids)
+ return {'list': [vod]}
+
    def wyccDetail(self, ids):
        """Fetch NetEase CC room detail and build per-CDN quality playlists.

        Scrapes the room page's last <script> (Next.js embedded JSON),
        groups stream URLs by CDN, and encodes each CDN's
        [quality_name, url, ...] list as base64 into the play URL.
        """
        try:
            vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()

            def get_quality_name(vbr):
                # Map bitrate to a human-readable quality tier.
                if vbr <= 600:
                    return "标清"
                elif vbr <= 1000:
                    return "高清"
                elif vbr <= 2000:
                    return "超清"
                else:
                    return "蓝光"

            data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
            name = data['live'].get('title', ids[0])
            vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
                                vod_content=data.get('description_suffix'))
            resolution_data = data['live']['quickplay']['resolution']
            all_streams = {}
            sorted_qualities = sorted(resolution_data.items(),
                                      key=lambda x: x[1]['vbr'],
                                      reverse=True)
            # NOTE(review): the loop variable below shadows the outer `data`
            # dict; `data` is not used after this loop, so it is harmless,
            # but rename with care if this method is ever refactored.
            for quality, data in sorted_qualities:
                vbr = data['vbr']
                quality_name = get_quality_name(vbr)
                for cdn_name, url in data['cdn'].items():
                    if cdn_name not in all_streams and type(url) == str and url.startswith('http'):
                        all_streams[cdn_name] = []
                    if isinstance(url, str) and url.startswith('http'):
                        all_streams[cdn_name].extend([quality_name, url])
            plists = []
            names = []
            for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
                names.append(f'线路{i}')
                pstr = f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}"
                plists.append(pstr)
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plists)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def biliDetail(self, ids):
        """Fetch Bilibili room info and expose the richest accept_qn list.

        Play entries are "<desc>$bili@@<room>@@<qn>"; the actual stream URL
        is resolved later by biliplay() with the chosen qn.
        """
        try:
            vdata = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
                headers=self.gethr(0, ids[0])).json()
            v = vdata['data']['room_info']
            vod = self.buildvod(
                vod_name=v.get('title'),
                type_name=v.get('parent_area_name') + '/' + v.get('area_name'),
                vod_remarks=v.get('tags'),
                vod_play_from=v.get('title'),
            )
            data = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
                headers=self.gethr(0, ids[0])).json()
            vdnams = data['data']['playurl_info']['playurl']['g_qn_desc']
            all_accept_qns = []
            streams = data['data']['playurl_info']['playurl']['stream']
            for stream in streams:
                for format_item in stream['format']:
                    for codec in format_item['codec']:
                        if 'accept_qn' in codec:
                            all_accept_qns.append(codec['accept_qn'])
            # Use the codec advertising the most quality levels.
            max_accept_qn = max(all_accept_qns, key=len) if all_accept_qns else []
            quality_map = {
                item['qn']: item['desc']
                for item in vdnams
            }
            quality_names = [f"{quality_map.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in max_accept_qn]
            vod['vod_play_url'] = "#".join(quality_names)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def huyaDetail(self, ids):
        """Fetch Huya room detail and build per-line, per-CDN playlists.

        For each stream line, rewrites the CDN base URL per bitrate
        (ratio=<bitrate> for m3u8, imgplus_<bitrate>.flv for flv) and
        base64-encodes the [quality, url, ...] list into the play URL.
        """
        try:
            vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
                               headers=self.headers[0]).json()
            v = vdata['data']['liveData']
            vod = self.buildvod(
                vod_name=v.get('introduction'),
                type_name=v.get('gameFullName'),
                vod_director=v.get('nick'),
                vod_remarks=v.get('contentIntro'),
            )
            # Reverse so the preferred stream type comes first.
            data = dict(reversed(list(vdata['data']['stream'].items())))
            names = []
            plist = []

            for stream_type, stream_data in data.items():
                if isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data:
                    names.append(f"线路{len(names) + 1}")
                    qualities = sorted(
                        stream_data['rateArray'],
                        key=lambda x: (x['iBitRate'], x['sDisplayName']),
                        reverse=True
                    )
                    cdn_urls = []
                    for cdn in stream_data['multiLine']:
                        quality_urls = []
                        for quality in qualities:
                            quality_name = quality['sDisplayName']
                            bit_rate = quality['iBitRate']
                            base_url = cdn['url']
                            if bit_rate > 0:
                                if '.m3u8' in base_url:
                                    new_url = base_url.replace(
                                        'ratio=2000',
                                        f'ratio={bit_rate}'
                                    )
                                else:
                                    new_url = base_url.replace(
                                        'imgplus.flv',
                                        f'imgplus_{bit_rate}.flv'
                                    )
                            else:
                                # Bitrate 0 means the original/default stream.
                                new_url = base_url
                            quality_urls.extend([quality_name, new_url])
                        encoded_urls = self.e64(json.dumps(quality_urls))
                        cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{encoded_urls}")

                    if cdn_urls:
                        plist.append('#'.join(cdn_urls))
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
    def douyinDetail(self, ids):
        """Fetch Douyin room detail and build main/backup format playlists.

        Parses the embedded stream_data JSON, collects flv/hls/lls URLs per
        quality for each available line ('main'/'backup'), and
        base64-encodes each format's [quality, url, ...] list.
        """
        url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
        data = self.fetch(url, headers=self.dyheaders).json()
        try:
            vdata = data['data']['data'][0]
            vod = self.buildvod(
                vod_name=vdata['title'],
                vod_remarks=vdata['user_count_str'],
            )
            resolution_data = vdata['stream_url']['live_core_sdk_data']['pull_data']['options']['qualities']
            stream_json = vdata['stream_url']['live_core_sdk_data']['pull_data']['stream_data']
            stream_json = json.loads(stream_json)
            available_types = []
            # A line type is available if any sdk_key advertises it.
            if any(sdk_key in stream_json['data'] and 'main' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('main')
            if any(sdk_key in stream_json['data'] and 'backup' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('backup')
            plist = []
            for line_type in available_types:
                format_arrays = {'flv': [], 'hls': [], 'lls': []}
                qualities = sorted(resolution_data, key=lambda x: x['level'], reverse=True)
                for quality in qualities:
                    sdk_key = quality['sdk_key']
                    if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
                        stream_info = stream_json['data'][sdk_key][line_type]
                        if stream_info.get('flv'):
                            format_arrays['flv'].extend([quality['name'], stream_info['flv']])
                        if stream_info.get('hls'):
                            format_arrays['hls'].extend([quality['name'], stream_info['hls']])
                        if stream_info.get('lls'):
                            format_arrays['lls'].extend([quality['name'], stream_info['lls']])
                format_urls = []
                for format_name, url_array in format_arrays.items():
                    if url_array:
                        encoded_urls = self.e64(json.dumps(url_array))
                        format_urls.append(f"{format_name}${ids[0]}@@{encoded_urls}")

                if format_urls:
                    plist.append('#'.join(format_urls))

            # One display name per discovered line, in order.
            names = ['线路1', '线路2'][:len(plist)]
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod

        except Exception as e:
            return self.handle_exception(e)
+
    def douyuDetail(self, ids):
        """Fetch Douyu room detail, sign the play request, list CDN lines.

        The obfuscated signing JS is trimmed by douyu_text() and sent to an
        external signing service; the resulting sign plus each CDN name and
        the rate table are base64-encoded into the play URL for douyuplay().
        """
        headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
        try:
            data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
            vname = data['room']['room_name']
            vod = self.buildvod(
                vod_name=vname,
                vod_remarks=data['room'].get('second_lvl_name'),
                vod_director=data['room'].get('nickname'),
            )
            vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
            json_body = vdata['data']
            json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
            # External service evaluates the trimmed JS and returns the sign.
            sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
            body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
            body=self.params_to_json(body)
            nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
            plist = []
            names = []
            for i,x in enumerate(nubdata['data']['cdnsWithName']):
                names.append(f'线路{i+1}')
                d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
                plist.append(
                    f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
+ def douyu_text(self, text):
+ function_positions = [m.start() for m in re.finditer('function', text)]
+ total_functions = len(function_positions)
+ if total_functions % 2 == 0:
+ target_index = total_functions // 2 + 1
+ else:
+ target_index = (total_functions - 1) // 2 + 1
+ if total_functions >= target_index:
+ cut_position = function_positions[target_index - 1]
+ ctext = text[4:cut_position]
+ return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', ctext)
+ return text
+
    def searchContent(self, key, quick, pg="1"):
        # Live rooms are not searchable in this spider; the framework hook
        # is intentionally a no-op.
        pass
+
+ def playerContent(self, flag, id, vipFlags):
+ try:
+ ids = id.split('@@')
+ p = 1
+ if ids[0] in ['wangyi', 'douyin','huya']:
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'bili':
+ p, url = self.biliplay(ids)
+ elif ids[0] == 'huya':
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'douyu':
+ p, url = self.douyuplay(ids)
+ return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
+ except Exception as e:
+ return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
+
    def biliplay(self, ids):
        """Resolve a Bilibili stream URL for the chosen quality (qn).

        Returns (0, [line_name, url, ...]) on success, or
        (1, self.excepturl) so the caller falls back to web parsing.
        """
        try:
            data = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
                headers=self.gethr(0, ids[0])).json()
            urls = []
            line_index = 1
            for stream in data['data']['playurl_info']['playurl']['stream']:
                for format_item in stream['format']:
                    for codec in format_item['codec']:
                        for url_info in codec['url_info']:
                            # host + base_url + extra query = full stream URL
                            full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
                            urls.extend([f"线路{line_index}", full_url])
                            line_index += 1
            return 0, urls
        except Exception as e:
            return 1, self.excepturl
+
    def douyuplay(self, ids):
        """Resolve Douyu stream URLs for every rate in parallel.

        ids[1] holds the base64 {sign, cdn, id} dict, ids[2] the base64
        rate table. douyufp() fills result_obj keyed by bitrate; results
        are flattened highest-bitrate-first. Returns (0, [...]) or
        (1, self.excepturl) on failure.
        """
        try:
            sdata = json.loads(self.d64(ids[1]))
            headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
            ldata = json.loads(self.d64(ids[2]))
            result_obj = {}
            with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
                futures = [
                    executor.submit(
                        self.douyufp,
                        sdata,
                        quality,
                        headers,
                        self.hosts[ids[0]],
                        result_obj
                    ) for quality in ldata
                ]
                for future in futures:
                    # Propagate worker completion (exceptions are handled
                    # inside douyufp itself).
                    future.result()

            result = []
            for bit in sorted(result_obj.keys(), reverse=True):
                result.extend(result_obj[bit])

            if result:
                return 0, result
            return 1, self.excepturl

        except Exception as e:
            return 1, self.excepturl
+
    def douyufp(self, sdata, quality, headers, host, result_obj):
        """Worker: fetch one Douyu rate's play URL into result_obj.

        result_obj maps bitrate -> [name, url, ...]. Called concurrently
        by douyuplay(); plain dict/list mutation is relied on here (CPython
        per-op atomicity) — no explicit locking.
        """
        try:
            body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
            body=self.params_to_json(body)
            data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
                             data=body, headers=headers).json()
            if data.get('data'):
                play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
                bit = quality.get('bit', 0)
                if bit not in result_obj:
                    result_obj[bit] = []
                result_obj[bit].extend([quality['name'], play_url])
        except Exception as e:
            print(f"Error fetching {quality['name']}: {str(e)}")
+
    def localProxy(self, param):
        # Local proxying is not needed by this spider; framework hook.
        pass
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+ def params_to_json(self, query_string):
+ parsed_data = parse_qs(query_string)
+ result = {key: value[0] for key, value in parsed_data.items()}
+ return result
+
+ def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
+ type_name='', vod_area='', vod_actor='', vod_director='',
+ vod_content='', vod_play_from='', vod_play_url=''):
+ vod = {
+ 'vod_id': vod_id,
+ 'vod_name': vod_name,
+ 'vod_pic': vod_pic,
+ 'vod_year': vod_year,
+ 'vod_tag': 'folder' if vod_tag else '',
+ 'vod_remarks': vod_remarks,
+ 'style': style,
+ 'type_name': type_name,
+ 'vod_area': vod_area,
+ 'vod_actor': vod_actor,
+ 'vod_director': vod_director,
+ 'vod_content': vod_content,
+ 'vod_play_from': vod_play_from,
+ 'vod_play_url': vod_play_url
+ }
+ vod = {key: value for key, value in vod.items() if value}
+ return vod
+
    def getpq(self, url, headers=None, cookies=None):
        """Fetch a URL and wrap the response body in a PyQuery document.

        Falls back to parsing the UTF-8-encoded bytes when parsing the
        text directly fails (some pages trip lxml's unicode handling).
        """
        data = self.fetch(url, headers=headers, cookies=cookies).text
        try:
            return pq(data)
        except Exception as e:
            print(f"解析页面错误: {str(e)}")
            return pq(data.encode('utf-8'))
+
+ def gethr(self, index, rf='', zr=''):
+ headers = self.headers[index]
+ if zr:
+ headers['referer'] = zr
+ else:
+ headers['referer'] = f"{self.referers[rf]}/"
+ return headers
+
+ def handle_exception(self, e):
+ print(f"报错: {str(e)}")
+ return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'翻车啦${self.excepturl}'}
+
diff --git a/py/美帕影视.py b/py/美帕影视.py
new file mode 100644
index 0000000..bcb3a51
--- /dev/null
+++ b/py/美帕影视.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+
class Spider(Spider):
    """美帕影视: thin client over the g.c494.com app API.

    NOTE: the class deliberately shadows the imported base ``Spider``
    (the plugin framework's convention for these single-class files).
    """

    def getName(self):
        return "mp"

    def init(self, extend=""):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Backend API root.
    host = 'https://g.c494.com'

    # Headers mimicking the Android app client; 'client_name' is a
    # base64-ish token the backend expects verbatim.
    header = {
        'User-Agent': 'Dart/2.10 (dart:io)',
        'platform_version': 'RP1A.200720.011',
        'version': '2.2.3',
        'copyright': 'xiaogui',
        'platform': 'android',
        'client_name': '576O5p+P5b2x6KeG',
    }

    def homeContent(self, filter):
        """Build category list and per-category filters from the nav API."""
        data = self.fetch(f'{self.host}/api.php/app/nav?token=', headers=self.header).json()
        # Mapping of backend filter keys to display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["list"]
        for item in json_data:
            has_non_empty_field = False
            jsontype_extend = item["type_extend"]
            classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
            # Only build a filter entry when at least one known key has data.
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Flatten the home endpoint's grouped lists into one video list."""
        rsp = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.header)
        root = rsp.json()['list']
        videos = [item for vodd in root for item in vodd['vlist']]
        return {'list': videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Return one category page; the API response is passed through."""
        parms = {"pg": pg, "tid": tid, "class": extend.get("class", ""), "area": extend.get("area", ""),
                 "lang": extend.get("lang", ""), "year": extend.get("year", ""), "token": ""}
        data = self.fetch(f'{self.host}/api.php/app/video', params=parms, headers=self.header).json()
        return data

    def detailContent(self, ids):
        """Return detail for one id, stripping ad/player fields."""
        parms = {"id": ids[0], "token": ""}
        data = self.fetch(f'{self.host}/api.php/app/video_detail', params=parms, headers=self.header).json()
        vod = data['data']
        vod.pop('pause_advert_list', None)
        vod.pop('init_advert_list', None)
        vod.pop('vod_url_with_player', None)
        return {"list": [vod]}

    def searchContent(self, key, quick, pg='1'):
        """Keyword search; the API response is passed through."""
        parms = {'pg': pg, 'text': key, 'token': ''}
        data = self.fetch(f'{self.host}/api.php/app/search', params=parms, headers=self.header).json()
        return data

    def playerContent(self, flag, id, vipFlags):
        # Direct play: id already is the media URL.
        return {"parse": 0, "url": id, "header": {'User-Agent': 'User-Agent: Lavf/58.12.100'}}

    def localProxy(self, param):
        pass
diff --git a/py/腾讯视频.py b/py/腾讯视频.py
new file mode 100644
index 0000000..7a5218f
--- /dev/null
+++ b/py/腾讯视频.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import uuid
+import copy
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+
class Spider(Spider):
    """腾讯视频 spider: browses v.qq.com channels via the pbaccess API.

    Listing endpoints are stateful: categoryContent() keeps the API's
    ``page_context`` in ``self.body`` between pages.
    """

    def init(self, extend=""):
        # Template body for page-server requests.
        self.dbody = {
            "page_params": {
                "channel_id": "",
                "filter_params": "sort=75",
                "page_type": "channel_operation",
                "page_id": "channel_list_second_page"
            }
        }
        # NOTE(review): this aliases dbody, and the later
        # `self.dbody.copy()` in categoryContent is a *shallow* copy, so
        # the nested 'page_params' dict is still shared and mutated in
        # place — confirm this sharing is intentional before refactoring.
        self.body = self.dbody
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Web front-end root (used as origin/referer and for play pages).
    host = 'https://v.qq.com'

    # Backend API gateway.
    apihost = 'https://pbaccess.video.qq.com'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
        'origin': host,
        'referer': f'{host}/'
    }

    def homeContent(self, filter):
        """Build channel classes and, in parallel, their filter options."""
        cdata = {
            "电视剧": "100113",
            "电影": "100173",
            "综艺": "100109",
            "纪录片": "100105",
            "动漫": "100119",
            "少儿": "100150",
            "短剧": "110755"
        }
        result = {}
        classes = []
        filters = {}
        for k in cdata:
            classes.append({
                'type_name': k,
                'type_id': cdata[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
            for future in futures:
                cid, data = future.result()
                if not data.get('data', {}).get('module_list_datas'):
                    continue
                filter_dict = {}
                try:
                    # The last module of the last module-list holds the
                    # filter index items.
                    items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
                    for item in items:
                        if not item.get('item_params', {}).get('index_item_key'):
                            continue
                        params = item['item_params']
                        filter_key = params['index_item_key']
                        if filter_key not in filter_dict:
                            filter_dict[filter_key] = {
                                'key': filter_key,
                                'name': params['index_name'],
                                'value': []
                            }
                        filter_dict[filter_key]['value'].append({
                            'n': params['option_name'],
                            'v': params['option_value']
                        })
                except (IndexError, KeyError):
                    continue
                filters[cid] = list(filter_dict.values())
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home page recommendations from the channel page service."""
        json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
        data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
        vlist = []
        for it in data['data']['CardList'][0]['children_list']['list']['cards']:
            if it.get('params'):
                p = it['params']
                # imgtag JSON carries corner labels (year / status text).
                tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
                id = it.get('id') or p.get('cid')
                name = p.get('mz_title') or p.get('title')
                # Skip ad/link cards whose id is a URL.
                if name and 'http' not in id:
                    vlist.append({
                        'vod_id': id,
                        'vod_name': name,
                        'vod_pic': p.get('image_url'),
                        'vod_year': tag.get('tag_2', {}).get('text'),
                        'vod_remarks': tag.get('tag_4', {}).get('text')
                    })
        return {'list': vlist}

    def categoryContent(self, tid, pg, filter, extend):
        """One page of a channel listing; carries page_context in self.body."""
        result = {}
        params = {
            "sort": extend.get('sort', '75'),
            "attr": extend.get('attr', '-1'),
            "itype": extend.get('itype', '-1'),
            "ipay": extend.get('ipay', '-1'),
            "iarea": extend.get('iarea', '-1'),
            "iyear": extend.get('iyear', '-1'),
            "theater": extend.get('theater', '-1'),
            "award": extend.get('award', '-1'),
            "recommend": extend.get('recommend', '-1')
        }
        if pg == '1':
            # Reset pagination state on the first page (shallow copy — see
            # the NOTE in init about shared 'page_params').
            self.body = self.dbody.copy()
        self.body['page_params']['channel_id'] = tid
        self.body['page_params']['filter_params'] = self.josn_to_params(params)
        data = self.post(
            f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
            json=self.body, headers=self.headers).json()
        ndata = data['data']
        if ndata['has_next_page']:
            result['pagecount'] = 9999
            self.body['page_context'] = ndata['next_page_context']
        else:
            result['pagecount'] = int(pg)
        vlist = []
        for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
            id = its.get('item_params', {}).get('cid')
            if id:
                p = its['item_params']
                tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
                name = p.get('mz_title') or p.get('title')
                pic = p.get('new_pic_hz') or p.get('new_pic_vt')
                vlist.append({
                    'vod_id': id,
                    'vod_name': name,
                    'vod_pic': pic,
                    'vod_year': tag.get('tag_2', {}).get('text'),
                    'vod_remarks': tag.get('tag_4', {}).get('text')
                })
        result['list'] = vlist
        result['page'] = pg
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch intro + full episode list (all tabs) for one cid."""
        vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
        body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
        with ThreadPoolExecutor(max_workers=2) as executor:
            future_detail = executor.submit(self.get_vdata, vbody)
            future_episodes = executor.submit(self.get_vdata, body)
            vdata = future_detail.result()
            data = future_episodes.result()

        pdata = self.process_tabs(data, body, ids)
        if not pdata:
            return self.handle_exception(None, "No pdata available")

        try:
            star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
                0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
            actors = [star['item_params']['name'] for star in star_list]
            names = ['腾讯视频', '预告片']
            plist, ylist = self.process_pdata(pdata, ids)
            # Drop play-source names whose list came back empty.
            if not plist:
                del names[0]
            if not ylist:
                del names[1]
            vod = self.build_vod(vdata, actors, plist, ylist, names)
            return {'list': [vod]}
        except Exception as e:
            return self.handle_exception(e, "Error processing detail")

    def searchContent(self, key, quick, pg="1"):
        """Keyword search, merging the normal and area-box result lists."""
        headers = self.headers.copy()
        headers.update({'Content-Type': 'application/json'})
        body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
        data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
                         json=body, headers=headers).json()
        vlist = []
        vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
        v=data['data']['normalList']['itemList']
        d=data['data']['areaBoxList'][0]['itemList']
        q=v+d
        # When the first normal item is the 'MainNeed' marker, prefer the
        # area-box results first.
        if v[0].get('doc') and v[0]['doc'].get('id') =='MainNeed':q=d+v
        for k in q:
            if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
                img_tag = k.get('videoInfo', {}).get('imgTag')
                if img_tag is not None and isinstance(img_tag, str):
                    try:
                        tag = json.loads(img_tag)
                    except json.JSONDecodeError as e:
                        tag = {}
                else:
                    tag = {}
                pic = k.get('videoInfo', {}).get('imgUrl')
                vlist.append({
                    'vod_id': k['doc']['id'],
                    'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
                    'vod_pic': pic,
                    'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
                    'vod_remarks': tag.get('tag_4', {}).get('text', '')
                })
        return {'list': vlist, 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        # id is "<cid>@<vid>"; hand the web page URL to the外部解析 (jx).
        ids = id.split('@')
        url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
        return {'jx':1,'parse': 1, 'url': url, 'header': ''}

    def localProxy(self, param):
        pass

    def get_filter_data(self, cid):
        """Fetch a channel's page data used to derive its filter options."""
        hbody = self.dbody.copy()
        hbody['page_params']['channel_id'] = cid
        data = self.post(
            f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
            json=hbody, headers=self.headers).json()
        return cid, data

    def get_vdata(self, body):
        """POST one page-server request; return an empty shell on failure."""
        try:
            vdata = self.post(
                f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
                json=body, headers=self.headers
            ).json()
            return vdata
        except Exception as e:
            print(f"Error in get_vdata: {str(e)}")
            return {'data': {'module_list_datas': []}}

    def process_pdata(self, pdata, ids):
        """Split episode items into main episodes and trailers (预告)."""
        plist = []
        ylist = []
        for k in pdata:
            if k.get('item_id'):
                pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
                if '预告' in k['item_params']['union_title']:
                    ylist.append(pid)
                else:
                    plist.append(pid)
        return plist, ylist

    def build_vod(self, vdata, actors, plist, ylist, names):
        """Assemble the final vod dict from intro data and episode lists."""
        d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
        urls = []
        if plist:
            urls.append('#'.join(plist))
        if ylist:
            urls.append('#'.join(ylist))
        vod = {
            'type_name': d.get('sub_genre', ''),
            'vod_name': d.get('title', ''),
            'vod_year': d.get('year', ''),
            'vod_area': d.get('area_name', ''),
            'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
            'vod_actor': ','.join(actors),
            'vod_content': d.get('cover_description', ''),
            'vod_play_from': '$$$'.join(names),
            'vod_play_url': '$$$'.join(urls)
        }
        return vod

    def handle_exception(self, e, message):
        """Log and return the placeholder 'crashed' entry."""
        print(f"{message}: {str(e)}")
        return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}

    def process_tabs(self, data, body, ids):
        """Collect episode items from the first tab plus all remaining tabs.

        Remaining tabs are fetched concurrently but re-assembled in tab
        order to keep episodes sorted.
        """
        try:
            pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
            tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
            if tabs and len(json.loads(tabs)):
                tabs = json.loads(tabs)
                remaining_tabs = tabs[1:]
                task_queue = []
                for tab in remaining_tabs:
                    nbody = copy.deepcopy(body)
                    nbody['page_params']['page_context'] = tab['page_context']
                    task_queue.append(nbody)
                with ThreadPoolExecutor(max_workers=10) as executor:
                    future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
                    results = [None] * len(task_queue)
                    for future in as_completed(future_map.keys()):
                        idx = future_map[future]
                        results[idx] = future.result()
                for result in results:
                    if result:
                        page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
                            'item_datas']
                        pdata.extend(page_data)
            return pdata
        except Exception as e:
            print(f"Error processing episodes: {str(e)}")
            return []

    def josn_to_params(self, params, skip_empty=False):
        """Serialize a dict into a query string (name typo kept: public API)."""
        query = []
        for k, v in params.items():
            if skip_empty and not v:
                continue
            query.append(f"{k}={v}")
        return "&".join(query)
+
+
diff --git a/py/芒果视频.py b/py/芒果视频.py
new file mode 100644
index 0000000..6ba8e34
--- /dev/null
+++ b/py/芒果视频.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """Spider for MGTV (芒果TV) built on its public pcweb/mobile JSON APIs."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Site root (used as origin/referer and to build play-page URLs).
    rhost='https://www.mgtv.com'

    # Category/filter API host.
    host='https://pianku.api.mgtv.com'

    # Video info / episode list API host.
    vhost='https://pcweb.api.mgtv.com'

    # Home recommendation API host.
    mhost='https://dc.bz.mgtv.com'

    # Mobile search API host.
    shost='https://mobileso.bz.mgtv.com'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
        'origin': rhost,
        'referer': f'{rhost}/'
    }

    def homeContent(self, filter):
        """Return the fixed channel list plus per-channel filters fetched
        concurrently via getf()."""
        result = {}
        cateManual = {
            "电影": "3",
            "电视剧": "2",
            "综艺": "1",
            "动画": "50",
            "少儿": "10",
            "纪录片": "51",
            "教育": "115"
        }
        classes = []
        filters = {}
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        # One worker per channel: fetch each channel's filter config in parallel.
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            for id, ft in results:
                if len(ft):filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Return home-page recommendations from the dynamic channel index."""
        data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
        videoList = []
        for i in data['data']:
            if i.get('DSLList') and len(i['DSLList']):
                for j in i['DSLList']:
                    if j.get('data') and j['data'].get('items') and len(j['data']['items']):
                        for k in j['data']['items']:
                            videoList.append({
                                'vod_id': k["videoId"],
                                'vod_name': k['videoName'],
                                'vod_pic': k['img'],
                                'vod_year': k.get('cornerTitle'),
                                'vod_remarks': k.get('time') or k.get('desc'),
                            })
        return {'list':videoList}

    def categoryContent(self, tid, pg, filter, extend):
        """List videos of channel `tid`, page `pg`, with filter params merged in.
        Pagecount/total are sentinels: the API does not expose real totals."""
        body={
            'allowedRC': '1',
            'platform': 'pcweb',
            'channelId': tid,
            'pn': pg,
            'pc': '80',
            'hudong': '1',
            '_support': '10000000'
        }
        body.update(extend)
        data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
        videoList = []
        for i in data['data']['hitDocs']:
            videoList.append({
                'vod_id': i["playPartId"],
                'vod_name': i['title'],
                'vod_pic': i['img'],
                'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
                'vod_remarks': i['updateInfo']
            })
        result = {}
        result['list'] = videoList
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch video metadata and the full episode list (all pages,
        remaining pages fetched concurrently, appended in page order)."""
        vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
        vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
        d=vdata['data']['info']['detail']
        vod = {
            'vod_name': vdata['data']['info']['title'],
            'type_name': d.get('kind'),
            'vod_year': d.get('releaseTime'),
            'vod_area': d.get('area'),
            'vod_lang': d.get('language'),
            'vod_remarks': d.get('updateInfo'),
            'vod_actor': d.get('leader'),
            'vod_director': d.get('director'),
            'vod_content': d.get('story'),
            'vod_play_from': '芒果TV',
            'vod_play_url': ''
        }
        data,pdata=self.fetch_page_data('1', ids[0],True)
        # BUG FIX: total_page may come back as a string; normalise once so
        # both the comparison and range() are safe (the original did
        # range(2, pagecount+1) which raises TypeError on a str pagecount).
        pagecount=int(data['data'].get('total_page') or 1)
        if pagecount>1:
            pages = list(range(2, pagecount+1))
            page_results = {}
            with ThreadPoolExecutor(max_workers=10) as executor:
                future_to_page = {
                    executor.submit(self.fetch_page_data, page, ids[0]): page
                    for page in pages
                }
                for future in as_completed(future_to_page):
                    page = future_to_page[future]
                    try:
                        result = future.result()
                        page_results[page] = result
                    except Exception as e:
                        print(f"Error fetching page {page}: {e}")
            # Append pages in ascending order regardless of completion order.
            for page in sorted(page_results.keys()):
                pdata.extend(page_results[page])
        vod['vod_play_url'] = '#'.join(pdata)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Search via the mobile applet endpoint; entries lacking vid/img
        are skipped."""
        data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
        videoList = []
        for i in data['data']['contents']:
            if i.get('data') and len(i['data']):
                k = i['data'][0]
                if k.get('vid') and k.get('img'):
                    try:
                        videoList.append({
                            'vod_id': k['vid'],
                            'vod_name': k['title'],
                            'vod_pic': k['img'],
                            'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
                            'vod_remarks': '/'.join(i.get('desc',[])),
                        })
                    except Exception:
                        # Malformed entry: log it and keep going.
                        print(k)
        return {'list':videoList,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Episode urls are site-relative; hand the absolute page URL to the
        player's external parser (jx/parse flags)."""
        id=f'{self.rhost}{id}'
        return {'jx':1,'parse': 1, 'url': id, 'header': ''}

    def localProxy(self, param):
        pass

    def getf(self, body):
        """Fetch one channel's filter config; returns (type_id, filter-list).
        Entries that fail to parse are logged and skipped."""
        params = {
            'allowedRC': '1',
            'channelId': body['type_id'],
            'platform': 'pcweb',
            '_support': '10000000',
        }
        data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
        ft = []
        for i in data['data']['listItems']:
            try:
                value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
                               value.get('tagName')]
                ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
            except Exception:
                print(i)
        return body['type_id'], ft

    def fetch_page_data(self, page, id, b=False):
        """Fetch one page of the episode list as 'title$url' strings.
        With b=True also return the raw response (used for total_page)."""
        body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
                'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
        data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
        ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
        if b:
            return data, ldata
        else:
            return ldata
diff --git a/py/追星影视.py b/py/追星影视.py
new file mode 100644
index 0000000..8f00cac
--- /dev/null
+++ b/py/追星影视.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import string
+import sys
+from base64 import b64decode, b64encode
+from urllib.parse import quote, unquote
+sys.path.append('..')
+import concurrent.futures
+from base.spider import Spider
+
+
class Spider(Spider):
    """Spider for the 追星影视 app API (xy.51gy.top)."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API root.
    host='https://xy.51gy.top'

    # Headers mimicking the official Android client. NOTE(review): 'sg'
    # looks like a fixed app signature token — confirm it does not expire.
    headers = {
        'User-Agent': 'okhttp/4.9.1',
        'mark-time': 'null',
        'fn-api-version': '3.1.9',
        'versionCode': '19',
        'product': 'gysg',
        'sg': '22664e555e0015684f988833803b3055',
    }

    def homeContent(self, filter):
        """Build the home page: categories, per-category filters (fetched
        concurrently via fts) and banner videos."""
        data=self.fetch(f"{self.host}/api.php/vod/type", headers=self.headers).json()
        result,filters,videos = {},{},[]
        # First type entry feeds the banner request below; the rest become
        # visible categories (type_name doubles as type_id).
        classes = [{'type_id': i['type_name'], 'type_name': i['type_name']} for i in data['list'][1:]]
        body={'token':'', 'type_id':data['list'][0]['type_id']}
        ldata=self.post(f"{self.host}/api.php/vod/category", data=body, headers=self.headers).json()
        for i in ldata['data']['banner']:
            videos.append({
                'vod_id':i.get('vod_id'),
                'vod_name':i.get('vod_name'),
                'vod_pic':i.get('vod_pic_thumb')
            })
        # One worker per category; failures only lose that category's filters.
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(classes)) as executor:
            future_to_aid = {executor.submit(self.fts, aid): aid for aid in classes}
            for future in concurrent.futures.as_completed(future_to_aid):
                aid = future_to_aid[future]
                try:
                    aid_id, fts = future.result()
                    filters[aid_id] = fts
                except Exception as e:
                    print(f"Error processing aid {aid}: {e}")
        result['class'] = classes
        result['filters'] = filters
        result['list'] = videos
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """List category `tid`, page `pg`; '全部' ("all") is the server's
        wildcard for unset filter fields. Pagecount/total are sentinels."""
        params={'state':extend.get('state',tid) or tid,'class':extend.get('classes','全部'),'area':extend.get('area','全部'),'year':extend.get('year','全部'),'lang':extend.get('lang','全部'),'version':extend.get('version','全部'),'pg':pg}
        data=self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        result = {}
        videos = []
        for i in data['data']['list']:
            # Entries with vod_id 0 are placeholders; skip them.
            if str(i.get('vod_id', 0)) != '0':
                videos.append({
                    'vod_id': i.get('vod_id'),
                    'vod_name': i.get('vod_name'),
                    'vod_pic': i.get('vod_pic'),
                    'vod_year': f"{i.get('vod_score')}分",
                    'vod_remarks': i.get('vod_remarks')
                })
        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch detail; each episode entry is packed as
        'name$<base64 json {parse,url,headers}>' for playerContent."""
        body={'ids':ids[0],'uni_code':self.getunc(),'ac':'detail','token':''}
        data=self.post(f"{self.host}/api.php/vod/detail2", data=body, headers=self.headers).json()
        v=data['data']
        vod = {
            'type_name': v.get('type_name'),
            'vod_year': v.get('vod_year'),
            'vod_area': v.get('vod_area'),
            'vod_lang': v.get('vod_lang'),
            'vod_remarks': v.get('vod_remarks'),
            'vod_actor': v.get('vod_actor'),
            'vod_director': v.get('vod_director'),
            'vod_content': v.get('vod_content')
        }
        # n = source names, p = per-source '#'-joined episode strings.
        n,p=[],[]
        for i in v['vod_play_list']:
            pp=i['player_info']
            n.append(pp['show'])
            np=[]
            for j in i['urls']:
                cd={'parse':pp.get('parse'),'url':j['url'],'headers':pp.get('headers')}
                np.append(f"{j['name']}${self.e64(json.dumps(cd))}")
            p.append('#'.join(np))
        vod.update({'vod_play_from':'$$$'.join(n),'vod_play_url':'$$$'.join(p)})
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Search; the API already returns vod_* shaped entries, pass through."""
        data=self.fetch(f"{self.host}/api.php/vod/search", params={'keywords':key,'type':'1','pg':pg}, headers=self.headers).json()
        return {'list':data['list'],'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Decode the base64 payload from detailContent. Single 'k => v'
        header supported; when 'parse' is a list of parser prefixes, offer
        one localProxy URL per parser instead of the raw url."""
        ids=json.loads(self.d64(id))
        headers = {}
        urls=ids['url']
        if ids.get('headers'):
            hs=ids['headers'].split('=>',1)
            headers[hs[0].strip()]=hs[-1].strip()
        if isinstance(ids.get('parse'), list) and len(ids['parse']) > 0:
            urls=[]
            for i,x in enumerate(ids['parse']):
                su=f"{self.getProxyUrl()}&url={quote(x+ids['url'])}"
                urls.extend([f'解析{i+1}',su])
        return {'parse': 0, 'url': urls, 'header': headers}

    def localProxy(self, param):
        """Resolve a parser URL server-side and 302-redirect the player to
        the resolved stream; empty response on failure."""
        try:
            body = {'url':unquote(param['url'])}
            data=self.post(f"{self.host}/api.php/vod/m_jie_xi", data=body, headers=self.headers).json()
            url=data.get('url') or data['data'].get('url')
            return [302,'video/MP2T',None,{'Location':url}]
        except:
            return []

    def liveContent(self, url):
        pass

    def fts(self, tdata):
        """Fetch one category's filter options from its first list page.
        Only the whitelisted type_extend keys become filters."""
        params={'state':tdata['type_id'],'pg':'1'}
        data = self.fetch(f"{self.host}/api.php/vod/list", params=params, headers=self.headers).json()
        ftks = ["classes", "area", "lang", "year", "version", "state"]
        filter = [
            {
                'name': k,
                'key': k,
                'value': [{'n': i, 'v': i} for i in v.split(',')]
            }
            for k, v in data['data']['classes']["type_extend"].items()
            if k in ftks and v
        ]
        return tdata['type_id'],filter

    def getunc(self):
        """Random 16-char [a-z0-9] device code, base64-encoded, sent with
        detail requests."""
        chars = string.ascii_lowercase + string.digits
        data = ''.join(random.choice(chars) for _ in range(16))
        return self.e64(data)

    def e64(self, text):
        """Base64-encode a UTF-8 string; '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            return ""

    def d64(self,encoded_text):
        """Base64-decode to a UTF-8 string; '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            return ""
\ No newline at end of file
diff --git a/py/金牌影视.py b/py/金牌影视.py
new file mode 100644
index 0000000..815951a
--- /dev/null
+++ b/py/金牌影视.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import threading
+import uuid
+import requests
+sys.path.append('..')
+from base.spider import Spider
+import time
+from Crypto.Hash import MD5, SHA1
+
class Spider(Spider):
    '''
    Spider for the 金牌影视 sites (multi-domain, fastest host wins).

    Configuration example (values are placeholders; "site" may list several
    fallback domains, comma-separated):
    {
        "key": "xxxx",
        "name": "xxxx",
        "type": 3,
        "api": ".所在路径/金牌.py",
        "searchable": 1,
        "quickSearch": 1,
        "filterable": 1,
        "changeable": 1,
        "ext": {
            "site": "https://www.jiabaide.cn,域名2,域名3"
        }
    },
    '''
    def init(self, extend=""):
        # Pick the lowest-latency host from the comma-separated "site" list.
        if extend:
            hosts=json.loads(extend)['site']
            self.host = self.host_late(hosts)
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build categories from the type API and per-type filters from the
        filter API. Type id '1' drops the '最近更新' sort option."""
        cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
        fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
        result = {}
        classes = []
        filters={}
        for k in cdata['data']:
            classes.append({
                'type_name': k['typeName'],
                'type_id': str(k['typeId']),
            })
        sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
        for tid, d in fdata['data'].items():
            current_sort_values = sort_values.copy()
            if tid == '1':
                del current_sort_values[0]
            filters[tid] = [
                {"key": "type", "name": "类型",
                 "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},

                # plotList may be empty; only then is the '剧情' filter omitted.
                *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
                                                "value": [{"n": i["itemText"], "v": i["itemText"]}
                                                          for i in d["plotList"]]}]),

                {"key": "area", "name": "地区",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},

                {"key": "year", "name": "年份",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},

                {"key": "lang", "name": "语言",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},

                {"key": "sort", "name": "排序", "value": current_sort_values}
            ]
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home page: merge the 'all list' sections with the hot-search list."""
        data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
        data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
        data=[]
        for i in data1['data'].values():
            data.extend(i['list'])
        data.extend(data2['data'])
        vods=self.getvod(data)
        return {'list':vods}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered listing for type `tid`. NOTE(review): the URL query is
        built from `params` before getheaders() adds key/t to the same dict,
        so the signed param set and the URL differ by design — verify against
        the server contract before touching this."""

        params = {
            "area": extend.get('area', ''),
            "filterStatus": "1",
            "lang": extend.get('lang', ''),
            "pageNum": pg,
            "pageSize": "30",
            "sort": extend.get('sort', '1'),
            "sortBy": "1",
            "type": extend.get('type', ''),
            "type1": tid,
            "v_class": extend.get('v_class', ''),
            "year": extend.get('year', '')
        }
        data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
        result = {}
        result['list'] = self.getvod(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch detail; episodes are encoded as 'name$<vid>@@<nid>' (single
        episode uses the vod title as the label)."""
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
        vod=self.getvod([data['data']])[0]
        vod['vod_play_from']='金牌'
        vod['vod_play_url'] = '#'.join(
            f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
            vod['episodelist'])
        vod.pop('episodelist', None)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Signed keyword search."""
        params = {
            "keyword": key,
            "pageNum": pg,
            "pageSize": "8",
            "sourceCode": "1"
        }
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
        vods=self.getvod(data['data']['result']['list'])
        return {'list':vods,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve '<vid>@@<nid>' to a [resolutionName, url, ...] flat list
        for the player; direct play (parse 0) with browser-like headers."""
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
            'sec-ch-ua-platform': '"Windows"',
            'DNT': '1',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
            'sec-ch-ua-mobile': '?0',
            'Origin': self.host,
            'Referer': f'{self.host}/'
        }
        ids=id.split('@@')
        pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
        vlist=[]
        for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
        return {'parse':0,'url':vlist,'header':self.header}

    def localProxy(self, param):
        pass

    def host_late(self, url_list):
        """Probe all candidate hosts in parallel (HEAD, 1s timeout) and
        return the one with the lowest latency; unreachable hosts rank last."""
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list
        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}
        threads = []

        def test_host(url):
            try:
                start_time = time.time()
                response = requests.head(url, timeout=1.0, allow_redirects=False)
                delay = (time.time() - start_time) * 1000
                results[url] = delay
            except Exception as e:
                results[url] = float('inf')
        for url in urls:
            t = threading.Thread(target=test_host, args=(url,))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        return min(results.items(), key=lambda x: x[1])[0]

    def md5(self, sign_key):
        """Hex MD5 digest of a UTF-8 string (step one of the API signature)."""
        md5_hash = MD5.new()
        md5_hash.update(sign_key.encode('utf-8'))
        md5_result = md5_hash.hexdigest()
        return md5_result

    def js(self, param):
        """Join a dict as 'k=v&k2=v2' (no URL escaping — keys/values are
        assumed safe)."""
        return '&'.join(f"{k}={v}" for k, v in param.items())

    def getheaders(self, param=None):
        """Build signed request headers: sign = SHA1(MD5(query+key+t)).
        NOTE(review): this mutates the caller's `param` dict in place (adds
        'key' and 't'); no visible caller depends on that — consider copying."""
        if param is None:param = {}
        t=str(int(time.time()*1000))
        param['key']='cb808529bae6b6be45ecfab29a4889bc'
        param['t']=t
        sha1_hash = SHA1.new()
        sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
        sign = sha1_hash.hexdigest()
        deviceid = str(uuid.uuid4())
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'sign': sign,
            't': t,
            'deviceid':deviceid
        }
        return headers

    def convert_field_name(self, field):
        """Map API camel-ish names to vod_/type_ prefixed keys. Note
        str.replace replaces every occurrence, not just the prefix."""
        field = field.lower()
        if field.startswith('vod') and len(field) > 3:
            field = field.replace('vod', 'vod_')
        if field.startswith('type') and len(field) > 4:
            field = field.replace('type', 'type_')
        return field

    def getvod(self, array):
        """Normalise a list of API items into vod dicts via convert_field_name."""
        return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]
+
diff --git a/py/锦鲤短剧.py b/py/锦鲤短剧.py
new file mode 100644
index 0000000..b3a2dbc
--- /dev/null
+++ b/py/锦鲤短剧.py
@@ -0,0 +1,147 @@
+from base.spider import Spider
+import re,sys,json
+sys.path.append('..')
+
class Spider(Spider):
    """Spider for the 锦鲤短剧 (jinlidj.com) short-drama JSON API."""

    # API host, site origin (required in origin/referer) and the shared
    # search endpoint used by home/category/search.
    api_host = 'https://api.jinlidj.com'
    origin = 'https://www.jinlidj.com'
    api_path = '/api/search'
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
        'Content-Type': "application/json",
        'accept-language': "zh-CN,zh;q=0.9",
        'cache-control': "no-cache",
        'origin': origin,
        'pragma': "no-cache",
        'priority': "u=1, i",
        'referer': origin+'/',
        'sec-ch-ua': "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\"",
        'sec-ch-ua-mobile': "?0",
        'sec-ch-ua-platform': "\"Windows\"",
        'sec-fetch-dest': "empty",
        'sec-fetch-mode': "cors",
        'sec-fetch-site': "same-site"
    }

    def homeContent(self, filter):
        """Return the fixed category list (ids are defined server-side)."""
        return {'class': [{'type_id': 1, 'type_name': '情感关系'}, {'type_id': 2, 'type_name': '成长逆袭'}, {'type_id': 3, 'type_name': '奇幻异能'}, {'type_id': 4, 'type_name': '战斗热血'}, {'type_id': 5, 'type_name': '伦理现实'}, {'type_id': 6, 'type_name': '时空穿越'}, {'type_id': 7, 'type_name': '权谋身份'}]}

    def homeVideoContent(self):
        """Home page: first page of the generic search with empty filters."""
        payload = {
            "page": 1,
            "limit": 24,
            "type_id": "",
            "year": "",
            "keyword": ""
        }
        response = self.post(f"{self.api_host}{self.api_path}", data=json.dumps(payload), headers=self.headers).json()
        data = response['data']
        videos = []
        for i in data['list']:
            videos.append({
                'vod_id': i.get('vod_id'),
                'vod_name': i.get('vod_name'),
                'vod_class': i.get('vod_class'),
                'vod_pic': i.get('vod_pic'),
                'vod_year': i.get('vod_year'),
                # assumes vod_total is a str — TODO confirm against the API
                'vod_remarks': i.get('vod_total')+'集',
                'vod_score': i.get('vod_score')
            })
        return {'list': videos}

    def detailContent(self, ids):
        """Fetch detail; data['player'] maps episode name -> page url and is
        packed as 'name$url#name$url…'."""
        response = self.post(f'{self.api_host}/api/detail/{ids[0]}', data=json.dumps({}), headers=self.headers).json()
        data = response['data']
        videos = []
        vod_play_url = ''
        for name,url in data['player'].items():
            vod_play_url += f'{name}${url}#'
        # BUG FIX: str.rstrip returns a new string; the original discarded
        # the result, leaving a trailing '#'. Assign it back.
        vod_play_url = vod_play_url.rstrip('#')
        videos.append({
            'vod_id': data.get('vod_id'),
            'vod_name': data.get('vod_name'),
            'vod_content': data.get('vod_blurb'),
            'vod_remarks': '集数:' + data.get('vod_total'),
            "vod_director": data.get('vod_director'),
            "vod_actor": data.get('vod_actor'),
            'vod_year': data.get('vod_year'),
            'vod_area': data.get('vod_area'),
            'vod_play_from': '锦鲤短剧',
            'vod_play_url': vod_play_url
        })
        return {'list': videos}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search through the shared search endpoint."""
        payload = {
            "page": pg,
            "limit": 24,
            "type_id": "",
            "keyword": key
        }
        response = self.post(f'{self.api_host}{self.api_path}', data=json.dumps(payload), headers=self.headers).json()
        data = response['data']
        videos = []
        for i in data['list']:
            videos.append({
                "vod_id": i['vod_id'],
                "vod_name": i['vod_name'],
                "vod_class": i['vod_class'],
                "vod_pic": i['vod_pic'],
                'vod_year': i.get('vod_year'),
                "vod_remarks": i['vod_total'] + '集'
            })
        return {'list': videos, 'page': pg, 'total': data['total'], 'limit': 24}

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing through the shared search endpoint (type_id only)."""
        payload = {
            "page": pg,
            "limit": 24,
            "type_id": tid,
            "year": "",
            "keyword": ""
        }
        response = self.post(f'{self.api_host}{self.api_path}', data=json.dumps(payload), headers=self.headers).json()
        data = response['data']
        videos = []
        for i in data['list']:
            videos.append({
                'vod_id': i.get('vod_id'),
                'vod_name': i.get('vod_name'),
                'vod_class': i.get('vod_class'),
                'vod_pic': i.get('vod_pic'),
                'vod_remarks': i.get('vod_total')+'集',
                'vod_year': i.get('vod_year'),
                'vod_score': i.get('vod_score')
            })
        return {'list': videos}

    def playerContent(self, flag, id, vipflags):
        """Scrape the episode page for `let data = {...http...};` and play its
        'url' directly; on any failure fall back to handing the page URL to
        an external parser (parse=1)."""
        parse = 0
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'}
        try:
            response = self.fetch(id, headers=self.headers).text
            match = re.search(r'let\s+data\s*=\s*(\{[^}]*http[^}]*\});', response, re.IGNORECASE)
            data = match.group(1)
            data2 = json.loads(data)
            url = data2['url']
        except Exception:
            url, parse, header = id, 1, self.headers
        return {'parse': parse, 'url': url,'header': header}

    def init(self, extend=''):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def localProxy(self, param):
        pass
diff --git a/py/零度影视.py b/py/零度影视.py
new file mode 100644
index 0000000..0caa59e
--- /dev/null
+++ b/py/零度影视.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor
+sys.path.append('..')
+from base.spider import Spider
+
class Spider(Spider):
    """Spider for the 零度影视 app API (ldys.sq1005.top)."""

    def init(self, extend=""):
        # Bootstrap: attach a (cached) device id, then a visitor token.
        # NOTE(review): this mutates the class-level headers dict, shared
        # across instances — confirm single-instance usage.
        did=self.getdid()
        self.headers.update({'deviceId': did})
        token=self.gettk()
        self.headers.update({'token': token})
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # API root.
    host='http://ldys.sq1005.top'

    # Base client headers; init() adds deviceId and token.
    headers = {
        'User-Agent': 'okhttp/4.12.0',
        'client': 'app',
        'deviceType': 'Android'
    }

    def homeContent(self, filter):
        """Build categories and filters from the screenType tree; a fixed
        sort filter is appended to every category."""
        data=self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
        result = {}
        # Maps the server's Chinese filter group names to API keys.
        cate = {
            "类型": "classify",
            "地区": "region",
            "年份": "year"
        }
        sort={
            'key':'sreecnTypeEnum',
            'name': '排序',
            'value':[{'n':'最新','v':'NEWEST'},{'n':'人气','v':'POPULARITY'},{'n':'评分','v':'COLLECT'},{'n':'热搜','v':'HOT'}]
        }
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['name'],
                'type_id': k['id']
            })
            filters[k['id']] = []
            for v in k['children']:
                filters[k['id']].append({
                    'name': v['name'],
                    'key': cate[v['name']],
                    'value':[{'n':i['name'],'v':i['name']} for i in v['children']]
                })
            filters[k['id']].append(sort)
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home page: recommendation sub-list, condition 64 (server-defined)."""
        jdata={"condition":64,"pageNum":1,"pageSize":40}
        data=self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
        return {'list':self.getlist(data['data']['records'])}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered listing; extend keys merge into the condition object.
        Pagecount/total are sentinels (no real totals exposed)."""
        jdata = {
            'condition': {
                'sreecnTypeEnum': 'NEWEST',
                'typeId': tid,
            },
            'pageNum': int(pg),
            'pageSize': 40,
        }
        jdata['condition'].update(extend)
        data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
        result = {}
        result['list'] = self.getlist(data['data']['records'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch description + episode lists for every player source.

        The first source comes from the initial movieDetails call; remaining
        sources are fetched concurrently via getd(). Episodes are packed by
        getv() as 'episode$<base64 request json>' for playerContent. Sources
        with no episodes are dropped.
        """
        ids = ids[0].split('@@')  # vod_id format: '<id>@@<typeId>'
        jdata = {"id": int(ids[0]), "typeId": ids[-1]}
        v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
        v = v['data']
        vod = {
            'type_name': v.get('classify'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_actor': v.get('star'),
            'vod_director': v.get('director'),
            'vod_content': v.get('introduce'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        # l = player sources, n = id -> display name, pd = id -> episode string.
        l = c['data']['moviePlayerList']
        n = {str(i['id']): i['moviePlayerName'] for i in l}
        m = jdata.copy()
        m.update({'playerId': str(l[0]['id'])})
        pd = self.getv(m, c['data']['episodeList'])
        if len(l)-1:
            with ThreadPoolExecutor(max_workers=len(l)-1) as executor:
                future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
                for future in future_to_player:
                    try:
                        o,p = future.result()
                        pd.update(self.getv(o,p))
                    except Exception as e:
                        print(f"请求失败: {e}")
        w, e = [],[]
        for i, x in pd.items():
            if x:
                w.append(n[i])
                e.append(x)
        vod['vod_play_from'] = '$$$'.join(w)
        vod['vod_play_url'] = '$$$'.join(e)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search."""
        jdata={
            "condition": {
                "value": key
            },
            "pageNum": int(pg),
            "pageSize": 40
        }
        data=self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
        return {'list':self.getlist(data['data']['records']),'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Decode the base64 request payload, fetch the episode url, then try
        the analysis endpoint; fall back to the raw url on failure."""
        jdata=json.loads(self.d64(id))
        data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        try:
            params={'playerUrl':data['data']['url'],'playerId':jdata['playerId']}
            pd=self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
            url,p=pd['data'],0
        except Exception as e:
            print(f"请求失败: {e}")
            url,p=data['data']['url'],0
        return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}

    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def gettk(self):
        """Fetch a visitor token from the API."""
        data=self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
        return data['data']['token']

    def getdid(self):
        """Return a cached 16-hex-char device id, generating and caching one
        on first use."""
        did=self.getCache('ldid')
        if not did:
            hex_chars = '0123456789abcdef'
            did =''.join(random.choice(hex_chars) for _ in range(16))
            self.setCache('ldid',did)
        return did

    def getd(self,jdata,player):
        """Fetch one player source's episode list; returns (request, episodes)."""
        x = jdata.copy()
        x.update({'playerId': str(player['id'])})
        response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
        return x, response['data']['episodeList']

    def getv(self,d,c):
        """Pack episodes `c` for request `d` into
        {playerId: 'ep$<base64 json>#…'} consumed by playerContent."""
        f={d['playerId']:''}
        g=[]
        for i in c:
            j=d.copy()
            j.update({'episodeId':str(i['id'])})
            g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
        f[d['playerId']]='#'.join(g)
        return f

    def getlist(self,data):
        """Normalise listing records into vod dicts (id packed with typeId)."""
        videos = []
        for i in data:
            videos.append({
                'vod_id': f"{i['id']}@@{i['typeId']}",
                'vod_name': i.get('name'),
                'vod_pic': i.get('cover'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('totalEpisode')
            })
        return videos

    def e64(self, text):
        """Base64-encode a UTF-8 string; '' (with a log line) on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self,encoded_text):
        """Base64-decode to a UTF-8 string; '' (with a log line) on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""
diff --git a/spider.jar b/spider.jar
new file mode 100644
index 0000000..b370608
Binary files /dev/null and b/spider.jar differ
diff --git a/upload_to_webdav.py b/upload_to_webdav.py
new file mode 100644
index 0000000..a92560d
--- /dev/null
+++ b/upload_to_webdav.py
@@ -0,0 +1,181 @@
+import os
+import requests
+from requests.auth import HTTPBasicAuth
+
def upload_folder_to_webdav(local_folder_path, remote_webdav_path, webdav_url, username, password, exclude_patterns=None):
    """Mirror a local folder onto a WebDAV share, skipping excluded names.

    Args:
        local_folder_path (str): folder to upload.
        remote_webdav_path (str): destination path on the WebDAV server.
        webdav_url (str): base URL of the WebDAV server.
        username (str): WebDAV user name.
        password (str): WebDAV password.
        exclude_patterns (list | None): directory/file names to skip;
            defaults to common VCS/metadata names.

    Returns:
        bool: False when the local folder is missing or not a directory;
        True otherwise (individual file/directory failures are logged
        and skipped rather than aborting the whole upload).
    """
    if exclude_patterns is None:
        exclude_patterns = ['.git', '.DS_Store', '__pycache__', '.svn', '.hg']

    # Bail out early when the source path is unusable.
    if not os.path.exists(local_folder_path):
        print(f"本地文件夹 {local_folder_path} 不存在")
        return False
    if not os.path.isdir(local_folder_path):
        print(f"{local_folder_path} 不是一个文件夹")
        return False

    print(f"开始上传文件夹 {local_folder_path} 到 {webdav_url}{remote_webdav_path}")
    print(f"排除模式: {exclude_patterns}")

    for current_root, subdirs, filenames in os.walk(local_folder_path):
        # Prune excluded directories in place so os.walk never descends into them.
        subdirs[:] = [name for name in subdirs if name not in exclude_patterns]
        wanted_files = [name for name in filenames if name not in exclude_patterns]

        rel = os.path.relpath(current_root, local_folder_path)
        rel = "" if rel == "." else rel

        # Mirror the local sub-path on the server, always with forward slashes.
        if rel:
            remote_dir_path = os.path.join(remote_webdav_path, rel).replace("\\", "/")
        else:
            remote_dir_path = remote_webdav_path

        if not create_webdav_directory(remote_dir_path, webdav_url, username, password):
            print(f"创建远程目录 {remote_dir_path} 失败")
            continue

        for name in wanted_files:
            local_file_path = os.path.join(current_root, name)
            if rel:
                remote_file_path = os.path.join(remote_webdav_path, rel, name).replace("\\", "/")
            else:
                remote_file_path = os.path.join(remote_webdav_path, name).replace("\\", "/")

            if not upload_file_to_webdav(local_file_path, remote_file_path, webdav_url, username, password):
                print(f"上传文件 {local_file_path} 失败")

    print(f"文件夹 {local_folder_path} 上传完成")
    return True
+
def create_webdav_directory(remote_dir_path, webdav_url, username, password):
    """Create a directory on the WebDAV server via MKCOL.

    Args:
        remote_dir_path (str): directory path on the server.
        webdav_url (str): base URL of the WebDAV server.
        username (str): WebDAV user name.
        password (str): WebDAV password.

    Returns:
        bool: True when the directory was created (201) or already exists
        (405); False on any other status code or on a request error.
    """
    try:
        # NOTE(review): no timeout is set, so this can block indefinitely on
        # an unresponsive server — consider requests' timeout= parameter.
        response = requests.request(
            "MKCOL",
            f"{webdav_url}{remote_dir_path}",
            auth=HTTPBasicAuth(username, password),
        )
        # 201 = created; 405 = already exists (treated as success).
        # (The original else-branch re-check of 405 was unreachable dead
        # code — 405 is already accepted here — and has been removed.)
        if response.status_code in (201, 405):
            return True
        print(f"创建目录 {remote_dir_path} 失败: {response.status_code}")
        return False
    except Exception as e:
        print(f"创建目录 {remote_dir_path} 时出错: {e}")
        return False
+
def upload_file_to_webdav(local_file_path, remote_file_path, webdav_url, username, password):
    """Upload one local file to the WebDAV server, replacing any existing copy.

    Args:
        local_file_path (str): file on disk to upload.
        remote_file_path (str): destination path on the server.
        webdav_url (str): base URL of the WebDAV server.
        username (str): WebDAV user name.
        password (str): WebDAV password.

    Returns:
        bool: True on a 201/204 response, False otherwise or on error.
    """
    try:
        # Remove any existing remote file first so the PUT is a clean replace.
        delete_from_webdav(remote_file_path, webdav_url, username, password)

        with open(local_file_path, 'rb') as stream:
            response = requests.put(
                f"{webdav_url}{remote_file_path}",
                data=stream,
                auth=HTTPBasicAuth(username, password),
            )

        if response.status_code in (201, 204):
            print(f"文件 {local_file_path} 上传成功到 {remote_file_path}")
            return True
        print(f"文件 {local_file_path} 上传失败: {response.status_code} {response.text}")
        return False
    except Exception as e:
        print(f"上传文件 {local_file_path} 时出错: {e}")
        return False
+
def delete_from_webdav(remote_path, webdav_url, username, password):
    """Delete a file on the WebDAV server, tolerating missing files.

    Args:
        remote_path (str): file path on the server.
        webdav_url (str): base URL of the WebDAV server.
        username (str): WebDAV user name.
        password (str): WebDAV password.

    Returns:
        bool: True whenever the DELETE request completes, regardless of
        status code (204 = deleted, 404 = already absent, and any other
        status is deliberately ignored — a following PUT will surface real
        problems); False only when the request itself raises.
    """
    try:
        requests.delete(
            f"{webdav_url}{remote_path}",
            auth=HTTPBasicAuth(username, password),
        )
        # The original status-code branches both returned True, so the
        # check was dead code; the best-effort contract is kept as-is.
        return True
    except Exception as e:
        print(f"删除文件 {remote_path} 时出错: {e}")
        return False
+
# 使用示例
if __name__ == "__main__":
    # SECURITY(review): credentials were hard-coded in source. They can now
    # be overridden via environment variables; the committed defaults are
    # kept for backward compatibility but should be rotated and removed
    # from version control.
    local_folder = os.environ.get("WEBDAV_LOCAL_FOLDER", "../tvbox")      # 本地文件夹路径
    remote_path = os.environ.get("WEBDAV_REMOTE_PATH", "/home/TVBox/Private/tvbox")  # WebDAV目标路径
    webdav_url = os.environ.get("WEBDAV_URL", "http://47.106.254.96:9120/dav")       # WebDAV服务器URL
    username = os.environ.get("WEBDAV_USERNAME", "lwang")                 # 用户名
    password = os.environ.get("WEBDAV_PASSWORD", "lw19971017")            # 密码

    # Directory/file names to skip while walking the local tree.
    exclude_list = ['.git', '.DS_Store', '__pycache__', '.svn', '.hg', 'node_modules']

    # 执行上传
    upload_folder_to_webdav(local_folder, remote_path, webdav_url, username, password, exclude_list)
\ No newline at end of file