forked from mirrors/pkg-proxy
Compare commits: main...apply-go-f (44 commits)

c655399a07, e3cc5516b5, 50772d8947, 2816ff2414, d3d11445c6, 0c02455dc9, 9849ac9f16, 7346008aa5, c01f0a5c05, 47681066b5, 02738651ab, d62c42b8d7, 7985a28839, 773fe55bd9, d919d9ce3e, 73d78c28fb, 7d0ac94c6c, 5c4f6f3b85, 497afdf317, 55db8f94fc, d64fcb5b98, 01b4e7210d, 8b762ffb39, 94f4a7dfa6, a947a7546a, e1d2331ff0, e36a92433e, 941ed51f76, b68184cbab, bcbb883d1b, 33d99e337b, 81f505757f, 7a758b9450, 3bccc01776, 43a164ed72, 9708fe31a8, cb9bbbc385, 75ff85f2f0, 70fe686953, 24d5e77443, 15c133f1fa, e45706d808, 34009bad98, ec9c437498
72 changed files with 7171 additions and 1384 deletions
## .github/workflows/release.yml (vendored, 3 changes)

@@ -7,6 +7,7 @@ on:

```yaml
permissions:
  contents: write
  id-token: write

jobs:
  release:
```

@@ -18,6 +19,8 @@ jobs:

```yaml
          fetch-depth: 0
          persist-credentials: false

      - uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1

      - name: Set up Go
        uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
        with:
```
---

@@ -34,6 +34,18 @@ archives:

```yaml
checksum:
  name_template: "checksums.txt"

signs:
  - cmd: cosign
    certificate: "${artifact}.pem"
    args:
      - sign-blob
      - "--output-certificate=${certificate}"
      - "--output-signature=${signature}"
      - "${artifact}"
      - "--yes"
    artifacts: checksum
    output: true

snapshot:
  version_template: "{{ incpatch .Version }}-next"
```
## LICENSE (new file, 232 additions)

@@ -0,0 +1,232 @@

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and modification follow.

TERMS AND CONDITIONS

0. Definitions.

“This License” refers to version 3 of the GNU General Public License.

“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.

To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.

A “covered work” means either the unmodified Program or a work based on the Program.

To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.

To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.

1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.

A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.

The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.

The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.

The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.

The Corresponding Source for a work in source code form is that same work.

2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.

When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.

4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified it, and giving a relevant date.

b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.

c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.

d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.

7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.

A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an “about box”.

You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>.
## README.md (68 changes)

@@ -18,9 +18,7 @@ cooldown:

```diff
 A 3-day cooldown means that when `lodash` publishes version `4.18.0`, your builds keep using `4.17.21` until 3 days have passed. If the new release turns out to be compromised, you were never exposed.

-Resolution order: package override, then ecosystem override, then global default. This lets you set a conservative default and carve out exceptions for packages where you need faster updates.
-
-Currently works with npm, PyPI, pub.dev, Composer, and Cargo, which all include publish timestamps in their metadata. See [docs/configuration.md](docs/configuration.md) for the full config reference.
+Resolution order: package override, then ecosystem override, then global default. This lets you set a conservative default and carve out exceptions for packages where you need faster updates. See [docs/configuration.md](docs/configuration.md) for the full config reference.

 ## Supported Registries
```
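The cooldown rule this hunk documents is just an age gate at version-listing time. A minimal sketch of that gate, with all type and function names assumed rather than taken from the proxy's source:

```go
package main

import (
	"fmt"
	"time"
)

// version is a stand-in for whatever the proxy tracks per release;
// only the publish timestamp matters for cooldown.
type version struct {
	Number      string
	PublishedAt time.Time
}

// applyCooldown hides any version younger than the cooldown window,
// so a freshly published (possibly compromised) release stays invisible
// until the window has elapsed.
func applyCooldown(versions []version, cooldown time.Duration, now time.Time) []version {
	var visible []version
	for _, v := range versions {
		if now.Sub(v.PublishedAt) >= cooldown {
			visible = append(visible, v)
		}
	}
	return visible
}

func main() {
	now := time.Now()
	vs := []version{
		{"4.17.21", now.Add(-90 * 24 * time.Hour)}, // old release, stays visible
		{"4.18.0", now.Add(-24 * time.Hour)},       // 1 day old, hidden by a 3-day cooldown
	}
	for _, v := range applyCooldown(vs, 3*24*time.Hour, now) {
		fmt.Println(v.Number)
	}
}
```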
@@ -28,16 +26,16 @@ Currently works with npm, PyPI, pub.dev, Composer, and Cargo, which all include

```diff
 |----------|-------------------|:--------:|:---------:|
 | npm | JavaScript | Yes | ✓ |
 | Cargo | Rust | Yes | ✓ |
-| RubyGems | Ruby | | ✓ |
+| RubyGems | Ruby | Yes | ✓ |
 | Go proxy | Go | | ✓ |
-| Hex | Elixir | | ✓ |
+| Hex | Elixir | Yes* | ✓ |
 | pub.dev | Dart | Yes | ✓ |
 | PyPI | Python | Yes | ✓ |
 | Maven | Java | | ✓ |
-| NuGet | .NET | | ✓ |
+| NuGet | .NET | Yes | ✓ |
 | Composer | PHP | Yes | ✓ |
 | Conan | C/C++ | | ✓ |
-| Conda | Python/R | | ✓ |
+| Conda | Python/R | Yes | ✓ |
 | CRAN | R | | ✓ |
 | Container | Docker/OCI | | ✓ |
 | Debian | Debian/Ubuntu | | ✓ |
```
@@ -52,6 +50,16 @@ Currently works with npm, PyPI, pub.dev, Composer, and Cargo, which all include

Cooldown requires publish timestamps in metadata. Registries without a "Yes" in the cooldown column either don't expose timestamps or haven't been wired up yet.

\* Hex cooldown requires disabling registry signature verification (`HEX_NO_VERIFY_REPO_ORIGIN=1`) since the proxy re-encodes the protobuf payload.

## Install

```bash
brew install git-pkgs/git-pkgs/proxy
```

Or download a binary from the [releases page](https://github.com/git-pkgs/proxy/releases).

## Quick Start
@@ -460,6 +468,44 @@ proxy serve [flags]

```bash
proxy [flags]    # same as 'proxy serve'
```

### mirror

Pre-populate the cache from PURLs, SBOM files, or entire registries. Useful for ensuring offline availability or warming the cache before deployments.

```bash
# Mirror specific package versions
proxy mirror pkg:npm/lodash@4.17.21 pkg:cargo/serde@1.0.0

# Mirror all versions of a package
proxy mirror pkg:npm/lodash

# Mirror from a CycloneDX or SPDX SBOM
proxy mirror --sbom sbom.cdx.json

# Preview what would be mirrored
proxy mirror --dry-run pkg:npm/lodash

# Control parallelism
proxy mirror --concurrency 8 pkg:npm/lodash@4.17.21
```

The mirror command accepts the same storage and database flags as `serve`. Already-cached artifacts are skipped.

A mirror API is also available when the server is running:

```bash
# Start a mirror job
curl -X POST http://localhost:8080/api/mirror \
  -H "Content-Type: application/json" \
  -d '{"purls": ["pkg:npm/lodash@4.17.21"]}'

# Check job status
curl http://localhost:8080/api/mirror/mirror-1

# Cancel a running job
curl -X DELETE http://localhost:8080/api/mirror/mirror-1
```
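For scripting, the same flow works from Go. A rough sketch that starts a job and then polls it; the response fields (`id`, `state`) and the `"running"` value are assumptions about the JSON shape, not documented API:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Start a mirror job (same request the curl example sends).
	body := []byte(`{"purls": ["pkg:npm/lodash@4.17.21"]}`)
	resp, err := http.Post("http://localhost:8080/api/mirror", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Assumed response shape: the job id (e.g. "mirror-1") and a state field.
	var job struct {
		ID    string `json:"id"`
		State string `json:"state"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&job); err != nil {
		panic(err)
	}

	// Poll job status until it leaves the (assumed) running state.
	for {
		r, err := http.Get("http://localhost:8080/api/mirror/" + job.ID)
		if err != nil {
			panic(err)
		}
		_ = json.NewDecoder(r.Body).Decode(&job)
		r.Body.Close()
		fmt.Println("state:", job.State)
		if job.State != "running" {
			break
		}
		time.Sleep(2 * time.Second)
	}
}
```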
### stats

Show cache statistics without running the server.
@@ -534,6 +580,14 @@ Recently cached:

| `GET /debian/*` | Debian/APT repository protocol |
| `GET /rpm/*` | RPM/Yum repository protocol |

### Mirror API

| Endpoint | Description |
|----------|-------------|
| `POST /api/mirror` | Start a mirror job (JSON body with `purls`) |
| `GET /api/mirror/{id}` | Get job status and progress |
| `DELETE /api/mirror/{id}` | Cancel a running job |

### Enrichment API

The proxy provides REST endpoints for package metadata enrichment, vulnerability scanning, and outdated detection.
---

@@ -16,6 +16,7 @@

```go
//
// serve    Start the proxy server (default if no command given)
// stats    Show cache statistics
// mirror   Pre-populate cache from PURLs, SBOMs, or registries
//
// Serve Flags:
//
```

@@ -100,7 +101,11 @@ import (

```go
	"github.com/git-pkgs/proxy/internal/config"
	"github.com/git-pkgs/proxy/internal/database"
	"github.com/git-pkgs/proxy/internal/handler"
	"github.com/git-pkgs/proxy/internal/mirror"
	"github.com/git-pkgs/proxy/internal/server"
	"github.com/git-pkgs/proxy/internal/storage"
	"github.com/git-pkgs/registries/fetch"
)

const defaultTopN = 10
```

@@ -124,6 +129,10 @@ func main() {

```go
		os.Args = append(os.Args[:1], os.Args[2:]...)
		runStats()
		return
	case "mirror":
		os.Args = append(os.Args[:1], os.Args[2:]...)
		runMirror()
		return
	case "-version", "--version":
		fmt.Printf("proxy %s (%s)\n", Version, Commit)
		os.Exit(0)
```

@@ -145,6 +154,7 @@ Usage: proxy [command] [flags]

```
Commands:
  serve    Start the proxy server (default)
  stats    Show cache statistics
  mirror   Pre-populate cache from PURLs, SBOMs, or registries

Run 'proxy <command> -help' for more information on a command.
```

@@ -340,6 +350,151 @@ func runStats() {

```go
	}
}

func runMirror() {
	fs := flag.NewFlagSet("mirror", flag.ExitOnError)
	configPath := fs.String("config", "", "Path to configuration file")
	storageURL := fs.String("storage-url", "", "Storage URL (file:// or s3://)")
	databaseDriver := fs.String("database-driver", "", "Database driver: sqlite or postgres")
	databasePath := fs.String("database-path", "", "Path to SQLite database file")
	databaseURL := fs.String("database-url", "", "PostgreSQL connection URL")
	sbomPath := fs.String("sbom", "", "Path to CycloneDX or SPDX SBOM file")
	concurrency := fs.Int("concurrency", 4, "Number of parallel downloads") //nolint:mnd // default concurrency
	dryRun := fs.Bool("dry-run", false, "Show what would be mirrored without downloading")

	fs.Usage = func() {
		fmt.Fprintf(os.Stderr, "git-pkgs proxy - Pre-populate cache\n\n")
		fmt.Fprintf(os.Stderr, "Usage: proxy mirror [flags] [purl...]\n\n")
		fmt.Fprintf(os.Stderr, "Examples:\n")
		fmt.Fprintf(os.Stderr, "  proxy mirror pkg:npm/lodash@4.17.21\n")
		fmt.Fprintf(os.Stderr, "  proxy mirror --sbom sbom.cdx.json\n")
		fmt.Fprintf(os.Stderr, "  proxy mirror pkg:npm/lodash # all versions\n\n")
		fmt.Fprintf(os.Stderr, "Flags:\n")
		fs.PrintDefaults()
	}

	_ = fs.Parse(os.Args[1:])
	purls := fs.Args()

	// Determine source
	var source mirror.Source
	switch {
	case *sbomPath != "":
		source = &mirror.SBOMSource{Path: *sbomPath}
	case len(purls) > 0:
		source = &mirror.PURLSource{PURLs: purls}
	default:
		fmt.Fprintf(os.Stderr, "error: provide PURLs or --sbom\n")
		fs.Usage()
		os.Exit(1)
	}

	// Load config
	cfg, err := loadConfig(*configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error loading config: %v\n", err)
		os.Exit(1)
	}
	cfg.LoadFromEnv()

	if *storageURL != "" {
		cfg.Storage.URL = *storageURL
	}
	if *databaseDriver != "" {
		cfg.Database.Driver = *databaseDriver
	}
	if *databasePath != "" {
		cfg.Database.Path = *databasePath
	}
	if *databaseURL != "" {
		cfg.Database.URL = *databaseURL
	}

	if err := cfg.Validate(); err != nil {
		fmt.Fprintf(os.Stderr, "invalid configuration: %v\n", err)
		os.Exit(1)
	}

	logger := setupLogger("info", "text")

	// Open database
	var db *database.DB
	switch cfg.Database.Driver {
	case "postgres":
		db, err = database.OpenPostgresOrCreate(cfg.Database.URL)
	default:
		db, err = database.OpenOrCreate(cfg.Database.Path)
	}
	if err != nil {
		fmt.Fprintf(os.Stderr, "error opening database: %v\n", err)
		os.Exit(1)
	}
	defer func() { _ = db.Close() }()

	if err := db.MigrateSchema(); err != nil {
		_ = db.Close()
		fmt.Fprintf(os.Stderr, "error migrating schema: %v\n", err)
		os.Exit(1) //nolint:gocritic // db closed above
	}

	// Open storage
	sURL := cfg.Storage.URL
	if sURL == "" {
		sURL = "file://" + cfg.Storage.Path //nolint:staticcheck // backwards compat
	}
	store, err := storage.OpenBucket(context.Background(), sURL)
	if err != nil {
		_ = db.Close()
		fmt.Fprintf(os.Stderr, "error opening storage: %v\n", err)
		os.Exit(1) //nolint:gocritic // db closed above
	}

	// Build proxy (reuses same pipeline as serve)
	fetcher := fetch.NewFetcher()
	resolver := fetch.NewResolver()
	proxy := handler.NewProxy(db, store, fetcher, resolver, logger)
	proxy.CacheMetadata = true // mirror always caches metadata
	proxy.MetadataTTL = cfg.ParseMetadataTTL()

	m := mirror.New(proxy, db, store, logger, *concurrency)

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		sigCh := make(chan os.Signal, 1)
		signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
		<-sigCh
		cancel()
	}()

	if *dryRun {
		items, err := m.RunDryRun(ctx, source)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Would mirror %d package versions:\n", len(items))
		for _, item := range items {
			fmt.Printf("  %s\n", item)
		}
		return
	}

	progress, err := m.Run(ctx, source)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("Mirror complete: %d downloaded, %d skipped (cached), %d failed, %s total\n",
		progress.Completed, progress.Skipped, progress.Failed, formatSize(progress.Bytes))

	if len(progress.Errors) > 0 {
		fmt.Fprintf(os.Stderr, "\nErrors:\n")
		for _, e := range progress.Errors {
			fmt.Fprintf(os.Stderr, "  %s/%s@%s: %s\n", e.Ecosystem, e.Name, e.Version, e.Error)
		}
	}
}

func printStats(db *database.DB, popular, recent int, asJSON bool) error {
	defer func() { _ = db.Close() }()
```
---

@@ -161,11 +161,25 @@ vulnerabilities (

```
  updated_at DATETIME
)
-- indexes: (vuln_id, ecosystem, package_name) unique, (ecosystem, package_name)

metadata_cache (
  id INTEGER PRIMARY KEY,
  ecosystem TEXT NOT NULL,
  name TEXT NOT NULL,
  storage_path TEXT NOT NULL,
  etag TEXT,
  content_type TEXT,
  size INTEGER,        -- BIGINT on Postgres
  fetched_at DATETIME,
  created_at DATETIME,
  updated_at DATETIME
)
-- indexes: (ecosystem, name) unique
```

On PostgreSQL, `INTEGER PRIMARY KEY` becomes `SERIAL`, `DATETIME` becomes `TIMESTAMP`, `INTEGER DEFAULT 0` booleans become `BOOLEAN DEFAULT FALSE`, and size/count columns use `BIGINT`.

```diff
-The `MigrateSchema()` function handles backward compatibility with older git-pkgs databases by adding missing columns via `ALTER TABLE` as needed.
+The `MigrateSchema()` function handles backward compatibility with older git-pkgs databases by running named migrations that add missing columns and tables. See [migrations.md](migrations.md) for how to add new schema changes.
```

**Key operations:**
- `GetPackageByPURL()` - Look up package by PURL
@@ -277,6 +291,12 @@ Version age filtering for supply chain attack mitigation. Configurable at global

Package metadata enrichment. Fetches license, description, homepage, repository URL, and vulnerability data from upstream registries. Powers the `/api/` endpoints and the web UI's package detail pages.

### `internal/mirror`

Selective package mirroring for pre-populating the proxy cache. Supports multiple input sources: individual PURLs (versioned or unversioned), CycloneDX/SPDX SBOM files, and full registry enumeration. Uses a bounded worker pool backed by `errgroup` to download artifacts in parallel, reusing `handler.Proxy.GetOrFetchArtifact()` for the actual fetch-and-cache work.

The package also provides a `MetadataCache` for storing raw upstream metadata blobs so the proxy can serve metadata responses offline. The `JobStore` manages async mirror jobs exposed via the `/api/mirror` endpoints.
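The bounded worker pool mentioned above is the standard `errgroup` pattern. A self-contained sketch, with a placeholder in place of the real `GetOrFetchArtifact()` call:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	items := []string{"pkg:npm/lodash@4.17.21", "pkg:cargo/serde@1.0.0"}

	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 downloads in flight, like --concurrency 4

	for _, item := range items {
		g.Go(func() error {
			// Placeholder for the real fetch-and-cache call; ctx is
			// cancelled as soon as any worker returns an error.
			fmt.Println("fetching", item, "cancelled:", ctx.Err() != nil)
			return nil
		})
	}

	// The first error (if any) is returned here.
	if err := g.Wait(); err != nil {
		fmt.Println("mirror failed:", err)
	}
}
```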
### `internal/config`

Configuration loading.
@@ -326,10 +346,11 @@ Eviction can be implemented as:

```diff
 - Ensures clients fetch artifacts through proxy
 - Alternative: Let clients fetch directly, miss cache opportunity

-**Why not cache metadata?**
+**Why not cache metadata (by default)?**
 - Simplicity - no invalidation logic needed
 - Fresh data - new versions visible immediately
 - Metadata is small, upstream fetch is fast
+- Set `cache_metadata: true` or use the mirror command to enable metadata caching for offline use via the `metadata_cache` table

 **Why stream artifacts?**
 - Memory efficient - don't load large files into RAM
```
---

@@ -209,7 +209,68 @@ Durations support days (`7d`), hours (`48h`), and minutes (`30m`). Set to `0` to

Resolution order: package override, then ecosystem override, then global default. This lets you set a conservative default while exempting trusted packages.

```diff
-Currently supported for npm, PyPI, pub.dev, and Composer. These ecosystems include publish timestamps in their metadata. Other ecosystems (Go, Cargo, RubyGems) would require extra API calls and are not yet supported.
+Currently supported for npm, PyPI, pub.dev, Composer, Cargo, NuGet, Conda, RubyGems, and Hex. These ecosystems include publish timestamps in their metadata.
```

Note: Hex cooldown requires disabling registry signature verification since the proxy re-encodes the protobuf payload without the original signature. Set `HEX_NO_VERIFY_REPO_ORIGIN=1` or configure your repo with `no_verify: true`.
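A sketch of that resolution order in Go; the `Cooldown` struct here is an assumed shape for illustration, not the proxy's actual config type:

```go
package main

import (
	"fmt"
	"time"
)

// Assumed config shape: a global default plus per-ecosystem and
// per-package overrides; the most specific setting wins.
type Cooldown struct {
	Default    time.Duration
	Ecosystems map[string]time.Duration
	Packages   map[string]time.Duration // keyed "ecosystem/name"
}

func (c Cooldown) For(ecosystem, name string) time.Duration {
	if d, ok := c.Packages[ecosystem+"/"+name]; ok {
		return d // package override
	}
	if d, ok := c.Ecosystems[ecosystem]; ok {
		return d // ecosystem override
	}
	return c.Default // global default
}

func main() {
	c := Cooldown{
		Default:    7 * 24 * time.Hour,
		Ecosystems: map[string]time.Duration{"npm": 3 * 24 * time.Hour},
		Packages:   map[string]time.Duration{"npm/lodash": 0}, // trusted: no cooldown
	}
	fmt.Println(c.For("npm", "lodash"))    // package override: 0s
	fmt.Println(c.For("npm", "left-pad"))  // ecosystem override: 72h
	fmt.Println(c.For("pypi", "requests")) // global default: 168h
}
```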
## Metadata Caching

By default the proxy fetches metadata fresh from upstream on every request. Enable `cache_metadata` to store metadata responses in the database and storage backend for offline fallback. When upstream is unreachable, the proxy serves the last cached copy. ETag-based revalidation avoids re-downloading unchanged metadata.

```yaml
cache_metadata: true
```

Or via environment variable: `PROXY_CACHE_METADATA=true`.

The `proxy mirror` command always enables metadata caching regardless of this setting.

### Metadata TTL

When metadata caching is enabled, `metadata_ttl` controls how long a cached response is considered fresh before revalidating with upstream. During the TTL window, cached metadata is served directly without contacting upstream, reducing latency and upstream load.

```yaml
metadata_ttl: "5m" # default
```

Or via environment variable: `PROXY_METADATA_TTL=10m`.

Set to `"0"` to always revalidate with upstream (ETag-based conditional requests still avoid re-downloading unchanged content).

When upstream is unreachable and the cached entry is past its TTL, the proxy serves the stale cached copy with a `Warning: 110 - "Response is Stale"` header so clients can tell the data may be outdated.
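The freshness logic described in this section reduces to three branches. A hedged sketch; the entry type and helper names here are stand-ins, not the proxy's handler code:

```go
package metadata

import (
	"net/http"
	"time"
)

// cachedEntry is a stand-in for a row in the metadata_cache table.
type cachedEntry struct {
	Body      []byte
	ETag      string
	FetchedAt time.Time
}

// decide mirrors the behavior described above: serve from cache inside
// the TTL, revalidate with an ETag once stale, and fall back to the
// stale copy (with a Warning header) when upstream is unreachable.
func decide(w http.ResponseWriter, e cachedEntry, ttl time.Duration, upstreamReachable bool) string {
	switch {
	case time.Since(e.FetchedAt) < ttl:
		return "serve-cached" // fresh: no upstream contact
	case upstreamReachable:
		// Conditional request; a 304 means e.Body is reused without re-download.
		return "revalidate-with-etag: " + e.ETag
	default:
		w.Header().Set("Warning", `110 - "Response is Stale"`)
		return "serve-stale"
	}
}
```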
## Mirror API

The `/api/mirror` endpoints are disabled by default. Enable them to allow starting mirror jobs via HTTP:

```yaml
mirror_api: true
```

Or via environment variable: `PROXY_MIRROR_API=true`.

When disabled, the endpoints are not registered and return 404.

## Mirror Command

The `proxy mirror` command pre-populates the cache from various sources. It accepts the same storage and database flags as `serve`.

| Flag | Default | Description |
|------|---------|-------------|
| `--sbom` | | Path to CycloneDX or SPDX SBOM file |
| `--concurrency` | `4` | Number of parallel downloads |
| `--dry-run` | `false` | Show what would be mirrored without downloading |
| `--config` | | Path to configuration file |
| `--storage-url` | | Storage URL |
| `--database-driver` | | Database driver |
| `--database-path` | | SQLite database file |
| `--database-url` | | PostgreSQL connection URL |

Positional arguments are treated as PURLs:

```bash
proxy mirror pkg:npm/lodash@4.17.21 pkg:cargo/serde@1.0.0
```

## Docker
## docs/migrations.md (new file, 51 additions)

@@ -0,0 +1,51 @@

# Database Migrations

Schema changes are tracked in a `migrations` table. Each migration has a name and a function. On startup, `MigrateSchema()` loads the set of already-applied names in one query and runs anything new.

Fresh databases created via `Create()` get the full schema and all migrations are recorded as already applied.
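A minimal sketch of that startup flow; the helpers (`appliedMigrationNames`, `recordMigration`) are assumed names, not necessarily what `internal/database` calls them, and the `DB`/`migrations` types are the ones the doc's own examples use:

```go
// MigrateSchema-style runner: load the applied set with one query,
// then run whatever is new, recording each name as it succeeds.
func (db *DB) migrateSchemaSketch() error {
	applied, err := db.appliedMigrationNames() // assumed: SELECT name FROM migrations, as map[string]bool
	if err != nil {
		return err
	}
	for _, m := range migrations {
		if applied[m.name] {
			continue // already ran against this database
		}
		if err := m.fn(db); err != nil {
			return fmt.Errorf("migration %s: %w", m.name, err)
		}
		if err := db.recordMigration(m.name); err != nil { // assumed: INSERT INTO migrations
			return err
		}
	}
	return nil
}
```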
## Adding a migration
|
||||
|
||||
In `internal/database/schema.go`:
|
||||
|
||||
1. Write a migration function:
|
||||
|
||||
```go
|
||||
func migrateAddWidgetColumn(db *DB) error {
|
||||
hasCol, err := db.HasColumn("packages", "widget")
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking column widget: %w", err)
|
||||
}
|
||||
if !hasCol {
|
||||
colType := "TEXT"
|
||||
if db.dialect == DialectPostgres {
|
||||
colType = "TEXT" // adjust if types differ
|
||||
}
|
||||
if _, err := db.Exec(fmt.Sprintf("ALTER TABLE packages ADD COLUMN widget %s", colType)); err != nil {
|
||||
return fmt.Errorf("adding column widget: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
2. Append it to the `migrations` slice with the next sequential prefix:

```go
var migrations = []migration{
	{"001_add_packages_enrichment_columns", migrateAddPackagesEnrichmentColumns},
	{"002_add_versions_enrichment_columns", migrateAddVersionsEnrichmentColumns},
	{"003_ensure_artifacts_table", migrateEnsureArtifactsTable},
	{"004_ensure_vulnerabilities_table", migrateEnsureVulnerabilitiesTable},
	{"005_add_widget_column", migrateAddWidgetColumn}, // new
}
```

3. Add the same column to both `schemaSQLite` and `schemaPostgres` at the top of the file so fresh databases start with the full schema.
## Rules

- Migration functions must be idempotent. Use `HasColumn`/`HasTable` checks or `IF NOT EXISTS` clauses so they're safe to run against a database that already has the change (a sketch follows this list).
- Handle both SQLite and Postgres dialects. Common differences: `DATETIME` vs `TIMESTAMP`, `INTEGER DEFAULT 0` vs `BOOLEAN DEFAULT FALSE`, `INTEGER PRIMARY KEY` vs `SERIAL PRIMARY KEY`.
- Never reorder or rename existing entries. The name string is the migration's identity in the database.
- Never remove old migrations from the list. They won't run on already-migrated databases, but they need to exist for older databases upgrading for the first time.
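As a sketch of these rules in practice, here is what an idempotent, dialect-aware table migration could look like. The `gadgets` table is purely illustrative; `HasTable`, `db.dialect`, and `DialectPostgres` are the same helpers the real migrations use:

```go
// Illustrative migration: create a hypothetical "gadgets" table in a way
// that is safe to run on databases that already have it.
func migrateEnsureGadgetsTable(db *DB) error {
	hasTable, err := db.HasTable("gadgets")
	if err != nil {
		return fmt.Errorf("checking gadgets table: %w", err)
	}
	if hasTable {
		return nil // already present; nothing to do
	}

	// Pick dialect-specific types per the rules above.
	idCol, tsType := "INTEGER PRIMARY KEY", "DATETIME"
	if db.dialect == DialectPostgres {
		idCol, tsType = "SERIAL PRIMARY KEY", "TIMESTAMP"
	}

	query := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gadgets (
		id %s,
		name TEXT NOT NULL,
		created_at %s
	)`, idCol, tsType)
	if _, err := db.Exec(query); err != nil {
		return fmt.Errorf("creating gadgets table: %w", err)
	}
	return nil
}
```
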
@@ -297,115 +297,6 @@ const docTemplate = `{
                }
            }
        },
        "/api/package/{ecosystem}/{name}": {
            "get": {
                "description": "Returns enriched package metadata. URL-encode scoped names (e.g. @scope/name -\u003e %40scope%2Fname).",
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "api"
                ],
                "summary": "Get package metadata",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Ecosystem",
                        "name": "ecosystem",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Package name",
                        "name": "name",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": "OK",
                        "schema": {
                            "$ref": "#/definitions/server.PackageResponse"
                        }
                    },
                    "400": {
                        "description": "Bad Request",
                        "schema": {
                            "type": "string"
                        }
                    },
                    "404": {
                        "description": "Not Found",
                        "schema": {
                            "type": "string"
                        }
                    },
                    "500": {
                        "description": "Internal Server Error",
                        "schema": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "/api/package/{ecosystem}/{name}/{version}": {
            "get": {
                "description": "Returns enriched package+version metadata and vulnerability data.",
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "api"
                ],
                "summary": "Get version metadata and vulnerabilities",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Ecosystem",
                        "name": "ecosystem",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Package name",
                        "name": "name",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Version",
                        "name": "version",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": "OK",
                        "schema": {
                            "$ref": "#/definitions/server.EnrichmentResponse"
                        }
                    },
                    "400": {
                        "description": "Bad Request",
                        "schema": {
                            "type": "string"
                        }
                    },
                    "500": {
                        "description": "Internal Server Error",
                        "schema": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "/api/packages": {
            "get": {
                "produces": [

@@ -505,108 +396,6 @@ const docTemplate = `{
                }
            }
        },
        "/api/vulns/{ecosystem}/{name}": {
            "get": {
                "description": "Returns vulnerabilities for a package across versions, or for a specific version if provided.",
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "api"
                ],
                "summary": "Get vulnerabilities for a package or version",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Ecosystem",
                        "name": "ecosystem",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Package name",
                        "name": "name",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": "OK",
                        "schema": {
                            "$ref": "#/definitions/server.VulnsResponse"
                        }
                    },
                    "400": {
                        "description": "Bad Request",
                        "schema": {
                            "type": "string"
                        }
                    },
                    "500": {
                        "description": "Internal Server Error",
                        "schema": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "/api/vulns/{ecosystem}/{name}/{version}": {
            "get": {
                "description": "Returns vulnerabilities for a package across versions, or for a specific version if provided.",
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "api"
                ],
                "summary": "Get vulnerabilities for a package or version",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Ecosystem",
                        "name": "ecosystem",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Package name",
                        "name": "name",
                        "in": "path",
                        "required": true
                    },
                    {
                        "type": "string",
                        "description": "Version",
                        "name": "version",
                        "in": "path"
                    }
                ],
                "responses": {
                    "200": {
                        "description": "OK",
                        "schema": {
                            "$ref": "#/definitions/server.VulnsResponse"
                        }
                    },
                    "400": {
                        "description": "Bad Request",
                        "schema": {
                            "type": "string"
                        }
                    },
                    "500": {
                        "description": "Internal Server Error",
                        "schema": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "/health": {
            "get": {
                "produces": [

@@ -715,29 +504,6 @@ const docTemplate = `{
                }
            }
        },
        "server.EnrichmentResponse": {
            "type": "object",
            "properties": {
                "is_outdated": {
                    "type": "boolean"
                },
                "license_category": {
                    "type": "string"
                },
                "package": {
                    "$ref": "#/definitions/server.PackageResponse"
                },
                "version": {
                    "$ref": "#/definitions/server.VersionResponse"
                },
                "vulnerabilities": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/server.VulnResponse"
                    }
                }
            }
        },
        "server.OutdatedPackage": {
            "type": "object",
            "properties": {

@@ -949,84 +715,6 @@ const docTemplate = `{
                    "type": "integer"
                }
            }
        },
        "server.VersionResponse": {
            "type": "object",
            "properties": {
                "ecosystem": {
                    "type": "string"
                },
                "integrity": {
                    "type": "string"
                },
                "is_outdated": {
                    "type": "boolean"
                },
                "license": {
                    "type": "string"
                },
                "name": {
                    "type": "string"
                },
                "published_at": {
                    "type": "string"
                },
                "version": {
                    "type": "string"
                },
                "yanked": {
                    "type": "boolean"
                }
            }
        },
        "server.VulnResponse": {
            "type": "object",
            "properties": {
                "cvss_score": {
                    "type": "number"
                },
                "fixed_version": {
                    "type": "string"
                },
                "id": {
                    "type": "string"
                },
                "references": {
                    "type": "array",
                    "items": {
                        "type": "string"
                    }
                },
                "severity": {
                    "type": "string"
                },
                "summary": {
                    "type": "string"
                }
            }
        },
        "server.VulnsResponse": {
            "type": "object",
            "properties": {
                "count": {
                    "type": "integer"
                },
                "ecosystem": {
                    "type": "string"
                },
                "name": {
                    "type": "string"
                },
                "version": {
                    "type": "string"
                },
                "vulnerabilities": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/server.VulnResponse"
                    }
                }
            }
        }
    }
}`
19 go.mod

@@ -3,22 +3,26 @@ module github.com/git-pkgs/proxy
go 1.25.6

require (
	github.com/git-pkgs/archives v0.2.0
	github.com/git-pkgs/enrichment v0.2.1
	github.com/CycloneDX/cyclonedx-go v0.10.0
	github.com/git-pkgs/archives v0.2.2
	github.com/git-pkgs/enrichment v0.2.2
	github.com/git-pkgs/purl v0.1.10
	github.com/git-pkgs/registries v0.4.0
	github.com/git-pkgs/spdx v0.1.2
	github.com/git-pkgs/vers v0.2.4
	github.com/git-pkgs/vulns v0.1.3
	github.com/git-pkgs/vulns v0.1.4
	github.com/go-chi/chi/v5 v5.2.5
	github.com/jmoiron/sqlx v1.4.0
	github.com/lib/pq v1.12.0
	github.com/lib/pq v1.12.2
	github.com/prometheus/client_golang v1.23.2
	github.com/prometheus/client_model v0.6.2
	github.com/spdx/tools-golang v0.5.7
	github.com/swaggo/swag v1.16.6
	gocloud.dev v0.45.0
	golang.org/x/sync v0.20.0
	google.golang.org/protobuf v1.36.11
	gopkg.in/yaml.v3 v3.0.1
	modernc.org/sqlite v1.47.0
	modernc.org/sqlite v1.48.0
)

require (

@@ -51,6 +55,7 @@ require (
	github.com/alfatraining/structtag v1.0.0 // indirect
	github.com/alingse/asasalint v0.0.11 // indirect
	github.com/alingse/nilnesserr v0.2.0 // indirect
	github.com/anchore/go-struct-converter v0.1.0 // indirect
	github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
	github.com/ashanbrown/forbidigo/v2 v2.3.0 // indirect
	github.com/ashanbrown/makezero/v2 v2.1.0 // indirect

@@ -276,7 +281,6 @@ require (
	golang.org/x/exp/typeparams v0.0.0-20260209203927-2842357ff358 // indirect
	golang.org/x/mod v0.33.0 // indirect
	golang.org/x/net v0.51.0 // indirect
	golang.org/x/sync v0.20.0 // indirect
	golang.org/x/sys v0.42.0 // indirect
	golang.org/x/text v0.34.0 // indirect
	golang.org/x/tools v0.42.0 // indirect

@@ -284,7 +288,6 @@ require (
	google.golang.org/api v0.269.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect
	google.golang.org/grpc v1.79.1 // indirect
	google.golang.org/protobuf v1.36.11 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	honnef.co/go/tools v0.7.0 // indirect

@@ -293,7 +296,7 @@ require (
	modernc.org/memory v1.11.0 // indirect
	mvdan.cc/gofumpt v0.9.2 // indirect
	mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
	sigs.k8s.io/yaml v1.6.0 // indirect
)

tool github.com/golangci/golangci-lint/v2/cmd/golangci-lint
40 go.sum

@@ -46,6 +46,8 @@ github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/CycloneDX/cyclonedx-go v0.10.0 h1:7xyklU7YD+CUyGzSFIARG18NYLsKVn4QFg04qSsu+7Y=
github.com/CycloneDX/cyclonedx-go v0.10.0/go.mod h1:vUvbCXQsEm48OI6oOlanxstwNByXjCZ2wuleUlwGEO8=
github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g=
github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=

@@ -85,6 +87,8 @@ github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQ
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w=
github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg=
github.com/anchore/go-struct-converter v0.1.0 h1:2rDRssAl6mgKBSLNiVCMADgZRhoqtw9dedlWa0OhD30=
github.com/anchore/go-struct-converter v0.1.0/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/ashanbrown/forbidigo/v2 v2.3.0 h1:OZZDOchCgsX5gvToVtEBoV2UWbFfI6RKQTir2UZzSxo=

@@ -144,6 +148,8 @@ github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+j
github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg=
github.com/bombsimon/wsl/v5 v5.6.0 h1:4z+/sBqC5vUmSp1O0mS+czxwH9+LKXtCWtHH9rZGQL8=
github.com/bombsimon/wsl/v5 v5.6.0/go.mod h1:Uqt2EfrMj2NV8UGoN1f1Y3m0NpUVCsUdrNCdet+8LvU=
github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M=
github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0=
github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE=
github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE=
github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg=

@@ -224,10 +230,10 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/ghostiam/protogetter v0.3.20 h1:oW7OPFit2FxZOpmMRPP9FffU4uUpfeE/rEdE1f+MzD0=
github.com/ghostiam/protogetter v0.3.20/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI=
github.com/git-pkgs/archives v0.2.0 h1:8OuuGwAB+Eww8/1ayyYpZzP0wVEH0/VWBG3mQrfi9SM=
github.com/git-pkgs/archives v0.2.0/go.mod h1:LTJ1iQVFA7otizWMOyiI82NYVmyBWAPRzwu/e30rcXU=
github.com/git-pkgs/enrichment v0.2.1 h1:mJJt4YQBzl9aOfu4226ylnC9H6YO9YZDjGpbSPVahKc=
github.com/git-pkgs/enrichment v0.2.1/go.mod h1:q9eDZpRrUbYwzD4Mtg/T6LRdBMlt2DYRIvVRDULFnKg=
github.com/git-pkgs/archives v0.2.2 h1:RxOjrV8RzKicbMVdf2GDKOqIOHZNVjrLY/Pc7KSE/WQ=
github.com/git-pkgs/archives v0.2.2/go.mod h1:LTJ1iQVFA7otizWMOyiI82NYVmyBWAPRzwu/e30rcXU=
github.com/git-pkgs/enrichment v0.2.2 h1:vaQu5vs3tjQB5JI0gzBrUCynUc9z3l5byPhgKFaNZrc=
github.com/git-pkgs/enrichment v0.2.2/go.mod h1:5JWGmlHWcv5HQHUrctcpnRUNpEF5VAixD2z4zvqKejs=
github.com/git-pkgs/packageurl-go v0.3.1 h1:WM3RBABQZLaRBxgKyYughc3cVBE8KyQxbSC6Jt5ak7M=
github.com/git-pkgs/packageurl-go v0.3.1/go.mod h1:rcIxiG37BlQLB6FZfgdj9Fm7yjhRQd3l+5o7J0QPAk4=
github.com/git-pkgs/purl v0.1.10 h1:NMjeF10nzFn3tdQlz6rbmHB+i+YkyrFQxho3e33ePTQ=

@@ -238,8 +244,8 @@ github.com/git-pkgs/spdx v0.1.2 h1:wHSK+CqFsO5N7yDTPvxDmer5LgNEa7vAsiZhi5Aci0A=
github.com/git-pkgs/spdx v0.1.2/go.mod h1:V98MgZapNgYw54/pdGR82d7RU93qzJoybahbpZqTfw8=
github.com/git-pkgs/vers v0.2.4 h1:Zr3jR/Xf1i/6cvBaJKPxhCwjzqz7uvYHE0Fhid/GPBk=
github.com/git-pkgs/vers v0.2.4/go.mod h1:biTbSQK1qdbrsxDEKnqe3Jzclxz8vW6uDcwKjfUGcOo=
github.com/git-pkgs/vulns v0.1.3 h1:Q9GixxhAYpP5vVDetKNMACHxGnWwB8aE5c9kbE8xxqU=
github.com/git-pkgs/vulns v0.1.3/go.mod h1:/PVy7S1oZNVF9X8yVOZ9SX5MFpyVWCtLnIX0kAfPjY0=
github.com/git-pkgs/vulns v0.1.4 h1:SlnGWHNmtdQgABjfrX/I/pVe+DWLbZ5Yi9xg+/De5r8=
github.com/git-pkgs/vulns v0.1.4/go.mod h1:34xkR7QncIVfxoi78k3YT6Y9DfTEaL7j6PzCqjsRP9U=
github.com/github/go-spdx/v2 v2.4.0 h1:+4IwVwJJbm3rzvrQ6P1nI9BDMcy3la4RchRy5uehV/M=
github.com/github/go-spdx/v2 v2.4.0/go.mod h1:/5rwgS0txhGtRdUZwc02bTglzg6HK3FfuEbECKlK2Sg=
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=

@@ -426,8 +432,8 @@ github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3
github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.12.0 h1:mC1zeiNamwKBecjHarAr26c/+d8V5w/u4J0I/yASbJo=
github.com/lib/pq v1.12.0/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lib/pq v1.12.2 h1:ajJNv84limnK3aPbDIhLtcjrUbqAw/5XNdkuI6KNe/Q=
github.com/lib/pq v1.12.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE=

@@ -568,6 +574,8 @@ github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o=
github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas=
github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spdx/tools-golang v0.5.7 h1:+sWcKGnhwp3vLdMqPcLdA6QK679vd86cK9hQWH3AwCg=
github.com/spdx/tools-golang v0.5.7/go.mod h1:jg7w0LOpoNAw6OxKEzCoqPC2GCTj45LyTlVmXubDsYw=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=

@@ -606,6 +614,8 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
github.com/terminalstatic/go-xsd-validate v0.1.6 h1:TenYeQ3eY631qNi1/cTmLH/s2slHPRKTTHT+XSHkepo=
github.com/terminalstatic/go-xsd-validate v0.1.6/go.mod h1:18lsvYFofBflqCrvo1umpABZ99+GneNTw2kEEc8UPJw=
github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg=
github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU=
github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk=

@@ -628,6 +638,12 @@ github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYR
github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU=
github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU=
github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM=
github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=

@@ -857,8 +873,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.47.0 h1:R1XyaNpoW4Et9yly+I2EeX7pBza/w+pmYee/0HJDyKk=
modernc.org/sqlite v1.47.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
modernc.org/sqlite v1.48.0 h1:ElZyLop3Q2mHYk5IFPPXADejZrlHu7APbpB0sF78bq4=
modernc.org/sqlite v1.48.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=

@@ -867,5 +883,5 @@ mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4=
mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s=
mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI=
mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6yRGpLLPNfuGtU2Z0cPvFquGDYU=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
@@ -55,6 +55,7 @@ import (
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"gopkg.in/yaml.v3"
)

@@ -83,6 +84,20 @@ type Config struct {

	// Cooldown configures version age filtering to mitigate supply chain attacks.
	Cooldown CooldownConfig `json:"cooldown" yaml:"cooldown"`

	// CacheMetadata enables caching of upstream metadata responses for offline fallback.
	// When enabled, metadata is stored in the database and storage backend.
	// The mirror command always enables this regardless of this setting.
	CacheMetadata bool `json:"cache_metadata" yaml:"cache_metadata"`

	// MetadataTTL is how long cached metadata is considered fresh before
	// revalidating with upstream. Uses Go duration syntax (e.g. "5m", "1h").
	// Default: "5m". Set to "0" to always revalidate.
	MetadataTTL string `json:"metadata_ttl" yaml:"metadata_ttl"`

	// MirrorAPI enables the /api/mirror endpoints for starting mirror jobs via HTTP.
	// Disabled by default to prevent unauthenticated users from triggering downloads.
	MirrorAPI bool `json:"mirror_api" yaml:"mirror_api"`
}

// CooldownConfig configures version cooldown periods.

@@ -306,6 +321,15 @@ func (c *Config) LoadFromEnv() {
	if v := os.Getenv("PROXY_COOLDOWN_DEFAULT"); v != "" {
		c.Cooldown.Default = v
	}
	if v := os.Getenv("PROXY_CACHE_METADATA"); v != "" {
		c.CacheMetadata = v == "true" || v == "1"
	}
	if v := os.Getenv("PROXY_MIRROR_API"); v != "" {
		c.MirrorAPI = v == "true" || v == "1"
	}
	if v := os.Getenv("PROXY_METADATA_TTL"); v != "" {
		c.MetadataTTL = v
	}
}

// Validate checks the configuration for errors.

@@ -355,9 +379,34 @@ func (c *Config) Validate() error {
		}
	}

	// Validate metadata TTL if specified
	if c.MetadataTTL != "" && c.MetadataTTL != "0" {
		if _, err := time.ParseDuration(c.MetadataTTL); err != nil {
			return fmt.Errorf("invalid metadata_ttl %q: %w", c.MetadataTTL, err)
		}
	}

	return nil
}

const defaultMetadataTTL = 5 * time.Minute //nolint:mnd // sensible default

// ParseMetadataTTL returns the metadata TTL duration.
// Returns 5 minutes if unset, 0 if explicitly disabled.
func (c *Config) ParseMetadataTTL() time.Duration {
	if c.MetadataTTL == "" {
		return defaultMetadataTTL
	}
	if c.MetadataTTL == "0" {
		return 0
	}
	d, err := time.ParseDuration(c.MetadataTTL)
	if err != nil {
		return defaultMetadataTTL
	}
	return d
}

// ParseSize parses a human-readable size string (e.g., "10GB", "500MB").
// Returns the size in bytes.
func ParseSize(s string) (int64, error) {
@@ -4,6 +4,7 @@ import (
	"os"
	"path/filepath"
	"testing"
	"time"
)

const (

@@ -301,3 +302,56 @@ func TestLoadFileNotFound(t *testing.T) {
		t.Error("expected error for nonexistent file")
	}
}

func TestParseMetadataTTL(t *testing.T) {
	tests := []struct {
		name string
		ttl  string
		want time.Duration
	}{
		{"empty defaults to 5m", "", 5 * time.Minute},
		{"explicit zero", "0", 0},
		{"10 minutes", "10m", 10 * time.Minute},
		{"1 hour", "1h", 1 * time.Hour},
		{"invalid defaults to 5m", "not-a-duration", 5 * time.Minute},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := Default()
			cfg.MetadataTTL = tt.ttl
			got := cfg.ParseMetadataTTL()
			if got != tt.want {
				t.Errorf("ParseMetadataTTL() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestValidateMetadataTTL(t *testing.T) {
	cfg := Default()
	cfg.MetadataTTL = "invalid"
	if err := cfg.Validate(); err == nil {
		t.Error("expected validation error for invalid metadata_ttl")
	}

	cfg.MetadataTTL = "5m"
	if err := cfg.Validate(); err != nil {
		t.Errorf("unexpected error for valid metadata_ttl: %v", err)
	}

	cfg.MetadataTTL = "0"
	if err := cfg.Validate(); err != nil {
		t.Errorf("unexpected error for zero metadata_ttl: %v", err)
	}
}

func TestLoadMetadataTTLFromEnv(t *testing.T) {
	cfg := Default()
	t.Setenv("PROXY_METADATA_TTL", "10m")
	cfg.LoadFromEnv()

	if cfg.MetadataTTL != "10m" {
		t.Errorf("MetadataTTL = %q, want %q", cfg.MetadataTTL, "10m")
	}
}
@@ -651,58 +651,159 @@ func TestMigrationFromOldSchema(t *testing.T) {
	}
	defer func() { _ = db.Close() }()

	// Try to run queries that require new columns - these should fail without migration
	t.Run("queries should fail without migration", func(t *testing.T) {
		_, err := db.GetEnrichmentStats()
		if err == nil {
			t.Error("GetEnrichmentStats: expected error querying enriched_at column, got nil")
		}

		_, err = db.GetPackageByEcosystemName("npm", "test-package")
		if err == nil {
			t.Error("GetPackageByEcosystemName: expected error querying registry_url column, got nil")
		}

		// SearchPackages should work even with old schema because it uses sql.NullString
		// for nullable columns, which can handle NULL values properly
		_, err = db.SearchPackages("test", "", 10, 0)
		if err != nil {
			t.Errorf("SearchPackages: unexpected error with old schema: %v", err)
		}
	})
	// Queries that require new columns should fail without migration
	if _, err := db.GetEnrichmentStats(); err == nil {
		t.Error("GetEnrichmentStats: expected error querying enriched_at column, got nil")
	}
	if _, err := db.GetPackageByEcosystemName("npm", "test-package"); err == nil {
		t.Error("GetPackageByEcosystemName: expected error querying registry_url column, got nil")
	}
	// SearchPackages should work even with old schema because it uses sql.NullString
	if _, err := db.SearchPackages("test", "", 10, 0); err != nil {
		t.Errorf("SearchPackages: unexpected error with old schema: %v", err)
	}

	// Run migration
	t.Run("migrate schema", func(t *testing.T) {
		if err := db.MigrateSchema(); err != nil {
			t.Fatalf("MigrateSchema failed: %v", err)
		}
	})
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("MigrateSchema failed: %v", err)
	}

	// Verify queries work after migration
	t.Run("queries should work after migration", func(t *testing.T) {
		stats, err := db.GetEnrichmentStats()
		if err != nil {
			t.Errorf("GetEnrichmentStats failed after migration: %v", err)
		}
		if stats == nil {
			t.Error("GetEnrichmentStats returned nil after migration")
		}
	stats, err := db.GetEnrichmentStats()
	if err != nil {
		t.Errorf("GetEnrichmentStats failed after migration: %v", err)
	}
	if stats == nil {
		t.Error("GetEnrichmentStats returned nil after migration")
	}

		pkg, err := db.GetPackageByEcosystemName("npm", "test-package")
		if err != nil {
			t.Errorf("GetPackageByEcosystemName failed after migration: %v", err)
		}
		if pkg == nil {
			t.Fatal("GetPackageByEcosystemName returned nil after migration")
		}
		if pkg.Name != "test-package" {
			t.Errorf("expected package name test-package, got %s", pkg.Name)
		}
	pkg, err := db.GetPackageByEcosystemName("npm", "test-package")
	if err != nil {
		t.Errorf("GetPackageByEcosystemName failed after migration: %v", err)
	}
	if pkg == nil {
		t.Fatal("GetPackageByEcosystemName returned nil after migration")
	}
	if pkg.Name != "test-package" {
		t.Errorf("expected package name test-package, got %s", pkg.Name)
	}

		// Note: SearchPackages not tested here because old timestamp data
		// stored as strings can't be scanned into time.Time. This is a data
		// migration issue, not a schema migration issue.
	})
	// Verify migrations were recorded
	applied, err := db.appliedMigrations()
	if err != nil {
		t.Fatalf("appliedMigrations failed: %v", err)
	}
	for _, m := range migrations {
		if !applied[m.name] {
			t.Errorf("migration %s not recorded as applied", m.name)
		}
	}

	// Running again should be a no-op
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("second MigrateSchema failed: %v", err)
	}
}

func TestFreshDatabaseRecordsMigrations(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "fresh.db")

	db, err := Create(dbPath)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	defer func() { _ = db.Close() }()

	applied, err := db.appliedMigrations()
	if err != nil {
		t.Fatalf("appliedMigrations failed: %v", err)
	}

	for _, m := range migrations {
		if !applied[m.name] {
			t.Errorf("migration %s not recorded in fresh database", m.name)
		}
	}
}

func TestMigrateSchemaSkipsApplied(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "test.db")

	db, err := Create(dbPath)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	defer func() { _ = db.Close() }()

	// All migrations are already recorded from Create. Running MigrateSchema
	// should return without running any migration functions.
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("MigrateSchema failed: %v", err)
	}

	// Verify count hasn't changed (no duplicate inserts)
	var count int
	if err := db.Get(&count, "SELECT COUNT(*) FROM migrations"); err != nil {
		t.Fatalf("counting migrations failed: %v", err)
	}
	if count != len(migrations) {
		t.Errorf("expected %d migrations, got %d", len(migrations), count)
	}
}

func TestMigrateSchemaUpgradeFromFullyMigrated(t *testing.T) {
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "existing.db")

	// Simulate an existing proxy database that has the full current schema
	// but no migrations table (i.e. it was running the previous version).
	sqlDB, err := sql.Open("sqlite", dbPath)
	if err != nil {
		t.Fatalf("failed to open database: %v", err)
	}

	if _, err := sqlDB.Exec(schemaSQLite); err != nil {
		t.Fatalf("failed to create schema: %v", err)
	}
	// Drop the migrations table that schemaSQLite now includes
	if _, err := sqlDB.Exec("DROP TABLE migrations"); err != nil {
		t.Fatalf("failed to drop migrations table: %v", err)
	}
	if _, err := sqlDB.Exec("INSERT INTO schema_info (version) VALUES (1)"); err != nil {
		t.Fatalf("failed to set schema version: %v", err)
	}
	if err := sqlDB.Close(); err != nil {
		t.Fatalf("failed to close database: %v", err)
	}

	db, err := Open(dbPath)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	defer func() { _ = db.Close() }()

	// This should create the migrations table and record all migrations
	// without altering any tables (everything already exists).
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("MigrateSchema failed: %v", err)
	}

	applied, err := db.appliedMigrations()
	if err != nil {
		t.Fatalf("appliedMigrations failed: %v", err)
	}
	for _, m := range migrations {
		if !applied[m.name] {
			t.Errorf("migration %s not recorded after upgrade", m.name)
		}
	}

	// Second run should be the fast path (single SELECT)
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("second MigrateSchema failed: %v", err)
	}
}

func TestConcurrentWrites(t *testing.T) {

@@ -890,3 +991,26 @@ func TestSearchPackagesWithValues(t *testing.T) {
		t.Errorf("expected 10 hits, got %d", result.Hits)
	}
}

func BenchmarkMigrateSchemaFullyMigrated(b *testing.B) {
	dir := b.TempDir()
	dbPath := filepath.Join(dir, "bench.db")

	db, err := Create(dbPath)
	if err != nil {
		b.Fatalf("Create failed: %v", err)
	}
	defer func() { _ = db.Close() }()

	// First call to ensure everything is migrated
	if err := db.MigrateSchema(); err != nil {
		b.Fatalf("initial MigrateSchema failed: %v", err)
	}

	b.ResetTimer()
	for b.Loop() {
		if err := db.MigrateSchema(); err != nil {
			b.Fatalf("MigrateSchema failed: %v", err)
		}
	}
}
180 internal/database/metadata_cache_test.go Normal file

@@ -0,0 +1,180 @@
package database

import (
	"database/sql"
	"path/filepath"
	"testing"
	"time"
)

func setupMetadataCacheDB(t *testing.T) *DB {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Create(dbPath)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("MigrateSchema failed: %v", err)
	}
	t.Cleanup(func() { _ = db.Close() })
	return db
}

func TestUpsertAndGetMetadataCache(t *testing.T) {
	db := setupMetadataCacheDB(t)

	entry := &MetadataCacheEntry{
		Ecosystem:   testEcosystemNPM,
		Name:        "lodash",
		StoragePath: "_metadata/npm/lodash/metadata",
		ETag:        sql.NullString{String: `"abc123"`, Valid: true},
		ContentType: sql.NullString{String: "application/json", Valid: true},
		Size:        sql.NullInt64{Int64: 1024, Valid: true},
		FetchedAt:   sql.NullTime{Time: time.Now(), Valid: true},
	}

	err := db.UpsertMetadataCache(entry)
	if err != nil {
		t.Fatalf("UpsertMetadataCache() error = %v", err)
	}

	got, err := db.GetMetadataCache(testEcosystemNPM, "lodash")
	if err != nil {
		t.Fatalf("GetMetadataCache() error = %v", err)
	}
	if got == nil {
		t.Fatal("GetMetadataCache() returned nil")
	}

	if got.Ecosystem != testEcosystemNPM {
		t.Errorf("ecosystem = %q, want %q", got.Ecosystem, testEcosystemNPM)
	}
	if got.Name != "lodash" {
		t.Errorf("name = %q, want %q", got.Name, "lodash")
	}
	if got.StoragePath != "_metadata/npm/lodash/metadata" {
		t.Errorf("storage_path = %q, want %q", got.StoragePath, "_metadata/npm/lodash/metadata")
	}
	if !got.ETag.Valid || got.ETag.String != `"abc123"` {
		t.Errorf("etag = %v, want %q", got.ETag, `"abc123"`)
	}
	if !got.ContentType.Valid || got.ContentType.String != "application/json" {
		t.Errorf("content_type = %v, want %q", got.ContentType, "application/json")
	}
	if !got.Size.Valid || got.Size.Int64 != 1024 {
		t.Errorf("size = %v, want 1024", got.Size)
	}
}

func TestGetMetadataCacheMiss(t *testing.T) {
	db := setupMetadataCacheDB(t)

	got, err := db.GetMetadataCache(testEcosystemNPM, "nonexistent")
	if err != nil {
		t.Fatalf("GetMetadataCache() error = %v", err)
	}
	if got != nil {
		t.Errorf("expected nil for cache miss, got %v", got)
	}
}

func TestUpsertMetadataCacheOverwrite(t *testing.T) {
	db := setupMetadataCacheDB(t)

	// First insert
	entry1 := &MetadataCacheEntry{
		Ecosystem:   testEcosystemNPM,
		Name:        "lodash",
		StoragePath: "_metadata/npm/lodash/metadata",
		ETag:        sql.NullString{String: `"v1"`, Valid: true},
		ContentType: sql.NullString{String: "application/json", Valid: true},
		Size:        sql.NullInt64{Int64: 100, Valid: true},
		FetchedAt:   sql.NullTime{Time: time.Now(), Valid: true},
	}
	if err := db.UpsertMetadataCache(entry1); err != nil {
		t.Fatalf("first UpsertMetadataCache() error = %v", err)
	}

	// Second insert (same ecosystem+name, different etag and size)
	entry2 := &MetadataCacheEntry{
		Ecosystem:   testEcosystemNPM,
		Name:        "lodash",
		StoragePath: "_metadata/npm/lodash/metadata",
		ETag:        sql.NullString{String: `"v2"`, Valid: true},
		ContentType: sql.NullString{String: "application/json", Valid: true},
		Size:        sql.NullInt64{Int64: 200, Valid: true},
		FetchedAt:   sql.NullTime{Time: time.Now(), Valid: true},
	}
	if err := db.UpsertMetadataCache(entry2); err != nil {
		t.Fatalf("second UpsertMetadataCache() error = %v", err)
	}

	got, err := db.GetMetadataCache(testEcosystemNPM, "lodash")
	if err != nil {
		t.Fatalf("GetMetadataCache() error = %v", err)
	}
	if got == nil {
		t.Fatal("expected entry after overwrite")
	}
	if got.ETag.String != `"v2"` {
		t.Errorf("etag = %q, want %q", got.ETag.String, `"v2"`)
	}
	if got.Size.Int64 != 200 {
		t.Errorf("size = %d, want 200", got.Size.Int64)
	}
}

func TestUpsertMetadataCacheNullableFields(t *testing.T) {
	db := setupMetadataCacheDB(t)

	entry := &MetadataCacheEntry{
		Ecosystem:   "pypi",
		Name:        "requests",
		StoragePath: "_metadata/pypi/requests/metadata",
	}

	if err := db.UpsertMetadataCache(entry); err != nil {
		t.Fatalf("UpsertMetadataCache() error = %v", err)
	}

	got, err := db.GetMetadataCache("pypi", "requests")
	if err != nil {
		t.Fatalf("GetMetadataCache() error = %v", err)
	}
	if got == nil {
		t.Fatal("expected entry")
	}
	if got.ETag.Valid {
		t.Error("expected null etag")
	}
	if got.ContentType.Valid {
		t.Error("expected null content_type")
	}
	if got.Size.Valid {
		t.Error("expected null size")
	}
}

func TestMetadataCacheTableCreatedByMigration(t *testing.T) {
	// Create a DB without the metadata_cache table, then migrate
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Create(dbPath)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	defer func() { _ = db.Close() }()

	// MigrateSchema should create the metadata_cache table
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("MigrateSchema() error = %v", err)
	}

	has, err := db.HasTable("metadata_cache")
	if err != nil {
		t.Fatalf("HasTable() error = %v", err)
	}
	if !has {
		t.Error("metadata_cache table should exist after migration")
	}
}
@@ -887,3 +887,66 @@ func (db *DB) CountCachedPackages(ecosystem string) (int64, error) {
	err = db.Get(&count, query, args...)
	return count, err
}

// Metadata cache queries

func (db *DB) GetMetadataCache(ecosystem, name string) (*MetadataCacheEntry, error) {
	var entry MetadataCacheEntry
	query := db.Rebind(`
		SELECT id, ecosystem, name, storage_path, etag, content_type,
		       size, last_modified, fetched_at, created_at, updated_at
		FROM metadata_cache WHERE ecosystem = ? AND name = ?
	`)
	err := db.Get(&entry, query, ecosystem, name)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &entry, nil
}

func (db *DB) UpsertMetadataCache(entry *MetadataCacheEntry) error {
	now := time.Now()
	var query string

	if db.dialect == DialectPostgres {
		query = `
			INSERT INTO metadata_cache (ecosystem, name, storage_path, etag, content_type,
				size, last_modified, fetched_at, created_at, updated_at)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
			ON CONFLICT(ecosystem, name) DO UPDATE SET
				storage_path = EXCLUDED.storage_path,
				etag = EXCLUDED.etag,
				content_type = EXCLUDED.content_type,
				size = EXCLUDED.size,
				last_modified = EXCLUDED.last_modified,
				fetched_at = EXCLUDED.fetched_at,
				updated_at = EXCLUDED.updated_at
		`
	} else {
		query = `
			INSERT INTO metadata_cache (ecosystem, name, storage_path, etag, content_type,
				size, last_modified, fetched_at, created_at, updated_at)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
			ON CONFLICT(ecosystem, name) DO UPDATE SET
				storage_path = excluded.storage_path,
				etag = excluded.etag,
				content_type = excluded.content_type,
				size = excluded.size,
				last_modified = excluded.last_modified,
				fetched_at = excluded.fetched_at,
				updated_at = excluded.updated_at
		`
	}

	_, err := db.Exec(query,
		entry.Ecosystem, entry.Name, entry.StoragePath, entry.ETag,
		entry.ContentType, entry.Size, entry.LastModified, entry.FetchedAt, now, now,
	)
	if err != nil {
		return fmt.Errorf("upserting metadata cache: %w", err)
	}
	return nil
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,10 @@
|
|||
package database
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const postgresTimestamp = "TIMESTAMP"
|
||||
|
||||
|
|
@ -86,6 +90,26 @@ CREATE TABLE IF NOT EXISTS vulnerabilities (
|
|||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
|
||||
CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS metadata_cache (
|
||||
id INTEGER PRIMARY KEY,
|
||||
ecosystem TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
storage_path TEXT NOT NULL,
|
||||
etag TEXT,
|
||||
content_type TEXT,
|
||||
size INTEGER,
|
||||
last_modified DATETIME,
|
||||
fetched_at DATETIME,
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME
|
||||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_metadata_eco_name ON metadata_cache(ecosystem, name);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS migrations (
|
||||
name TEXT NOT NULL PRIMARY KEY,
|
||||
applied_at DATETIME NOT NULL
|
||||
);
|
||||
`
|
||||
|
||||
var schemaPostgres = `
|
||||
|
|
@ -166,6 +190,26 @@ CREATE TABLE IF NOT EXISTS vulnerabilities (
|
|||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
|
||||
CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS metadata_cache (
|
||||
id SERIAL PRIMARY KEY,
|
||||
ecosystem TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
storage_path TEXT NOT NULL,
|
||||
etag TEXT,
|
||||
content_type TEXT,
|
||||
size BIGINT,
|
||||
last_modified TIMESTAMP,
|
||||
fetched_at TIMESTAMP,
|
||||
created_at TIMESTAMP,
|
||||
updated_at TIMESTAMP
|
||||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_metadata_eco_name ON metadata_cache(ecosystem, name);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS migrations (
|
||||
name TEXT NOT NULL PRIMARY KEY,
|
||||
applied_at TIMESTAMP NOT NULL
|
||||
);
|
||||
`
|
||||
|
||||
// schemaArtifactsOnly contains just the artifacts table for adding to existing git-pkgs databases.
|
||||
|
|
@ -232,6 +276,11 @@ func (db *DB) CreateSchema() error {
|
|||
return fmt.Errorf("setting schema version: %w", err)
|
||||
}
|
||||
|
||||
// Record all migrations as applied since the full schema is already current.
|
||||
if err := db.recordAllMigrations(); err != nil {
|
||||
return fmt.Errorf("recording migrations: %w", err)
|
||||
}
|
||||
|
||||
return db.OptimizeForReads()
|
||||
}
|
||||
|
||||
|
|
@@ -292,24 +341,136 @@ func (db *DB) HasColumn(table, column string) (bool, error) {
	return exists, err
}

// MigrateSchema adds missing columns to existing tables for backward compatibility.
// migration represents a named schema migration.
type migration struct {
	name string
	fn   func(db *DB) error
}

// migrations is the ordered list of all schema migrations. See
// docs/migrations.md for how to add new ones.
var migrations = []migration{
	{"001_add_packages_enrichment_columns", migrateAddPackagesEnrichmentColumns},
	{"002_add_versions_enrichment_columns", migrateAddVersionsEnrichmentColumns},
	{"003_ensure_artifacts_table", migrateEnsureArtifactsTable},
	{"004_ensure_vulnerabilities_table", migrateEnsureVulnerabilitiesTable},
	{"005_ensure_metadata_cache_table", migrateEnsureMetadataCacheTable},
}

// isTableNotFound returns true if the error indicates a missing table.
// SQLite returns "no such table: X", Postgres returns "relation \"X\" does not exist".
func isTableNotFound(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, "no such table") ||
		strings.Contains(msg, "does not exist")
}

// createMigrationsTable creates the migrations table.
func (db *DB) createMigrationsTable() error {
	var ts string
	if db.dialect == DialectPostgres {
		ts = "TIMESTAMP"
	} else {
		ts = "DATETIME"
	}

	query := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS migrations (
		name TEXT NOT NULL PRIMARY KEY,
		applied_at %s NOT NULL
	)`, ts)

	if _, err := db.Exec(query); err != nil {
		return fmt.Errorf("creating migrations table: %w", err)
	}
	return nil
}

// appliedMigrations returns the set of migration names that have been recorded.
// Returns nil if the migrations table does not exist yet.
func (db *DB) appliedMigrations() (map[string]bool, error) {
	var names []string
	err := db.Select(&names, "SELECT name FROM migrations")
	if err != nil {
		// Table doesn't exist yet — this is a pre-migration database.
		if isTableNotFound(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("loading applied migrations: %w", err)
	}

	applied := make(map[string]bool, len(names))
	for _, name := range names {
		applied[name] = true
	}
	return applied, nil
}

// recordMigration inserts a migration name into the migrations table.
func (db *DB) recordMigration(name string) error {
	query := db.Rebind("INSERT INTO migrations (name, applied_at) VALUES (?, ?)")
	if _, err := db.Exec(query, name, time.Now().UTC()); err != nil {
		return fmt.Errorf("recording migration %s: %w", name, err)
	}
	return nil
}

// recordAllMigrations marks every known migration as applied.
func (db *DB) recordAllMigrations() error {
	for _, m := range migrations {
		if err := db.recordMigration(m.name); err != nil {
			return err
		}
	}
	return nil
}

// MigrateSchema applies any unapplied migrations in order.
// For a fully migrated database this executes a single SELECT query.
func (db *DB) MigrateSchema() error {
	// Check and add missing columns to packages table
	packagesColumns := map[string]string{
		"registry_url":    "TEXT",
		"supplier_name":   "TEXT",
		"supplier_type":   "TEXT",
		"source":          "TEXT",
		"enriched_at":     "DATETIME",
		"vulns_synced_at": "DATETIME",
	applied, err := db.appliedMigrations()
	if err != nil {
		return err
	}

	// If the migrations table didn't exist, create it now.
	if applied == nil {
		if err := db.createMigrationsTable(); err != nil {
			return err
		}
		applied = make(map[string]bool)
	}

	for _, m := range migrations {
		if applied[m.name] {
			continue
		}
		if err := m.fn(db); err != nil {
			return fmt.Errorf("migration %s: %w", m.name, err)
		}
		if err := db.recordMigration(m.name); err != nil {
			return err
		}
	}

	return nil
}
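
Under this scheme, adding a migration means writing a new func(db *DB) error and appending a named entry to the migrations slice above. A minimal sketch, assuming a hypothetical sixth migration (the 006 name and the homepage column are invented for illustration, not part of this change):

	// Hypothetical entry appended to the migrations slice:
	//	{"006_add_packages_homepage_column", migrateAddPackagesHomepageColumn},

	func migrateAddPackagesHomepageColumn(db *DB) error {
		// Safe to run exactly once: MigrateSchema records the name in the
		// migrations table and never re-runs an applied migration.
		_, err := db.Exec("ALTER TABLE packages ADD COLUMN homepage TEXT")
		return err
	}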

func migrateAddPackagesEnrichmentColumns(db *DB) error {
	columns := map[string]string{
		"registry_url":    "TEXT",
		"supplier_name":   "TEXT",
		"supplier_type":   "TEXT",
		"source":          "TEXT",
		"enriched_at":     "DATETIME",
		"vulns_synced_at": "DATETIME",
	}

	if db.dialect == DialectPostgres {
		packagesColumns["enriched_at"] = postgresTimestamp
		packagesColumns["vulns_synced_at"] = postgresTimestamp
		columns["enriched_at"] = postgresTimestamp
		columns["vulns_synced_at"] = postgresTimestamp
	}

	for column, colType := range packagesColumns {
	for column, colType := range columns {
		hasCol, err := db.HasColumn("packages", column)
		if err != nil {
			return fmt.Errorf("checking column %s: %w", column, err)

@@ -321,9 +482,11 @@ func (db *DB) MigrateSchema() error {
		}
	}
	return nil
}

	// Check and add missing columns to versions table
	versionsColumns := map[string]string{
func migrateAddVersionsEnrichmentColumns(db *DB) error {
	columns := map[string]string{
		"integrity": "TEXT",
		"yanked":    "INTEGER DEFAULT 0",
		"source":    "TEXT",

@@ -331,11 +494,11 @@ func (db *DB) MigrateSchema() error {
	}

	if db.dialect == DialectPostgres {
		versionsColumns["yanked"] = "BOOLEAN DEFAULT FALSE"
		versionsColumns["enriched_at"] = postgresTimestamp
		columns["yanked"] = "BOOLEAN DEFAULT FALSE"
		columns["enriched_at"] = postgresTimestamp
	}

	for column, colType := range versionsColumns {
	for column, colType := range columns {
		hasCol, err := db.HasColumn("versions", column)
		if err != nil {
			return fmt.Errorf("checking column %s: %w", column, err)

@@ -347,62 +510,121 @@ func (db *DB) MigrateSchema() error {
		}
	}
	return nil
}

	// Ensure artifacts table exists
	if err := db.EnsureArtifactsTable(); err != nil {
		return fmt.Errorf("ensuring artifacts table: %w", err)
	}
func migrateEnsureArtifactsTable(db *DB) error {
	return db.EnsureArtifactsTable()
}

// Ensure vulnerabilities table exists
func migrateEnsureVulnerabilitiesTable(db *DB) error {
	hasVulns, err := db.HasTable("vulnerabilities")
	if err != nil {
		return fmt.Errorf("checking vulnerabilities table: %w", err)
	}
	if !hasVulns {
		var vulnSchema string
		if db.dialect == DialectPostgres {
			vulnSchema = `
	CREATE TABLE vulnerabilities (
		id SERIAL PRIMARY KEY,
		vuln_id TEXT NOT NULL,
		ecosystem TEXT NOT NULL,
		package_name TEXT NOT NULL,
		severity TEXT,
		summary TEXT,
		fixed_version TEXT,
		cvss_score REAL,
		"references" TEXT,
		fetched_at TIMESTAMP,
		created_at TIMESTAMP,
		updated_at TIMESTAMP
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
	CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
	`
		} else {
			vulnSchema = `
	CREATE TABLE vulnerabilities (
		id INTEGER PRIMARY KEY,
		vuln_id TEXT NOT NULL,
		ecosystem TEXT NOT NULL,
		package_name TEXT NOT NULL,
		severity TEXT,
		summary TEXT,
		fixed_version TEXT,
		cvss_score REAL,
		"references" TEXT,
		fetched_at DATETIME,
		created_at DATETIME,
		updated_at DATETIME
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
	CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
	`
		}
		if _, err := db.Exec(vulnSchema); err != nil {
			return fmt.Errorf("creating vulnerabilities table: %w", err)
		}
	if hasVulns {
		return nil
	}

	var vulnSchema string
	if db.dialect == DialectPostgres {
		vulnSchema = `
	CREATE TABLE vulnerabilities (
		id SERIAL PRIMARY KEY,
		vuln_id TEXT NOT NULL,
		ecosystem TEXT NOT NULL,
		package_name TEXT NOT NULL,
		severity TEXT,
		summary TEXT,
		fixed_version TEXT,
		cvss_score REAL,
		"references" TEXT,
		fetched_at TIMESTAMP,
		created_at TIMESTAMP,
		updated_at TIMESTAMP
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
	CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
	`
	} else {
		vulnSchema = `
	CREATE TABLE vulnerabilities (
		id INTEGER PRIMARY KEY,
		vuln_id TEXT NOT NULL,
		ecosystem TEXT NOT NULL,
		package_name TEXT NOT NULL,
		severity TEXT,
		summary TEXT,
		fixed_version TEXT,
		cvss_score REAL,
		"references" TEXT,
		fetched_at DATETIME,
		created_at DATETIME,
		updated_at DATETIME
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_vulns_id_pkg ON vulnerabilities(vuln_id, ecosystem, package_name);
	CREATE INDEX IF NOT EXISTS idx_vulns_ecosystem_pkg ON vulnerabilities(ecosystem, package_name);
	`
	}
	if _, err := db.Exec(vulnSchema); err != nil {
		return fmt.Errorf("creating vulnerabilities table: %w", err)
	}

	return nil
}

func migrateEnsureMetadataCacheTable(db *DB) error {
	return db.EnsureMetadataCacheTable()
}

// EnsureMetadataCacheTable creates the metadata_cache table if it doesn't exist.
func (db *DB) EnsureMetadataCacheTable() error {
	has, err := db.HasTable("metadata_cache")
	if err != nil {
		return fmt.Errorf("checking metadata_cache table: %w", err)
	}
	if has {
		return nil
	}

	var schema string
	if db.dialect == DialectPostgres {
		schema = `
	CREATE TABLE metadata_cache (
		id SERIAL PRIMARY KEY,
		ecosystem TEXT NOT NULL,
		name TEXT NOT NULL,
		storage_path TEXT NOT NULL,
		etag TEXT,
		content_type TEXT,
		size BIGINT,
		last_modified TIMESTAMP,
		fetched_at TIMESTAMP,
		created_at TIMESTAMP,
		updated_at TIMESTAMP
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_metadata_eco_name ON metadata_cache(ecosystem, name);
	`
	} else {
		schema = `
	CREATE TABLE metadata_cache (
		id INTEGER PRIMARY KEY,
		ecosystem TEXT NOT NULL,
		name TEXT NOT NULL,
		storage_path TEXT NOT NULL,
		etag TEXT,
		content_type TEXT,
		size INTEGER,
		last_modified DATETIME,
		fetched_at DATETIME,
		created_at DATETIME,
		updated_at DATETIME
	);
	CREATE UNIQUE INDEX IF NOT EXISTS idx_metadata_eco_name ON metadata_cache(ecosystem, name);
	`
	}
	if _, err := db.Exec(schema); err != nil {
		return fmt.Errorf("creating metadata_cache table: %w", err)
	}
	return nil
}
@@ -76,18 +76,33 @@ func (a *Artifact) IsCached() bool {
	return a.StoragePath.Valid && a.FetchedAt.Valid
}

// Vulnerability represents a cached vulnerability record.
type Vulnerability struct {
// MetadataCacheEntry represents a cached metadata blob for offline serving.
type MetadataCacheEntry struct {
	ID           int64           `db:"id" json:"id"`
	VulnID       string          `db:"vuln_id" json:"vuln_id"`
	Ecosystem    string          `db:"ecosystem" json:"ecosystem"`
	PackageName  string          `db:"package_name" json:"package_name"`
	Severity     sql.NullString  `db:"severity" json:"severity,omitempty"`
	Summary      sql.NullString  `db:"summary" json:"summary,omitempty"`
	FixedVersion sql.NullString  `db:"fixed_version" json:"fixed_version,omitempty"`
	CVSSScore    sql.NullFloat64 `db:"cvss_score" json:"cvss_score,omitempty"`
	References   sql.NullString  `db:"references" json:"references,omitempty"`
	Name         string          `db:"name" json:"name"`
	StoragePath  string          `db:"storage_path" json:"storage_path"`
	ETag         sql.NullString  `db:"etag" json:"etag,omitempty"`
	ContentType  sql.NullString  `db:"content_type" json:"content_type,omitempty"`
	Size         sql.NullInt64   `db:"size" json:"size,omitempty"`
	LastModified sql.NullTime    `db:"last_modified" json:"last_modified,omitempty"`
	FetchedAt    sql.NullTime    `db:"fetched_at" json:"fetched_at,omitempty"`
	CreatedAt    time.Time       `db:"created_at" json:"created_at"`
	UpdatedAt    time.Time       `db:"updated_at" json:"updated_at"`
}

// Vulnerability represents a cached vulnerability record.
type Vulnerability struct {
	ID           int64           `db:"id" json:"id"`
	VulnID       string          `db:"vuln_id" json:"vuln_id"`
	Ecosystem    string          `db:"ecosystem" json:"ecosystem"`
	PackageName  string          `db:"package_name" json:"package_name"`
	Severity     sql.NullString  `db:"severity" json:"severity,omitempty"`
	Summary      sql.NullString  `db:"summary" json:"summary,omitempty"`
	FixedVersion sql.NullString  `db:"fixed_version" json:"fixed_version,omitempty"`
	CVSSScore    sql.NullFloat64 `db:"cvss_score" json:"cvss_score,omitempty"`
	References   sql.NullString  `db:"references" json:"references,omitempty"`
	FetchedAt    sql.NullTime    `db:"fetched_at" json:"fetched_at,omitempty"`
	CreatedAt    time.Time       `db:"created_at" json:"created_at"`
	UpdatedAt    time.Time       `db:"updated_at" json:"updated_at"`
}
@@ -3,8 +3,8 @@ package handler

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

@@ -88,44 +88,27 @@ func (h *CargoHandler) handleIndex(w http.ResponseWriter, r *http.Request) {

	h.proxy.Logger.Info("cargo index request", "crate", name)

	// Build the index path
	indexPath := h.buildIndexPath(name)
	upstreamURL := fmt.Sprintf("%s/%s", h.indexURL, indexPath)

	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
	if err != nil {
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}

	resp, err := h.proxy.HTTPClient.Do(req)
	body, contentType, err := h.proxy.FetchOrCacheMetadata(r.Context(), "cargo", name, upstreamURL, "text/plain")
	if err != nil {
		if errors.Is(err, ErrUpstreamNotFound) {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		h.proxy.Logger.Error("failed to fetch upstream index", "error", err)
		http.Error(w, "failed to fetch from upstream", http.StatusBadGateway)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode == http.StatusNotFound {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if resp.StatusCode != http.StatusOK {
		http.Error(w, fmt.Sprintf("upstream returned %d", resp.StatusCode), http.StatusBadGateway)
		return
	if contentType == "" {
		contentType = "text/plain; charset=utf-8"
	}

	// Copy headers and body
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	if etag := resp.Header.Get("ETag"); etag != "" {
		w.Header().Set("ETag", etag)
	}
	if lastMod := resp.Header.Get("Last-Modified"); lastMod != "" {
		w.Header().Set("Last-Modified", lastMod)
	}

	h.applyCooldownFiltering(w, resp.Body)

	w.Header().Set("Content-Type", contentType)
	w.WriteHeader(http.StatusOK)
	h.applyCooldownFiltering(w, body)
}

type crateIndexEntry struct {

@@ -134,56 +117,45 @@ type crateIndexEntry struct {
	PublishTime string `json:"pubtime,omitempty"`
}

func (h *CargoHandler) applyCooldownFiltering(downstreamResponse io.Writer, upstreamBody io.Reader) {
func (h *CargoHandler) applyCooldownFiltering(downstreamResponse http.ResponseWriter, body []byte) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		// not using cooldowns, just copy the upstream to the downstream
		_, _ = io.Copy(downstreamResponse, upstreamBody)
		_, _ = downstreamResponse.Write(body)
		return
	}

	// create a scanner on the body of the http response
	requestScanner := bufio.NewScanner(upstreamBody)
	scanner := bufio.NewScanner(strings.NewReader(string(body)))

	// the response is newline-delimited JSON, loop through each line
	for requestScanner.Scan() {
		line := requestScanner.Text()
	for scanner.Scan() {
		line := scanner.Text()

		// decode the line
		var crate crateIndexEntry
		err := json.Unmarshal([]byte(line), &crate)

		if err != nil {
			// if there is an error parsing this line then exclude it and move to the next entry
			h.proxy.Logger.Error("failed to parse json entry in index", "error", err)
			continue
		}

		// parse publish time
		publishedAt, err := time.Parse(time.RFC3339, crate.PublishTime)

		if crate.PublishTime == "" || err != nil {
			// publish time is empty/missing/invalid, presumably was published before pubtime was added as a field
			// write line to response
			_, _ = downstreamResponse.Write([]byte(line + "\n"))
			continue
		}

		// make PURL
		cratePURL := purl.MakePURLString("cargo", crate.Name, "")

		if !h.proxy.Cooldown.IsAllowed("cargo", cratePURL, publishedAt) {
			// crate is not allowed, move to next crate
			h.proxy.Logger.Info("cooldown: filtering cargo version",
				"crate", crate.Name, "version", crate.Version,
				"published", crate.PublishTime)
			continue
		}

		// crate passes, write to response
		_, _ = downstreamResponse.Write([]byte(line + "\n"))
	}

	if err := requestScanner.Err(); err != nil {
	if err := scanner.Err(); err != nil {
		h.proxy.Logger.Error("error reading index response", "error", err)
	}
}
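
For reference, the index body this filter scans is newline-delimited JSON, one version per line. A sketch of the shape the loop parses, with field names other than pubtime assumed for illustration (pubtime is the publish-time field the cooldown check reads):

	{"name":"serde","version":"1.0.203","pubtime":"2024-05-01T12:00:00Z"}

Lines whose pubtime falls inside the cooldown window are dropped from the rewritten index; lines with a missing or unparseable pubtime pass through unchanged.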
@@ -1,7 +1,6 @@
package handler

import (
	"bytes"
	"encoding/json"
	"log/slog"
	"net/http"

@@ -196,9 +195,9 @@ func TestCargoCooldown(t *testing.T) {
		proxyURL: "http://localhost:8080",
	}

	var outputBuffer bytes.Buffer
	h.applyCooldownFiltering(&outputBuffer, strings.NewReader(testInput.String()))
	output := outputBuffer.String()
	recorder := httptest.NewRecorder()
	h.applyCooldownFiltering(recorder, []byte(testInput.String()))
	output := recorder.Body.String()

	if output != expectedOutput.String() {
		t.Errorf("output = %q, want %q", output, expectedOutput.String())
@@ -2,9 +2,11 @@ package handler

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

@@ -59,10 +61,10 @@ func (h *ComposerHandler) Routes() http.Handler {
func (h *ComposerHandler) handleServiceIndex(w http.ResponseWriter, r *http.Request) {
	// Return a minimal service index pointing to our proxy
	index := map[string]any{
		"packages":     map[string]any{},
		"metadata-url": h.proxyURL + "/composer/p2/%package%.json",
		"notify-batch": h.upstreamURL + "/downloads/",
		"search":       h.proxyURL + "/composer/search.json?q=%query%&type=%type%",
		"packages":           map[string]any{},
		"metadata-url":       h.proxyURL + "/composer/p2/%package%.json",
		"notify-batch":       h.upstreamURL + "/downloads/",
		"search":             h.proxyURL + "/composer/search.json?q=%query%&type=%type%",
		"providers-lazy-url": h.proxyURL + "/composer/p2/%package%.json",
	}

@@ -86,34 +88,18 @@ func (h *ComposerHandler) handlePackageMetadata(w http.ResponseWriter, r *http.Request) {

	h.proxy.Logger.Info("composer metadata request", "package", packageName)

	// Fetch from repo.packagist.org (Composer v2 metadata)
	upstreamURL := fmt.Sprintf("%s/p2/%s/%s.json", h.repoURL, vendor, pkg)

	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
	if err != nil {
		http.Error(w, "failed to create request", http.StatusInternalServerError)
		return
	}

	resp, err := h.proxy.HTTPClient.Do(req)
	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "composer", packageName, upstreamURL)
	if err != nil {
		if errors.Is(err, ErrUpstreamNotFound) {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		w.WriteHeader(resp.StatusCode)
		_, _ = io.Copy(w, resp.Body)
		return
	}

	body, err := ReadMetadata(resp.Body)
	if err != nil {
		http.Error(w, "failed to read response", http.StatusInternalServerError)
		return
	}

	rewritten, err := h.rewriteMetadata(body)
	if err != nil {

@@ -128,7 +114,9 @@ func (h *ComposerHandler) handlePackageMetadata(w http.ResponseWriter, r *http.Request) {
}

// rewriteMetadata rewrites dist URLs in Composer metadata to point at this proxy.
// If cooldown is enabled, versions published too recently are filtered out.
// If the metadata uses the minified Composer v2 format, it is expanded first so
// that every version entry contains all fields. If cooldown is enabled, versions
// published too recently are filtered out.
func (h *ComposerHandler) rewriteMetadata(body []byte) ([]byte, error) {
	var metadata map[string]any
	if err := json.Unmarshal(body, &metadata); err != nil {

@@ -140,18 +128,84 @@ func (h *ComposerHandler) rewriteMetadata(body []byte) ([]byte, error) {
		return body, nil
	}

	minified := metadata["minified"] == "composer/2.0"

	for packageName, versions := range packages {
		versionList, ok := versions.([]any)
		if !ok {
			continue
		}

		if minified {
			versionList = expandMinifiedVersions(versionList)
		}

		packages[packageName] = h.filterAndRewriteVersions(packageName, versionList)
	}

	delete(metadata, "minified")

	return json.Marshal(metadata)
}

// expandMinifiedVersions expands the Composer v2 minified format where each
// version entry only contains fields that differ from the previous entry.
// The "~dev" sentinel string resets the inheritance chain.
func expandMinifiedVersions(versionList []any) []any {
	expanded := make([]any, 0, len(versionList))
	inherited := map[string]any{}

	for _, v := range versionList {
		// The "~dev" sentinel resets the inheritance chain for dev versions.
		if s, ok := v.(string); ok && s == "~dev" {
			inherited = map[string]any{}
			continue
		}

		vmap, ok := v.(map[string]any)
		if !ok {
			continue
		}

		// Merge inherited fields into a new map, then overlay current fields.
		// Deep copy values to avoid shared references between versions.
		merged := make(map[string]any, len(inherited)+len(vmap))
		for k, val := range inherited {
			merged[k] = deepCopyValue(val)
		}
		for k, val := range vmap {
			merged[k] = val
		}

		// Update inherited state for next iteration.
		inherited = merged

		expanded = append(expanded, merged)
	}

	return expanded
}

// deepCopyValue returns a deep copy of JSON-like values (maps, slices, scalars).
func deepCopyValue(v any) any {
	switch val := v.(type) {
	case map[string]any:
		m := make(map[string]any, len(val))
		for k, v := range val {
			m[k] = deepCopyValue(v)
		}
		return m
	case []any:
		s := make([]any, len(val))
		for i, v := range val {
			s[i] = deepCopyValue(v)
		}
		return s
	default:
		return v
	}
}
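
A worked example of the expansion, using invented package data: the minified list

	[{"name": "acme/lib", "version": "2.0.0"}, {"version": "1.9.0"}]

expands to

	[{"name": "acme/lib", "version": "2.0.0"}, {"name": "acme/lib", "version": "1.9.0"}]

because the second entry inherits every field it does not override. Had a "~dev" string sat between the two entries, the second would remain just {"version": "1.9.0"}, since the sentinel clears the inherited state.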

// filterAndRewriteVersions applies cooldown filtering and rewrites dist URLs
// for a single package's version list.
func (h *ComposerHandler) filterAndRewriteVersions(packageName string, versionList []any) []any {

@@ -219,6 +273,14 @@ func (h *ComposerHandler) rewriteDistURL(vmap map[string]any, packageName, version string) {
		filename = url[idx+1:]
	}

	// GitHub zipball URLs end with a bare commit hash (no extension).
	// Append .zip so the archives library can detect the format.
	if path.Ext(filename) == "" {
		if distType, _ := dist["type"].(string); distType == "zip" {
			filename += ".zip"
		}
	}

	parts := strings.SplitN(packageName, "/", vendorPackageParts)
	if len(parts) == vendorPackageParts {
		newURL := fmt.Sprintf("%s/composer/files/%s/%s/%s/%s",
@@ -3,6 +3,7 @@ package handler

import (
	"encoding/json"
	"log/slog"
	"strings"
	"testing"
	"time"

@@ -50,6 +51,346 @@ func TestComposerRewriteMetadata(t *testing.T) {
	}
}

func TestComposerRewriteMetadataExpandsMinified(t *testing.T) {
	h := &ComposerHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	// Minified format: first version has all fields, subsequent versions
	// only include fields that changed. The proxy must expand this so every
	// version has all fields (including "name").
	input := `{
		"minified": "composer/2.0",
		"packages": {
			"symfony/console": [
				{
					"name": "symfony/console",
					"description": "Symfony Console Component",
					"version": "6.0.0",
					"dist": {
						"url": "https://repo.packagist.org/files/symfony/console/6.0.0/abc123.zip",
						"type": "zip"
					}
				},
				{
					"version": "5.4.0",
					"dist": {
						"url": "https://repo.packagist.org/files/symfony/console/5.4.0/def456.zip",
						"type": "zip"
					}
				}
			]
		}
	}`

	output, err := h.rewriteMetadata([]byte(input))
	if err != nil {
		t.Fatalf("rewriteMetadata failed: %v", err)
	}

	var result map[string]any
	if err := json.Unmarshal(output, &result); err != nil {
		t.Fatalf("failed to parse output: %v", err)
	}

	// The minified key should be removed from output
	if _, ok := result["minified"]; ok {
		t.Error("expected minified key to be removed from output")
	}

	packages := result["packages"].(map[string]any)
	versions := packages["symfony/console"].([]any)

	// Second version should have inherited the "name" and "description" fields
	v1 := versions[1].(map[string]any)
	if v1["name"] != "symfony/console" {
		t.Errorf("second version name = %v, want %q", v1["name"], "symfony/console")
	}
	if v1["description"] != "Symfony Console Component" {
		t.Errorf("second version description = %v, want %q", v1["description"], "Symfony Console Component")
	}
}

func TestComposerRewriteMetadataMinifiedDevReset(t *testing.T) {
	h := &ComposerHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	// The ~dev sentinel resets the inheritance chain for dev versions.
	input := `{
		"minified": "composer/2.0",
		"packages": {
			"symfony/console": [
				{
					"name": "symfony/console",
					"description": "Symfony Console Component",
					"license": ["MIT"],
					"version": "6.0.0",
					"dist": {
						"url": "https://repo.packagist.org/files/symfony/console/6.0.0/abc123.zip",
						"type": "zip"
					}
				},
				"~dev",
				{
					"name": "symfony/console",
					"version": "dev-main",
					"dist": {
						"url": "https://repo.packagist.org/files/symfony/console/dev-main/xyz789.zip",
						"type": "zip"
					}
				}
			]
		}
	}`

	output, err := h.rewriteMetadata([]byte(input))
	if err != nil {
		t.Fatalf("rewriteMetadata failed: %v", err)
	}

	var result map[string]any
	if err := json.Unmarshal(output, &result); err != nil {
		t.Fatalf("failed to parse output: %v", err)
	}

	packages := result["packages"].(map[string]any)
	versions := packages["symfony/console"].([]any)

	if len(versions) != 2 {
		t.Fatalf("expected 2 versions, got %d", len(versions))
	}

	// Dev version should NOT have inherited "license" or "description"
	// from the tagged version (the ~dev sentinel resets inheritance).
	devVersion := versions[1].(map[string]any)
	if devVersion["version"] != "dev-main" {
		t.Errorf("dev version = %v, want %q", devVersion["version"], "dev-main")
	}
	if _, ok := devVersion["license"]; ok {
		t.Error("dev version should not have inherited license field after ~dev reset")
	}
	if _, ok := devVersion["description"]; ok {
		t.Error("dev version should not have inherited description field after ~dev reset")
	}
}

func TestComposerRewriteMetadataCooldownPreservesNames(t *testing.T) {
	now := time.Now()
	old := now.Add(-10 * 24 * time.Hour).Format(time.RFC3339)
	veryOld := now.Add(-20 * 24 * time.Hour).Format(time.RFC3339)
	recent := now.Add(-1 * time.Hour).Format(time.RFC3339)

	proxy := &Proxy{Logger: slog.Default()}
	proxy.Cooldown = &cooldown.Config{Default: "3d"}

	h := &ComposerHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	// Minified format where "name" only appears in first version.
	// When cooldown filters the first version, remaining versions must
	// still have the "name" field after expansion.
	input := `{
		"minified": "composer/2.0",
		"packages": {
			"symfony/console": [
				{
					"name": "symfony/console",
					"description": "Symfony Console Component",
					"version": "7.0.0",
					"time": "` + recent + `",
					"dist": {"url": "https://repo.packagist.org/7.0.0.zip", "type": "zip"}
				},
				{
					"version": "6.0.0",
					"time": "` + old + `",
					"dist": {"url": "https://repo.packagist.org/6.0.0.zip", "type": "zip"}
				},
				{
					"version": "5.0.0",
					"time": "` + veryOld + `",
					"dist": {"url": "https://repo.packagist.org/5.0.0.zip", "type": "zip"}
				}
			]
		}
	}`

	output, err := h.rewriteMetadata([]byte(input))
	if err != nil {
		t.Fatalf("rewriteMetadata failed: %v", err)
	}

	var result map[string]any
	if err := json.Unmarshal(output, &result); err != nil {
		t.Fatalf("failed to parse output: %v", err)
	}

	packages := result["packages"].(map[string]any)
	versions := packages["symfony/console"].([]any)

	// v7.0.0 should be filtered by cooldown, leaving v6.0.0 and v5.0.0
	if len(versions) != 2 {
		t.Fatalf("expected 2 versions after cooldown, got %d", len(versions))
	}

	// Both remaining versions must have the "name" field
	for _, v := range versions {
		vmap := v.(map[string]any)
		if vmap["name"] != "symfony/console" {
			t.Errorf("version %v missing name field, got %v", vmap["version"], vmap["name"])
		}
	}
}

func TestComposerRewriteDistURLGitHubZipball(t *testing.T) {
	// GitHub zipball URLs end with a bare commit hash, no file extension.
	// The proxy must produce a filename with .zip extension so that the
	// archives library can detect the format when browsing source.
	h := &ComposerHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	vmap := map[string]any{
		"version": "v7.4.8",
		"dist": map[string]any{
			"url":       "https://api.github.com/repos/symfony/asset/zipball/d2e2f014ccd6ec9fae8dbe6336a4164346a2a856",
			"type":      "zip",
			"shasum":    "",
			"reference": "d2e2f014ccd6ec9fae8dbe6336a4164346a2a856",
		},
	}

	h.rewriteDistURL(vmap, "symfony/asset", "v7.4.8")

	dist := vmap["dist"].(map[string]any)
	url := dist["url"].(string)

	// The rewritten URL's filename must have a .zip extension
	if !strings.HasSuffix(url, ".zip") {
		t.Errorf("rewritten dist URL filename has no .zip extension: %s", url)
	}
}

func TestComposerRewriteMetadataGitHubZipballFilenames(t *testing.T) {
	// End-to-end: metadata with GitHub zipball URLs should produce
	// download URLs that end in .zip so browse source can open them.
	h := &ComposerHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	input := `{
		"packages": {
			"symfony/config": [
				{
					"version": "v7.4.8",
					"dist": {
						"url": "https://api.github.com/repos/symfony/config/zipball/c7369cc1da250fcbfe0c5a9d109e419661549c39",
						"type": "zip",
						"reference": "c7369cc1da250fcbfe0c5a9d109e419661549c39"
					}
				}
			]
		}
	}`

	output, err := h.rewriteMetadata([]byte(input))
	if err != nil {
		t.Fatalf("rewriteMetadata failed: %v", err)
	}

	var result map[string]any
	if err := json.Unmarshal(output, &result); err != nil {
		t.Fatalf("failed to parse output: %v", err)
	}

	packages := result["packages"].(map[string]any)
	versions := packages["symfony/config"].([]any)
	v := versions[0].(map[string]any)
	dist := v["dist"].(map[string]any)
	url := dist["url"].(string)

	if !strings.HasSuffix(url, ".zip") {
		t.Errorf("rewritten URL should end in .zip, got %s", url)
	}
}

func TestComposerExpandMinifiedSharedDistReferences(t *testing.T) {
	// When a minified version inherits the dist field from a previous version
	// (i.e. it doesn't include its own dist), expanding + rewriting must not
	// corrupt the dist URLs via shared map references.
	h := &ComposerHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	// In this minified payload, v5.3.0 does NOT include a dist field,
	// so it inherits v5.4.0's dist. After expansion and URL rewriting,
	// each version must have its own correct dist URL.
	input := `{
		"minified": "composer/2.0",
		"packages": {
			"vendor/pkg": [
				{
					"name": "vendor/pkg",
					"version": "5.4.0",
					"dist": {
						"url": "https://api.github.com/repos/vendor/pkg/zipball/aaa111",
						"type": "zip",
						"reference": "aaa111"
					}
				},
				{
					"version": "5.3.0"
				}
			]
		}
	}`

	output, err := h.rewriteMetadata([]byte(input))
	if err != nil {
		t.Fatalf("rewriteMetadata failed: %v", err)
	}

	var result map[string]any
	if err := json.Unmarshal(output, &result); err != nil {
		t.Fatalf("failed to parse output: %v", err)
	}

	packages := result["packages"].(map[string]any)
	versions := packages["vendor/pkg"].([]any)
	if len(versions) != 2 {
		t.Fatalf("expected 2 versions, got %d", len(versions))
	}

	v1 := versions[0].(map[string]any)
	v2 := versions[1].(map[string]any)

	dist1 := v1["dist"].(map[string]any)
	dist2 := v2["dist"].(map[string]any)

	url1 := dist1["url"].(string)
	url2 := dist2["url"].(string)

	// Each version must have its own URL with its own version in the path
	if !strings.Contains(url1, "/5.4.0/") {
		t.Errorf("v5.4.0 dist URL should contain /5.4.0/, got %s", url1)
	}
	if !strings.Contains(url2, "/5.3.0/") {
		t.Errorf("v5.3.0 dist URL should contain /5.3.0/, got %s", url2)
	}

	// The two URLs must be different
	if url1 == url2 {
		t.Errorf("both versions have the same dist URL (shared reference bug): %s", url1)
	}
}

func TestComposerRewriteMetadataCooldown(t *testing.T) {
	now := time.Now()
	old := now.Add(-10 * 24 * time.Hour).Format(time.RFC3339)
@@ -43,8 +43,8 @@ func (h *ConanHandler) Routes() http.Handler {
	mux.HandleFunc("GET /v1/files/{name}/{version}/{user}/{channel}/{revision}/package/{pkgref}/{pkgrev}/{filename}", h.handlePackageFile)
	mux.HandleFunc("GET /v2/files/{name}/{version}/{user}/{channel}/{revision}/package/{pkgref}/{pkgrev}/{filename}", h.handlePackageFile)

	// Proxy all other endpoints (metadata, search, etc.)
	mux.HandleFunc("GET /", h.proxyUpstream)
	// Proxy all other endpoints (metadata, search, etc.) with caching
	mux.HandleFunc("GET /", h.proxyCached)

	return mux
}

@@ -147,6 +147,20 @@ func (h *ConanHandler) shouldCacheFile(filename string) bool {
	return false
}

// proxyCached forwards a request with metadata caching.
func (h *ConanHandler) proxyCached(w http.ResponseWriter, r *http.Request) {
	cacheKey := strings.TrimPrefix(r.URL.Path, "/")
	cacheKey = strings.ReplaceAll(cacheKey, "/", "_")
	if r.URL.RawQuery != "" {
		cacheKey += "_" + r.URL.RawQuery
	}
	upstreamURL := h.upstreamURL + r.URL.Path
	if r.URL.RawQuery != "" {
		upstreamURL += "?" + r.URL.RawQuery
	}
	h.proxy.ProxyCached(w, r, upstreamURL, "conan", cacheKey, "*/*")
}
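
As an illustration with a hypothetical Conan API path: GET /v2/conans/zlib/1.3.1/_/_/search?q=shared is fetched from h.upstreamURL + "/v2/conans/zlib/1.3.1/_/_/search?q=shared" and stored under the cache key v2_conans_zlib_1.3.1___search_q=shared in the conan metadata cache, so the same path with a different query string becomes a distinct cache entry.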

// proxyUpstream forwards a request to conan center without caching.
func (h *ConanHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
	upstreamURL := h.upstreamURL + r.URL.Path
@@ -1,8 +1,13 @@
package handler

import (
	"encoding/json"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/git-pkgs/purl"
)

const (

@@ -31,9 +36,9 @@ func (h *CondaHandler) Routes() http.Handler {
	mux := http.NewServeMux()

	// Channel index (repodata)
	mux.HandleFunc("GET /{channel}/{arch}/repodata.json", h.proxyUpstream)
	mux.HandleFunc("GET /{channel}/{arch}/repodata.json.bz2", h.proxyUpstream)
	mux.HandleFunc("GET /{channel}/{arch}/current_repodata.json", h.proxyUpstream)
	mux.HandleFunc("GET /{channel}/{arch}/repodata.json", h.handleRepodata)
	mux.HandleFunc("GET /{channel}/{arch}/repodata.json.bz2", h.proxyCached)
	mux.HandleFunc("GET /{channel}/{arch}/current_repodata.json", h.handleRepodata)

	// Package downloads (cache these)
	mux.HandleFunc("GET /{channel}/{arch}/{filename}", h.handleDownload)

@@ -119,6 +124,121 @@ func (h *CondaHandler) parseFilename(filename string) (name, version string) {
	return name, version
}

// handleRepodata proxies repodata.json, applying cooldown filtering when enabled.
func (h *CondaHandler) handleRepodata(w http.ResponseWriter, r *http.Request) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		h.proxyCached(w, r)
		return
	}

	upstreamURL := h.upstreamURL + r.URL.Path

	h.proxy.Logger.Debug("fetching repodata for cooldown filtering", "url", upstreamURL)

	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
	if err != nil {
		http.Error(w, "failed to create request", http.StatusInternalServerError)
		return
	}
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := h.proxy.HTTPClient.Do(req)
	if err != nil {
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		for k, vv := range resp.Header {
			for _, v := range vv {
				w.Header().Add(k, v)
			}
		}
		w.WriteHeader(resp.StatusCode)
		_, _ = io.Copy(w, resp.Body)
		return
	}

	body, err := ReadMetadata(resp.Body)
	if err != nil {
		http.Error(w, "failed to read response", http.StatusInternalServerError)
		return
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		h.proxy.Logger.Warn("failed to filter repodata, proxying original", "error", err)
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write(body)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(filtered)
}

// condaTimestampDivisor converts Conda's millisecond timestamps to seconds.
const condaTimestampDivisor = 1000

// applyCooldownFiltering removes entries from repodata.json that were
// published too recently based on their timestamp field.
func (h *CondaHandler) applyCooldownFiltering(body []byte) ([]byte, error) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		return body, nil
	}

	var repodata map[string]any
	if err := json.Unmarshal(body, &repodata); err != nil {
		return nil, err
	}

	for _, key := range []string{"packages", "packages.conda"} {
		packages, ok := repodata[key].(map[string]any)
		if !ok {
			continue
		}

		for filename, entry := range packages {
			entryMap, ok := entry.(map[string]any)
			if !ok {
				continue
			}

			ts, ok := entryMap["timestamp"].(float64)
			if !ok || ts == 0 {
				continue
			}

			publishedAt := time.Unix(int64(ts)/condaTimestampDivisor, 0)

			name, _ := entryMap["name"].(string)
			if name == "" {
				continue
			}

			packagePURL := purl.MakePURLString("conda", name, "")

			if !h.proxy.Cooldown.IsAllowed("conda", packagePURL, publishedAt) {
				version, _ := entryMap["version"].(string)
				h.proxy.Logger.Info("cooldown: filtering conda package",
					"name", name, "version", version, "filename", filename)
				delete(packages, filename)
			}
		}
	}

	return json.Marshal(repodata)
}
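
For example, a repodata entry carrying "timestamp": 1700000000000 (Conda timestamps are milliseconds) converts to time.Unix(1700000000, 0) before the cooldown comparison. Entries with a missing or zero timestamp, or without a name, deliberately pass through unfiltered rather than being dropped.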

// proxyCached forwards a metadata request with caching.
func (h *CondaHandler) proxyCached(w http.ResponseWriter, r *http.Request) {
	cacheKey := strings.TrimPrefix(r.URL.Path, "/")
	cacheKey = strings.ReplaceAll(cacheKey, "/", "_")
	h.proxy.ProxyCached(w, r, h.upstreamURL+r.URL.Path, "conda", cacheKey, "*/*")
}

// proxyUpstream forwards a request to Anaconda without caching.
func (h *CondaHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
	h.proxy.ProxyUpstream(w, r, h.upstreamURL+r.URL.Path, []string{"Accept-Encoding"})
@@ -1,8 +1,14 @@
package handler

import (
	"encoding/json"
	"log/slog"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/git-pkgs/proxy/internal/cooldown"
)

func TestCondaParseFilename(t *testing.T) {

@@ -49,3 +55,251 @@ func TestCondaIsPackageFile(t *testing.T) {
		}
	}
}

func TestCondaCooldownFiltering(t *testing.T) {
	now := time.Now()
	oldTimestamp := float64(now.Add(-7 * 24 * time.Hour).UnixMilli())
	recentTimestamp := float64(now.Add(-1 * time.Hour).UnixMilli())

	repodata := map[string]any{
		"info": map[string]any{},
		"packages": map[string]any{
			"numpy-1.24.0-old.tar.bz2": map[string]any{
				"name":      "numpy",
				"version":   "1.24.0",
				"timestamp": oldTimestamp,
			},
			"numpy-1.25.0-new.tar.bz2": map[string]any{
				"name":      "numpy",
				"version":   "1.25.0",
				"timestamp": recentTimestamp,
			},
		},
		"packages.conda": map[string]any{
			"scipy-1.11.0-old.conda": map[string]any{
				"name":      "scipy",
				"version":   "1.11.0",
				"timestamp": oldTimestamp,
			},
			"scipy-1.12.0-new.conda": map[string]any{
				"name":      "scipy",
				"version":   "1.12.0",
				"timestamp": recentTimestamp,
			},
		},
	}

	body, err := json.Marshal(repodata)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &CondaHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	packages := result["packages"].(map[string]any)
	if len(packages) != 1 {
		t.Fatalf("expected 1 package in packages, got %d", len(packages))
	}
	if _, ok := packages["numpy-1.24.0-old.tar.bz2"]; !ok {
		t.Error("expected old numpy to survive filtering")
	}

	condaPkgs := result["packages.conda"].(map[string]any)
	if len(condaPkgs) != 1 {
		t.Fatalf("expected 1 package in packages.conda, got %d", len(condaPkgs))
	}
	if _, ok := condaPkgs["scipy-1.11.0-old.conda"]; !ok {
		t.Error("expected old scipy to survive filtering")
	}
}

func TestCondaCooldownFilteringWithPackageOverride(t *testing.T) {
	now := time.Now()
	recentTimestamp := float64(now.Add(-2 * time.Hour).UnixMilli())

	repodata := map[string]any{
		"info": map[string]any{},
		"packages": map[string]any{
			"special-1.0.0-build.tar.bz2": map[string]any{
				"name":      "special",
				"version":   "1.0.0",
				"timestamp": recentTimestamp,
			},
		},
		"packages.conda": map[string]any{},
	}

	body, err := json.Marshal(repodata)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default:  "3d",
		Packages: map[string]string{"pkg:conda/special": "1h"},
	}

	h := &CondaHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	packages := result["packages"].(map[string]any)
	if len(packages) != 1 {
		t.Fatalf("expected 1 package (override allows it), got %d", len(packages))
	}
}

func TestCondaCooldownFilteringNoTimestamp(t *testing.T) {
	repodata := map[string]any{
		"info": map[string]any{},
		"packages": map[string]any{
			"old-pkg-1.0.0-build.tar.bz2": map[string]any{
				"name":    "old-pkg",
				"version": "1.0.0",
				// no timestamp field
			},
		},
		"packages.conda": map[string]any{},
	}

	body, err := json.Marshal(repodata)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &CondaHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	packages := result["packages"].(map[string]any)
	if len(packages) != 1 {
		t.Fatalf("entries without timestamp should pass through, got %d", len(packages))
	}
}

func TestCondaHandleRepodataWithCooldown(t *testing.T) {
	now := time.Now()
	oldTimestamp := float64(now.Add(-7 * 24 * time.Hour).UnixMilli())
	recentTimestamp := float64(now.Add(-1 * time.Hour).UnixMilli())

	repodataJSON, _ := json.Marshal(map[string]any{
		"info": map[string]any{},
		"packages": map[string]any{
			"old-1.0.0-build.tar.bz2": map[string]any{
				"name": "testpkg", "version": "1.0.0", "timestamp": oldTimestamp,
			},
			"new-2.0.0-build.tar.bz2": map[string]any{
				"name": "testpkg", "version": "2.0.0", "timestamp": recentTimestamp,
			},
		},
		"packages.conda": map[string]any{},
	})

	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write(repodataJSON)
	}))
	defer upstream.Close()

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &CondaHandler{
		proxy:       proxy,
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	req := httptest.NewRequest(http.MethodGet, "/conda-forge/noarch/repodata.json", nil)
	req.SetPathValue("channel", "conda-forge")
	req.SetPathValue("arch", "noarch")
	w := httptest.NewRecorder()
	h.handleRepodata(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
	}

	var result map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &result); err != nil {
		t.Fatal(err)
	}

	packages := result["packages"].(map[string]any)
	if len(packages) != 1 {
		t.Fatalf("expected 1 package after filtering, got %d", len(packages))
	}
}

func TestCondaHandleRepodataWithoutCooldown(t *testing.T) {
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"info":{},"packages":{},"packages.conda":{}}`))
	}))
	defer upstream.Close()

	h := &CondaHandler{
		proxy:       &Proxy{Logger: slog.Default(), HTTPClient: http.DefaultClient},
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	req := httptest.NewRequest(http.MethodGet, "/conda-forge/noarch/repodata.json", nil)
	req.SetPathValue("channel", "conda-forge")
	req.SetPathValue("arch", "noarch")
	w := httptest.NewRecorder()
	h.handleRepodata(w, req)

	// Without cooldown, should proxy directly (response comes from upstream)
	if w.Code != http.StatusOK {
		t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
	}
}
@@ -306,7 +306,6 @@ func (h *ContainerHandler) proxyBlobHead(w http.ResponseWriter, r *http.Request,
	w.WriteHeader(resp.StatusCode)
}


// containerError writes an OCI-compliant error response.
func (h *ContainerHandler) containerError(w http.ResponseWriter, status int, code, message string) {
	w.Header().Set("Content-Type", "application/json")

@@ -86,8 +86,8 @@ func TestContainerHandler_parseManifestPath(t *testing.T) {
			wantReference: "sha256:abc123",
		},
		{
			path: "invalid/path",
			wantName: "",
			path:          "invalid/path",
			wantName:      "",
		},
	}
@@ -30,14 +30,14 @@ func (h *CRANHandler) Routes() http.Handler {
	mux := http.NewServeMux()

	// Package indexes
	mux.HandleFunc("GET /src/contrib/PACKAGES", h.proxyUpstream)
	mux.HandleFunc("GET /src/contrib/PACKAGES.gz", h.proxyUpstream)
	mux.HandleFunc("GET /src/contrib/PACKAGES.rds", h.proxyUpstream)
	mux.HandleFunc("GET /src/contrib/PACKAGES", h.proxyCached)
	mux.HandleFunc("GET /src/contrib/PACKAGES.gz", h.proxyCached)
	mux.HandleFunc("GET /src/contrib/PACKAGES.rds", h.proxyCached)

	// Binary package indexes
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES", h.proxyUpstream)
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES.gz", h.proxyUpstream)
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES.rds", h.proxyUpstream)
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES", h.proxyCached)
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES.gz", h.proxyCached)
	mux.HandleFunc("GET /bin/{platform}/contrib/{rversion}/PACKAGES.rds", h.proxyCached)

	// Source package downloads
	mux.HandleFunc("GET /src/contrib/{filename}", h.handleSourceDownload)

@@ -150,6 +150,13 @@ func (h *CRANHandler) isBinaryPackage(filename string) bool {
	return strings.HasSuffix(filename, ".zip") || strings.HasSuffix(filename, ".tgz")
}

// proxyCached forwards a metadata request with caching.
func (h *CRANHandler) proxyCached(w http.ResponseWriter, r *http.Request) {
	cacheKey := strings.TrimPrefix(r.URL.Path, "/")
	cacheKey = strings.ReplaceAll(cacheKey, "/", "_")
	h.proxy.ProxyCached(w, r, h.upstreamURL+r.URL.Path, "cran", cacheKey, "*/*")
}

// proxyUpstream forwards a request to CRAN without caching.
func (h *CRANHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
	h.proxy.ProxyUpstream(w, r, h.upstreamURL+r.URL.Path, []string{"Accept-Encoding"})
@@ -93,7 +93,8 @@ func (h *DebianHandler) handlePackageDownload(w http.ResponseWriter, r *http.Request) {
// handleMetadata proxies repository metadata files.
// These change frequently, so they go through the metadata cache rather than the artifact cache.
func (h *DebianHandler) handleMetadata(w http.ResponseWriter, r *http.Request, path string) {
	h.proxy.ProxyMetadata(w, r, fmt.Sprintf("%s/%s", h.upstreamURL, path), "debian")
	cacheKey := strings.ReplaceAll(path, "/", "_")
	h.proxy.ProxyCached(w, r, fmt.Sprintf("%s/%s", h.upstreamURL, path), "debian", cacheKey, "*/*")
}

// proxyFile proxies any file directly without caching.
@@ -197,9 +197,8 @@ func TestGemHandler_UpstreamProxy(t *testing.T) {
	if string(body) != "upstream specs data" {
		t.Errorf("body = %q, want %q", body, "upstream specs data")
	}
	if resp.Header.Get("X-Test") != "upstream" {
		t.Errorf("missing upstream header")
	}
	// Metadata caching reads the response body into storage and serves it back,
	// so arbitrary upstream headers are not forwarded. Content-Type is preserved.
}

func TestGemHandler_CacheMiss(t *testing.T) {
@@ -1,10 +1,15 @@
package handler

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/git-pkgs/purl"
)

const (

@@ -35,13 +40,13 @@ func (h *GemHandler) Routes() http.Handler {
	mux.HandleFunc("GET /gems/{filename}", h.handleDownload)

	// Specs indexes (compressed Ruby Marshal format)
	mux.HandleFunc("GET /specs.4.8.gz", h.proxyUpstream)
	mux.HandleFunc("GET /latest_specs.4.8.gz", h.proxyUpstream)
	mux.HandleFunc("GET /prerelease_specs.4.8.gz", h.proxyUpstream)
	mux.HandleFunc("GET /specs.4.8.gz", h.proxyCached)
	mux.HandleFunc("GET /latest_specs.4.8.gz", h.proxyCached)
	mux.HandleFunc("GET /prerelease_specs.4.8.gz", h.proxyCached)

	// Compact index (bundler 2.x+)
	mux.HandleFunc("GET /versions", h.proxyUpstream)
	mux.HandleFunc("GET /info/{name}", h.proxyUpstream)
	mux.HandleFunc("GET /versions", h.proxyCached)
	mux.HandleFunc("GET /info/{name}", h.handleCompactIndex)

	// Quick index
	mux.HandleFunc("GET /quick/Marshal.4.8/{filename}", h.proxyUpstream)

@@ -98,6 +103,198 @@ func (h *GemHandler) parseGemFilename(filename string) (name, version string) {
	return "", ""
}

// handleCompactIndex serves the compact index for a gem, filtering versions
// based on cooldown when enabled.
func (h *GemHandler) handleCompactIndex(w http.ResponseWriter, r *http.Request) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		h.proxyCached(w, r)
		return
	}

	name := r.PathValue("name")
	if name == "" {
		http.Error(w, "invalid gem name", http.StatusBadRequest)
		return
	}

	h.proxy.Logger.Info("gem compact index request with cooldown", "name", name)

	indexResp, filteredVersions, err := h.fetchIndexAndVersions(r, name)
	if err != nil {
		h.proxy.Logger.Error("upstream compact index request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
	defer func() { _ = indexResp.Body.Close() }()

	if indexResp.StatusCode != http.StatusOK {
		copyResponseHeaders(w, indexResp.Header)
		w.WriteHeader(indexResp.StatusCode)
		_, _ = io.Copy(w, indexResp.Body)
		return
	}

	if filteredVersions == nil {
		h.proxy.Logger.Warn("failed to fetch version timestamps, proxying unfiltered", "name", name)
		copyResponseHeaders(w, indexResp.Header)
		w.WriteHeader(http.StatusOK)
		_, _ = io.Copy(w, indexResp.Body)
		return
	}

	h.writeFilteredIndex(w, indexResp, name, filteredVersions)
}

// fetchIndexAndVersions fetches the compact index and versions API concurrently.
// Returns the index response, a set of versions to filter (nil if versions API failed),
// and an error if the index fetch itself failed.
func (h *GemHandler) fetchIndexAndVersions(r *http.Request, name string) (*http.Response, map[string]bool, error) {
	type versionsResult struct {
		filtered map[string]bool
		err      error
	}

	versionsCh := make(chan versionsResult, 1)
	go func() {
		filtered, err := h.fetchFilteredVersions(r, name)
		versionsCh <- versionsResult{filtered: filtered, err: err}
	}()

	indexResp, err := h.fetchCompactIndex(r, name)

	versionsRes := <-versionsCh

	if err != nil {
		return nil, nil, err
	}

	if versionsRes.err != nil {
		return indexResp, nil, nil
	}

	return indexResp, versionsRes.filtered, nil
}

// fetchCompactIndex fetches the compact index from upstream.
func (h *GemHandler) fetchCompactIndex(r *http.Request, name string) (*http.Response, error) {
	indexURL := h.upstreamURL + "/info/" + name
	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, indexURL, nil)
	if err != nil {
		return nil, err
	}
	for _, hdr := range []string{"Accept", "Accept-Encoding", "If-None-Match", "If-Modified-Since"} {
		if v := r.Header.Get(hdr); v != "" {
			req.Header.Set(hdr, v)
		}
	}
	return h.proxy.HTTPClient.Do(req)
}

// writeFilteredIndex writes the compact index response with cooldown-filtered versions removed.
func (h *GemHandler) writeFilteredIndex(w http.ResponseWriter, resp *http.Response, name string, filtered map[string]bool) {
	for k, vv := range resp.Header {
		if strings.EqualFold(k, "Content-Length") {
			continue // length will change after filtering
		}
		for _, v := range vv {
			w.Header().Add(k, v)
		}
	}
	w.WriteHeader(http.StatusOK)

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()

		if line == "---" {
			_, _ = fmt.Fprintln(w, line)
			continue
		}

		version := line
		if spaceIdx := strings.IndexByte(line, ' '); spaceIdx > 0 {
			version = line[:spaceIdx]
		}

		if filtered[version] {
			h.proxy.Logger.Info("cooldown: filtering gem version",
				"gem", name, "version", version)
			continue
		}

		_, _ = fmt.Fprintln(w, line)
	}
}
|
||||
|
||||
// copyResponseHeaders copies HTTP headers from a response to a writer.
|
||||
func copyResponseHeaders(w http.ResponseWriter, headers http.Header) {
|
||||
for k, vv := range headers {
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// gemVersion represents a version entry from the RubyGems versions API.
|
||||
type gemVersion struct {
|
||||
Number string `json:"number"`
|
||||
Platform string `json:"platform"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
// fetchFilteredVersions fetches the versions API and returns a set of version
|
||||
// strings that should be filtered out by cooldown.
|
||||
func (h *GemHandler) fetchFilteredVersions(r *http.Request, name string) (map[string]bool, error) {
|
||||
versionsURL := fmt.Sprintf("%s/api/v1/versions/%s.json", h.upstreamURL, name)
|
||||
req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, versionsURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := h.proxy.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("versions API returned %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var versions []gemVersion
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packagePURL := purl.MakePURLString("gem", name, "")
|
||||
filtered := make(map[string]bool)
|
||||
|
||||
for _, v := range versions {
|
||||
createdAt, err := time.Parse(time.RFC3339, v.CreatedAt)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !h.proxy.Cooldown.IsAllowed("gem", packagePURL, createdAt) {
|
||||
// Build version string matching compact index format
|
||||
versionStr := v.Number
|
||||
if v.Platform != "" && v.Platform != "ruby" {
|
||||
versionStr = v.Number + "-" + v.Platform
|
||||
}
|
||||
filtered[versionStr] = true
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// proxyCached forwards a metadata request with caching.
|
||||
func (h *GemHandler) proxyCached(w http.ResponseWriter, r *http.Request) {
|
||||
upstreamURL := h.upstreamURL + r.URL.Path
|
||||
cacheKey := strings.TrimPrefix(r.URL.Path, "/")
|
||||
h.proxy.ProxyCached(w, r, upstreamURL, "gem", cacheKey, "*/*")
|
||||
}
|
||||
|
||||
// proxyUpstream forwards a request to rubygems.org without caching.
|
||||
func (h *GemHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
|
||||
upstreamURL := h.upstreamURL + r.URL.Path
|
||||
|
|
|
|||
|
|
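Reviewer note on the format being filtered above: the RubyGems compact index served from /info/{name} is line-oriented — a "---" header, then one line per version, where everything before the first space is the version string (platform variants appear as "1.0.0-java"). A minimal standalone sketch of the cooldown rewrite, using a made-up gem and an invented "blocked" set in place of the real cooldown lookup:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical /info/railsish body; dependency and checksum details are invented.
        index := "---\n7.1.0 rack:>= 2.2|checksum:aaa\n7.2.0 rack:>= 2.2|checksum:bbb\n"
        blocked := map[string]bool{"7.2.0": true} // versions still inside the cooldown window

        for _, line := range strings.Split(strings.TrimSuffix(index, "\n"), "\n") {
            version := line
            if i := strings.IndexByte(line, ' '); i > 0 {
                version = line[:i]
            }
            if line != "---" && blocked[version] {
                continue // drop versions published too recently
            }
            fmt.Println(line)
        }
    }

Running this prints the header and the 7.1.0 line only, which is exactly the shape of output the writeFilteredIndex loop above produces for a filtered gem.
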
@@ -1,8 +1,16 @@
package handler

import (
    "encoding/json"
    "fmt"
    "log/slog"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"
    "time"

    "github.com/git-pkgs/proxy/internal/cooldown"
)

func TestGemParseFilename(t *testing.T) {

@@ -28,3 +36,217 @@ func TestGemParseFilename(t *testing.T) {
        }
    }
}

func TestGemCompactIndexCooldown(t *testing.T) {
    now := time.Now()
    oldTime := now.Add(-7 * 24 * time.Hour).Format(time.RFC3339)
    recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)

    compactIndex := "---\n1.0.0 dep1:>= 1.0|checksum:abc123\n2.0.0 dep1:>= 1.0|checksum:def456\n"

    versionsJSON, _ := json.Marshal([]gemVersion{
        {Number: "1.0.0", Platform: "ruby", CreatedAt: oldTime},
        {Number: "2.0.0", Platform: "ruby", CreatedAt: recentTime},
    })

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        switch {
        case strings.HasPrefix(r.URL.Path, "/info/"):
            w.Header().Set("Content-Type", "text/plain")
            _, _ = w.Write([]byte(compactIndex))
        case strings.HasPrefix(r.URL.Path, "/api/v1/versions/"):
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write(versionsJSON)
        default:
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer upstream.Close()

    proxy := testProxy()
    proxy.Cooldown = &cooldown.Config{
        Default: "3d",
    }

    h := &GemHandler{
        proxy:       proxy,
        upstreamURL: upstream.URL,
        proxyURL:    "http://proxy.local",
    }

    req := httptest.NewRequest(http.MethodGet, "/info/testgem", nil)
    req.SetPathValue("name", "testgem")
    w := httptest.NewRecorder()
    h.handleCompactIndex(w, req)

    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
    }

    body := w.Body.String()
    if !strings.Contains(body, "1.0.0") {
        t.Error("expected version 1.0.0 to survive filtering")
    }
    if strings.Contains(body, "2.0.0") {
        t.Error("expected version 2.0.0 to be filtered out")
    }
    if !strings.HasPrefix(body, "---\n") {
        t.Error("expected compact index header to be preserved")
    }
}

func TestGemCompactIndexCooldownWithPlatformVersion(t *testing.T) {
    now := time.Now()
    recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)

    compactIndex := "---\n1.0.0 dep:>= 1.0|checksum:abc\n1.0.0-java dep:>= 1.0|checksum:def\n"

    versionsJSON, _ := json.Marshal([]gemVersion{
        {Number: "1.0.0", Platform: "ruby", CreatedAt: recentTime},
        {Number: "1.0.0", Platform: "java", CreatedAt: recentTime},
    })

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        switch {
        case strings.HasPrefix(r.URL.Path, "/info/"):
            _, _ = w.Write([]byte(compactIndex))
        case strings.HasPrefix(r.URL.Path, "/api/v1/versions/"):
            _, _ = w.Write(versionsJSON)
        }
    }))
    defer upstream.Close()

    proxy := testProxy()
    proxy.Cooldown = &cooldown.Config{
        Default: "3d",
    }

    h := &GemHandler{
        proxy:       proxy,
        upstreamURL: upstream.URL,
        proxyURL:    "http://proxy.local",
    }

    req := httptest.NewRequest(http.MethodGet, "/info/testgem", nil)
    req.SetPathValue("name", "testgem")
    w := httptest.NewRecorder()
    h.handleCompactIndex(w, req)

    body := w.Body.String()
    // Both ruby and java platform versions should be filtered
    lines := strings.Split(strings.TrimSpace(body), "\n")
    if len(lines) != 1 { // only "---"
        t.Errorf("expected only header line, got %d lines: %v", len(lines), lines)
    }
}

func TestGemCompactIndexNoCooldown(t *testing.T) {
    compactIndex := "---\n1.0.0 dep:>= 1.0|checksum:abc\n"

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _, _ = w.Write([]byte(compactIndex))
    }))
    defer upstream.Close()

    h := &GemHandler{
        proxy:       testProxy(), // no cooldown
        upstreamURL: upstream.URL,
        proxyURL:    "http://proxy.local",
    }

    req := httptest.NewRequest(http.MethodGet, "/info/testgem", nil)
    req.SetPathValue("name", "testgem")
    w := httptest.NewRecorder()
    h.handleCompactIndex(w, req)

    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
    }
}

func TestGemCompactIndexVersionsAPIFails(t *testing.T) {
    compactIndex := "---\n1.0.0 dep:>= 1.0|checksum:abc\n"

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        switch {
        case strings.HasPrefix(r.URL.Path, "/info/"):
            _, _ = w.Write([]byte(compactIndex))
        case strings.HasPrefix(r.URL.Path, "/api/v1/versions/"):
            w.WriteHeader(http.StatusInternalServerError)
        }
    }))
    defer upstream.Close()

    proxy := testProxy()
    proxy.Cooldown = &cooldown.Config{
        Default: "3d",
    }

    h := &GemHandler{
        proxy:       proxy,
        upstreamURL: upstream.URL,
        proxyURL:    "http://proxy.local",
    }

    req := httptest.NewRequest(http.MethodGet, "/info/testgem", nil)
    req.SetPathValue("name", "testgem")
    w := httptest.NewRecorder()
    h.handleCompactIndex(w, req)

    // Should still return OK with unfiltered content
    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
    }

    body := w.Body.String()
    if !strings.Contains(body, "1.0.0") {
        t.Error("expected unfiltered content when versions API fails")
    }
}

func TestGemFetchFilteredVersions(t *testing.T) {
    now := time.Now()
    oldTime := now.Add(-7 * 24 * time.Hour).Format(time.RFC3339)
    recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)

    versionsJSON, _ := json.Marshal([]gemVersion{
        {Number: "1.0.0", Platform: "ruby", CreatedAt: oldTime},
        {Number: "2.0.0", Platform: "ruby", CreatedAt: recentTime},
        {Number: "2.0.0", Platform: "java", CreatedAt: recentTime},
    })

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/json")
        _, _ = w.Write(versionsJSON)
    }))
    defer upstream.Close()

    proxy := testProxy()
    proxy.Cooldown = &cooldown.Config{
        Default: "3d",
    }

    h := &GemHandler{
        proxy:       proxy,
        upstreamURL: upstream.URL,
        proxyURL:    "http://proxy.local",
    }

    req := httptest.NewRequest(http.MethodGet, "/info/testgem", nil)
    filtered, err := h.fetchFilteredVersions(req, "testgem")
    if err != nil {
        t.Fatal(err)
    }

    if filtered["1.0.0"] {
        t.Error("version 1.0.0 should not be filtered (old enough)")
    }
    if !filtered["2.0.0"] {
        t.Error("version 2.0.0 (ruby) should be filtered")
    }
    if !filtered["2.0.0-java"] {
        t.Error("version 2.0.0-java should be filtered")
    }

    _ = fmt.Sprintf // silence unused import
}

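The cooldown package itself is not part of this diff; from its use in these tests, IsAllowed evidently compares a version's publish timestamp against a configured minimum age (Default: "3d"). A hedged sketch of that rule in isolation — the function name and signature here are illustrative, not the real cooldown API:

    package main

    import (
        "fmt"
        "time"
    )

    // isAllowed is a guess at the cooldown rule exercised above: a version is
    // served only once it has been public for at least the configured window.
    func isAllowed(publishedAt time.Time, window time.Duration) bool {
        return time.Since(publishedAt) >= window
    }

    func main() {
        window := 3 * 24 * time.Hour // mirrors Default: "3d" in the tests
        fmt.Println(isAllowed(time.Now().Add(-7*24*time.Hour), window)) // true: old enough
        fmt.Println(isAllowed(time.Now().Add(-1*time.Hour), window))    // false: still cooling down
    }
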
@@ -54,18 +54,19 @@ func (h *GoHandler) handleRequest(w http.ResponseWriter, r *http.Request) {
    module := path[:idx]
    rest := path[idx+4:] // after "/@v/"

    decodedMod := decodeGoModule(module)
    switch {
    case rest == "list":
        // GET /{module}/@v/list - list versions
        h.proxyUpstream(w, r)
        h.proxyCached(w, r, decodedMod+"/@v/list")

    case strings.HasSuffix(rest, ".info"):
        // GET /{module}/@v/{version}.info - version metadata
        h.proxyUpstream(w, r)
        h.proxyCached(w, r, decodedMod+"/@v/"+rest)

    case strings.HasSuffix(rest, ".mod"):
        // GET /{module}/@v/{version}.mod - go.mod file
        h.proxyUpstream(w, r)
        h.proxyCached(w, r, decodedMod+"/@v/"+rest)

    case strings.HasSuffix(rest, ".zip"):
        // GET /{module}/@v/{version}.zip - source archive (cache this)

@@ -80,7 +81,8 @@ func (h *GoHandler) handleRequest(w http.ResponseWriter, r *http.Request) {

    // Check for @latest
    if strings.HasSuffix(path, "/@latest") {
        h.proxyUpstream(w, r)
        module := strings.TrimSuffix(path, "/@latest")
        h.proxyCached(w, r, decodeGoModule(module)+"/@latest")
        return
    }

@@ -111,6 +113,11 @@ func (h *GoHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
    h.proxy.ProxyUpstream(w, r, h.upstreamURL+r.URL.Path, nil)
}

// proxyCached forwards a request with metadata caching.
func (h *GoHandler) proxyCached(w http.ResponseWriter, r *http.Request, cacheKey string) {
    h.proxy.ProxyCached(w, r, h.upstreamURL+r.URL.Path, "golang", cacheKey, "*/*")
}

// decodeGoModule decodes an encoded module path.
// In the encoding, uppercase letters are represented as "!" followed by lowercase.
func decodeGoModule(encoded string) string {

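The escaping that decodeGoModule reverses is the standard Go module proxy case-encoding: every uppercase letter in a module path is written as "!" followed by its lowercase form, so paths survive case-insensitive filesystems. An illustrative decoder — a standalone sketch, not the function from this diff:

    package main

    import (
        "fmt"
        "strings"
        "unicode"
    )

    // decode reverses the Go module proxy escaping: "!a" -> "A".
    func decode(encoded string) string {
        var b strings.Builder
        bang := false
        for _, r := range encoded {
            switch {
            case bang:
                b.WriteRune(unicode.ToUpper(r))
                bang = false
            case r == '!':
                bang = true
            default:
                b.WriteRune(r)
            }
        }
        return b.String()
    }

    func main() {
        fmt.Println(decode("github.com/!azure/azure-sdk-for-go")) // github.com/Azure/azure-sdk-for-go
    }
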
@@ -2,12 +2,15 @@
package handler

import (
    "bytes"
    "context"
    "database/sql"
    "errors"
    "fmt"
    "io"
    "log/slog"
    "net/http"
    "strconv"
    "strings"
    "time"

@@ -32,26 +35,41 @@ func containsPathTraversal(path string) bool {

const defaultHTTPTimeout = 30 * time.Second

// maxMetadataSize is the maximum size of upstream metadata responses (50 MB).
const contentTypeJSON = "application/json"

// maxMetadataSize is the maximum size of upstream metadata responses (100 MB).
// Package metadata (e.g. npm with many versions) can be large, but unbounded
// reads risk OOM if an upstream misbehaves.
const maxMetadataSize = 50 << 20
const maxMetadataSize = 100 << 20

// ErrMetadataTooLarge is returned when upstream metadata exceeds maxMetadataSize.
var ErrMetadataTooLarge = errors.New("metadata response exceeds size limit")

// ReadMetadata reads an upstream response body with a size limit to prevent OOM
// from unexpectedly large responses.
// from unexpectedly large responses. Returns ErrMetadataTooLarge if the response
// is truncated by the limit.
func ReadMetadata(r io.Reader) ([]byte, error) {
    return io.ReadAll(io.LimitReader(r, maxMetadataSize))
    data, err := io.ReadAll(io.LimitReader(r, maxMetadataSize+1))
    if err != nil {
        return nil, err
    }
    if int64(len(data)) > maxMetadataSize {
        return nil, ErrMetadataTooLarge
    }
    return data, nil
}

// Proxy provides shared functionality for protocol handlers.
type Proxy struct {
    DB         *database.DB
    Storage    storage.Storage
    Fetcher    fetch.FetcherInterface
    Resolver   *fetch.Resolver
    Logger     *slog.Logger
    Cooldown   *cooldown.Config
    HTTPClient *http.Client
    DB            *database.DB
    Storage       storage.Storage
    Fetcher       fetch.FetcherInterface
    Resolver      *fetch.Resolver
    Logger        *slog.Logger
    Cooldown      *cooldown.Config
    CacheMetadata bool
    MetadataTTL   time.Duration
    HTTPClient    *http.Client
}

// NewProxy creates a new Proxy with the given dependencies.

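The maxMetadataSize+1 in ReadMetadata is the usual idiom for telling "exactly at the limit" apart from "over it": ask the LimitReader for one byte more than allowed, and if that extra byte arrives, the body was too large. The same idiom in isolation, with illustrative names:

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    var errTooLarge = errors.New("body exceeds size limit")

    // readCapped reads at most limit bytes; anything longer is rejected outright.
    func readCapped(r io.Reader, limit int64) ([]byte, error) {
        data, err := io.ReadAll(io.LimitReader(r, limit+1)) // one extra byte to detect overflow
        if err != nil {
            return nil, err
        }
        if int64(len(data)) > limit {
            return nil, errTooLarge
        }
        return data, nil
    }

    func main() {
        _, err := readCapped(strings.NewReader("0123456789"), 4)
        fmt.Println(err) // body exceeds size limit
    }

Without the +1, a response of exactly limit bytes and a truncated over-limit response would be indistinguishable, which is why the old io.ReadAll(io.LimitReader(r, maxMetadataSize)) could silently serve a cut-off body.
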
@@ -218,11 +236,11 @@ func (p *Proxy) updateCacheDB(ecosystem, name, filename, pkgPURL, versionPURL, u

    // Upsert package
    pkg := &database.Package{
        PURL:      pkgPURL,
        Ecosystem: ecosystem,
        Name:      name,
        PURL:        pkgPURL,
        Ecosystem:   ecosystem,
        Name:        name,
        RegistryURL: sql.NullString{String: upstreamURL, Valid: true},
        EnrichedAt: sql.NullTime{Time: now, Valid: true},
        EnrichedAt:  sql.NullTime{Time: now, Valid: true},
    }
    if err := p.DB.UpsertPackage(pkg); err != nil {
        return fmt.Errorf("upserting package: %w", err)

@@ -311,40 +329,6 @@ func (p *Proxy) ProxyUpstream(w http.ResponseWriter, r *http.Request, upstreamUR
    _, _ = io.Copy(w, resp.Body)
}

// ProxyMetadata forwards a metadata request to upstream, copying only specific response headers.
func (p *Proxy) ProxyMetadata(w http.ResponseWriter, r *http.Request, upstreamURL string, logLabel string) {
    p.Logger.Debug(logLabel+" metadata request", "url", upstreamURL)

    req, err := http.NewRequestWithContext(r.Context(), r.Method, upstreamURL, nil)
    if err != nil {
        http.Error(w, "failed to create request", http.StatusInternalServerError)
        return
    }

    for _, header := range []string{"Accept", "Accept-Encoding", "If-Modified-Since", "If-None-Match"} {
        if v := r.Header.Get(header); v != "" {
            req.Header.Set(header, v)
        }
    }

    resp, err := p.HTTPClient.Do(req)
    if err != nil {
        p.Logger.Error("failed to fetch upstream metadata", "error", err)
        http.Error(w, "failed to fetch from upstream", http.StatusBadGateway)
        return
    }
    defer func() { _ = resp.Body.Close() }()

    for _, header := range []string{"Content-Type", "Content-Length", "Last-Modified", "ETag"} {
        if v := resp.Header.Get(header); v != "" {
            w.Header().Set(header, v)
        }
    }

    w.WriteHeader(resp.StatusCode)
    _, _ = io.Copy(w, resp.Body)
}

// ProxyFile forwards a file request to upstream, copying all response headers.
func (p *Proxy) ProxyFile(w http.ResponseWriter, r *http.Request, upstreamURL string) {
    req, err := http.NewRequestWithContext(r.Context(), r.Method, upstreamURL, nil)

@@ -372,11 +356,323 @@ func (p *Proxy) ProxyFile(w http.ResponseWriter, r *http.Request, upstreamURL st

// JSONError writes a JSON error response.
func JSONError(w http.ResponseWriter, status int, message string) {
    w.Header().Set("Content-Type", "application/json")
    w.Header().Set("Content-Type", contentTypeJSON)
    w.WriteHeader(status)
    _, _ = fmt.Fprintf(w, `{"error":%q}`, message)
}

// ErrUpstreamNotFound indicates the upstream returned 404.
var ErrUpstreamNotFound = fmt.Errorf("upstream: not found")

// errStale304 is returned when upstream sends 304 but the cached file is missing.
var errStale304 = fmt.Errorf("upstream returned 304 but cached file is missing")

// metadataStoragePath builds a storage path for cached metadata.
func metadataStoragePath(ecosystem, cacheKey string) string {
    return "_metadata/" + ecosystem + "/" + cacheKey + "/metadata"
}

// FetchOrCacheMetadata fetches metadata from upstream with caching.
// On success it returns the raw response bytes and content type.
// If upstream fails and a cached copy exists, the cached version is returned.
// cacheKey is typically the package name but can include subpath components.
// Optional acceptHeaders specify the Accept header(s) to send; defaults to application/json.
func (p *Proxy) FetchOrCacheMetadata(ctx context.Context, ecosystem, cacheKey, upstreamURL string, acceptHeaders ...string) ([]byte, string, error) {
    if containsPathTraversal(cacheKey) {
        return nil, "", fmt.Errorf("invalid cache key: %q", cacheKey)
    }

    storagePath := metadataStoragePath(ecosystem, cacheKey)

    // Check for existing cache entry (for ETag revalidation and TTL)
    var entry *database.MetadataCacheEntry
    if p.CacheMetadata && p.DB != nil {
        entry, _ = p.DB.GetMetadataCache(ecosystem, cacheKey)
    }

    // Serve from cache if within TTL (skip upstream entirely)
    if entry != nil && p.MetadataTTL > 0 && entry.FetchedAt.Valid {
        if time.Since(entry.FetchedAt.Time) < p.MetadataTTL {
            cached, readErr := p.Storage.Open(ctx, entry.StoragePath)
            if readErr == nil {
                defer func() { _ = cached.Close() }()
                data, readErr := ReadMetadata(cached)
                if readErr == nil {
                    ct := contentTypeJSON
                    if entry.ContentType.Valid {
                        ct = entry.ContentType.String
                    }
                    return data, ct, nil
                }
            }
            // Cache file missing/unreadable, fall through to upstream
        }
    }

    accept := contentTypeJSON
    if len(acceptHeaders) > 0 && acceptHeaders[0] != "" {
        accept = acceptHeaders[0]
    }

    // Try upstream
    body, contentType, etag, lastModified, err := p.fetchUpstreamMetadata(ctx, upstreamURL, entry, accept)
    if errors.Is(err, errStale304) {
        // 304 but cached file is gone; retry without ETag
        body, contentType, etag, lastModified, err = p.fetchUpstreamMetadata(ctx, upstreamURL, nil, accept)
    }
    if err == nil {
        if p.CacheMetadata {
            p.cacheMetadataBlob(ctx, ecosystem, cacheKey, storagePath, body, contentType, etag, lastModified)
        }
        return body, contentType, nil
    }

    // Upstream failed -- fall back to cache if available
    if !p.CacheMetadata || entry == nil {
        return nil, "", fmt.Errorf("upstream failed and no cached metadata: %w", err)
    }

    p.Logger.Warn("upstream metadata fetch failed, checking cache",
        "ecosystem", ecosystem, "key", cacheKey, "error", err)

    cached, readErr := p.Storage.Open(ctx, entry.StoragePath)
    if readErr != nil {
        return nil, "", fmt.Errorf("upstream failed and cached file missing: %w", err)
    }
    defer func() { _ = cached.Close() }()

    data, readErr := ReadMetadata(cached)
    if readErr != nil {
        return nil, "", fmt.Errorf("upstream failed and cached read error: %w", err)
    }

    ct := contentTypeJSON
    if entry.ContentType.Valid {
        ct = entry.ContentType.String
    }
    p.Logger.Info("serving metadata from cache",
        "ecosystem", ecosystem, "key", cacheKey)
    return data, ct, nil
}

// fetchUpstreamMetadata fetches metadata from upstream, using ETag for conditional revalidation.
// Returns the body, content type, ETag, upstream Last-Modified time, and any error.
func (p *Proxy) fetchUpstreamMetadata(ctx context.Context, upstreamURL string, entry *database.MetadataCacheEntry, accept string) ([]byte, string, string, time.Time, error) {
    var zeroTime time.Time

    req, err := http.NewRequestWithContext(ctx, http.MethodGet, upstreamURL, nil)
    if err != nil {
        return nil, "", "", zeroTime, fmt.Errorf("creating request: %w", err)
    }
    req.Header.Set("Accept", accept)

    if entry != nil && entry.ETag.Valid {
        req.Header.Set("If-None-Match", entry.ETag.String)
    }

    resp, err := p.HTTPClient.Do(req)
    if err != nil {
        return nil, "", "", zeroTime, fmt.Errorf("fetching metadata: %w", err)
    }
    defer func() { _ = resp.Body.Close() }()

    // 304 Not Modified -- our cached copy is still good
    if resp.StatusCode == http.StatusNotModified && entry != nil {
        cached, readErr := p.Storage.Open(ctx, entry.StoragePath)
        if readErr != nil {
            return nil, "", "", zeroTime, errStale304
        }
        defer func() { _ = cached.Close() }()
        data, readErr := ReadMetadata(cached)
        if readErr != nil {
            return nil, "", "", zeroTime, errStale304
        }
        ct := contentTypeJSON
        if entry.ContentType.Valid {
            ct = entry.ContentType.String
        }
        lm := zeroTime
        if entry.LastModified.Valid {
            lm = entry.LastModified.Time
        }
        return data, ct, entry.ETag.String, lm, nil
    }

    if resp.StatusCode == http.StatusNotFound {
        return nil, "", "", zeroTime, ErrUpstreamNotFound
    }
    if resp.StatusCode != http.StatusOK {
        return nil, "", "", zeroTime, fmt.Errorf("upstream returned %d", resp.StatusCode)
    }

    body, err := ReadMetadata(resp.Body)
    if err != nil {
        return nil, "", "", zeroTime, fmt.Errorf("reading response: %w", err)
    }

    contentType := resp.Header.Get("Content-Type")
    if contentType == "" {
        contentType = contentTypeJSON
    }

    etag := resp.Header.Get("ETag")

    var lastModified time.Time
    if lm := resp.Header.Get("Last-Modified"); lm != "" {
        lastModified, _ = http.ParseTime(lm)
    }

    return body, contentType, etag, lastModified, nil
}

// cacheMetadataBlob stores metadata bytes in storage and updates the database.
func (p *Proxy) cacheMetadataBlob(ctx context.Context, ecosystem, cacheKey, storagePath string, data []byte, contentType, etag string, lastModified time.Time) {
    if p.DB == nil || p.Storage == nil {
        return
    }

    size, _, err := p.Storage.Store(ctx, storagePath, bytes.NewReader(data))
    if err != nil {
        p.Logger.Warn("failed to cache metadata", "ecosystem", ecosystem, "key", cacheKey, "error", err)
        return
    }

    _ = p.DB.UpsertMetadataCache(&database.MetadataCacheEntry{
        Ecosystem:    ecosystem,
        Name:         cacheKey,
        StoragePath:  storagePath,
        ETag:         sql.NullString{String: etag, Valid: etag != ""},
        ContentType:  sql.NullString{String: contentType, Valid: contentType != ""},
        Size:         sql.NullInt64{Int64: size, Valid: true},
        LastModified: sql.NullTime{Time: lastModified, Valid: !lastModified.IsZero()},
        FetchedAt:    sql.NullTime{Time: time.Now(), Valid: true},
    })
}

// cachedMeta holds cache validators and freshness state from a metadata cache entry.
type cachedMeta struct {
    etag         string
    lastModified time.Time
    stale        bool
}

// lookupCachedMeta retrieves cache validators for a metadata entry.
func (p *Proxy) lookupCachedMeta(ecosystem, cacheKey string) cachedMeta {
    if p.DB == nil {
        return cachedMeta{}
    }
    entry, err := p.DB.GetMetadataCache(ecosystem, cacheKey)
    if err != nil || entry == nil {
        return cachedMeta{}
    }
    var cm cachedMeta
    if entry.ETag.Valid {
        cm.etag = entry.ETag.String
    }
    if entry.LastModified.Valid {
        cm.lastModified = entry.LastModified.Time
    }
    // If FetchedAt is older than TTL, upstream must have failed and
    // we served from stale cache (successful fetches update FetchedAt).
    if p.MetadataTTL > 0 && entry.FetchedAt.Valid && time.Since(entry.FetchedAt.Time) > p.MetadataTTL {
        cm.stale = true
    }
    return cm
}

// ProxyCached fetches metadata from upstream (with optional caching for offline fallback)
// and writes it to the response. Optional acceptHeaders specify the Accept header to send.
// When metadata caching is disabled, the response is streamed directly to avoid buffering
// large metadata responses (e.g. npm packages with many versions) in memory.
func (p *Proxy) ProxyCached(w http.ResponseWriter, r *http.Request, upstreamURL, ecosystem, cacheKey string, acceptHeaders ...string) {
    if !p.CacheMetadata {
        // Stream directly without buffering when caching is off.
        p.proxyMetadataStream(w, r, upstreamURL, acceptHeaders...)
        return
    }

    body, contentType, err := p.FetchOrCacheMetadata(r.Context(), ecosystem, cacheKey, upstreamURL, acceptHeaders...)
    if err != nil {
        if errors.Is(err, ErrUpstreamNotFound) {
            http.Error(w, "not found", http.StatusNotFound)
            return
        }
        p.Logger.Error("metadata fetch failed", "error", err)
        http.Error(w, "failed to fetch from upstream", http.StatusBadGateway)
        return
    }

    cm := p.lookupCachedMeta(ecosystem, cacheKey)

    // Honor client conditional request headers
    if cm.etag != "" {
        if match := r.Header.Get("If-None-Match"); match != "" && match == cm.etag {
            w.WriteHeader(http.StatusNotModified)
            return
        }
    }
    if !cm.lastModified.IsZero() {
        if ims := r.Header.Get("If-Modified-Since"); ims != "" {
            if t, err := http.ParseTime(ims); err == nil && !cm.lastModified.After(t) {
                w.WriteHeader(http.StatusNotModified)
                return
            }
        }
    }

    w.Header().Set("Content-Type", contentType)
    w.Header().Set("Content-Length", strconv.Itoa(len(body)))
    if cm.etag != "" {
        w.Header().Set("ETag", cm.etag)
    }
    if !cm.lastModified.IsZero() {
        w.Header().Set("Last-Modified", cm.lastModified.UTC().Format(http.TimeFormat))
    }
    if cm.stale {
        w.Header().Set("Warning", `110 - "Response is Stale"`)
    }
    w.WriteHeader(http.StatusOK)
    _, _ = w.Write(body)
}

// proxyMetadataStream forwards an upstream metadata response by streaming it to the client
// without buffering the full body in memory.
func (p *Proxy) proxyMetadataStream(w http.ResponseWriter, r *http.Request, upstreamURL string, acceptHeaders ...string) {
    req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
    if err != nil {
        http.Error(w, "failed to create request", http.StatusInternalServerError)
        return
    }

    accept := contentTypeJSON
    if len(acceptHeaders) > 0 && acceptHeaders[0] != "" {
        accept = acceptHeaders[0]
    }
    req.Header.Set("Accept", accept)

    for _, header := range []string{"Accept-Encoding", "If-Modified-Since", "If-None-Match"} {
        if v := r.Header.Get(header); v != "" {
            req.Header.Set(header, v)
        }
    }

    resp, err := p.HTTPClient.Do(req)
    if err != nil {
        http.Error(w, "failed to fetch from upstream", http.StatusBadGateway)
        return
    }
    defer func() { _ = resp.Body.Close() }()

    for _, header := range []string{"Content-Type", "Content-Length", "Last-Modified", "ETag"} {
        if v := resp.Header.Get(header); v != "" {
            w.Header().Set(header, v)
        }
    }

    w.WriteHeader(resp.StatusCode)
    _, _ = io.Copy(w, resp.Body)
}

// GetOrFetchArtifactFromURL retrieves an artifact from cache or fetches from a specific URL.
// This is useful for registries where download URLs are determined from metadata.
func (p *Proxy) GetOrFetchArtifactFromURL(ctx context.Context, ecosystem, name, version, filename, downloadURL string) (*CacheResult, error) {

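For a sense of how handlers consume this API: a caller passes an ecosystem, cache key, and upstream URL, and gets back bytes plus a content type, with TTL checks, ETag revalidation, and offline fallback all handled internally. A hedged sketch of such a caller, assuming it lives in the same handler package — the route, handler name, and registry URL are invented for illustration:

    package handler

    import (
        "errors"
        "net/http"
    )

    // demoMetadataHandler is a hypothetical route handler; only the
    // FetchOrCacheMetadata call and the error mapping mirror the code above.
    func demoMetadataHandler(p *Proxy) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            name := r.PathValue("name")
            body, contentType, err := p.FetchOrCacheMetadata(
                r.Context(), "demo", name, "https://registry.example/api/"+name)
            if err != nil {
                if errors.Is(err, ErrUpstreamNotFound) {
                    http.Error(w, "not found", http.StatusNotFound)
                    return
                }
                http.Error(w, "upstream unavailable", http.StatusBadGateway)
                return
            }
            w.Header().Set("Content-Type", contentType)
            _, _ = w.Write(body)
        }
    }

In practice the built-in ProxyCached wrapper already does this plus conditional-request handling, so a handler would only drop down to FetchOrCacheMetadata when it needs to post-process the bytes (as the gem and hex handlers do).
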
@@ -432,4 +728,3 @@ func (p *Proxy) fetchAndCacheFromURL(ctx context.Context, ecosystem, name, versi
        Cached: false,
    }, nil
}

@@ -486,3 +486,302 @@ func TestNewProxy_NilLogger(t *testing.T) {
        t.Error("Logger should be set to default when nil is passed")
    }
}

const testLastModified = "Wed, 01 Jan 2025 12:00:00 GMT"

// setupCachedProxy creates a Proxy with CacheMetadata enabled and an upstream
// test server that returns JSON with ETag and Last-Modified headers.
func setupCachedProxy(t *testing.T, upstreamETag, upstreamLastModified string) (*Proxy, *httptest.Server) {
    t.Helper()

    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if upstreamETag != "" {
            w.Header().Set("ETag", upstreamETag)
        }
        if upstreamLastModified != "" {
            w.Header().Set("Last-Modified", upstreamLastModified)
        }
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusOK)
        _, _ = w.Write([]byte(`{"ok":true}`))
    }))
    t.Cleanup(upstream.Close)

    proxy, _, _, _ := setupTestProxy(t)
    proxy.CacheMetadata = true
    proxy.HTTPClient = upstream.Client()

    return proxy, upstream
}

func TestProxyCached_SetsETagAndLastModified(t *testing.T) {
    lm := testLastModified
    proxy, upstream := setupCachedProxy(t, `"abc123"`, lm)

    // First request populates the cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "test-key")

    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want 200", w.Code)
    }
    if got := w.Header().Get("ETag"); got != `"abc123"` {
        t.Errorf("ETag = %q, want %q", got, `"abc123"`)
    }
    if got := w.Header().Get("Last-Modified"); got != lm {
        t.Errorf("Last-Modified = %q, want %q", got, lm)
    }
    if got := w.Header().Get("Content-Length"); got != "11" {
        t.Errorf("Content-Length = %q, want %q", got, "11")
    }
    if w.Body.String() != `{"ok":true}` {
        t.Errorf("body = %q, want %q", w.Body.String(), `{"ok":true}`)
    }
}

func TestProxyCached_IfNoneMatch_Returns304(t *testing.T) {
    proxy, upstream := setupCachedProxy(t, `"abc123"`, "")

    // Populate cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "etag-key")
    if w.Code != http.StatusOK {
        t.Fatalf("initial request: status = %d, want 200", w.Code)
    }

    // Conditional request with matching ETag
    req = httptest.NewRequest(http.MethodGet, "/test", nil)
    req.Header.Set("If-None-Match", `"abc123"`)
    w = httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "etag-key")

    if w.Code != http.StatusNotModified {
        t.Errorf("conditional request: status = %d, want 304", w.Code)
    }
    if w.Body.Len() != 0 {
        t.Errorf("304 response should have empty body, got %d bytes", w.Body.Len())
    }
}

func TestProxyCached_IfNoneMatch_NonMatching_Returns200(t *testing.T) {
    proxy, upstream := setupCachedProxy(t, `"abc123"`, "")

    // Populate cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "etag-nm-key")
    if w.Code != http.StatusOK {
        t.Fatalf("initial request: status = %d, want 200", w.Code)
    }

    // Conditional request with non-matching ETag
    req = httptest.NewRequest(http.MethodGet, "/test", nil)
    req.Header.Set("If-None-Match", `"different"`)
    w = httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "etag-nm-key")

    if w.Code != http.StatusOK {
        t.Errorf("non-matching ETag: status = %d, want 200", w.Code)
    }
}

func TestProxyCached_IfModifiedSince_Returns304(t *testing.T) {
    lm := testLastModified
    proxy, upstream := setupCachedProxy(t, "", lm)

    // Populate cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "lm-key")
    if w.Code != http.StatusOK {
        t.Fatalf("initial request: status = %d, want 200", w.Code)
    }

    // Conditional request with If-Modified-Since equal to Last-Modified
    req = httptest.NewRequest(http.MethodGet, "/test", nil)
    req.Header.Set("If-Modified-Since", lm)
    w = httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "lm-key")

    if w.Code != http.StatusNotModified {
        t.Errorf("conditional request: status = %d, want 304", w.Code)
    }
}

func TestProxyCached_IfModifiedSince_OlderDate_Returns200(t *testing.T) {
    lm := testLastModified
    proxy, upstream := setupCachedProxy(t, "", lm)

    // Populate cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "lm-old-key")
    if w.Code != http.StatusOK {
        t.Fatalf("initial request: status = %d, want 200", w.Code)
    }

    // Conditional request with If-Modified-Since older than Last-Modified
    req = httptest.NewRequest(http.MethodGet, "/test", nil)
    req.Header.Set("If-Modified-Since", "Mon, 01 Dec 2024 12:00:00 GMT")
    w = httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "lm-old-key")

    if w.Code != http.StatusOK {
        t.Errorf("older If-Modified-Since: status = %d, want 200", w.Code)
    }
}

func TestProxyCached_NoValidators_OmitsHeaders(t *testing.T) {
    proxy, upstream := setupCachedProxy(t, "", "")

    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "no-val-key")

    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want 200", w.Code)
    }
    if got := w.Header().Get("ETag"); got != "" {
        t.Errorf("ETag should be empty when upstream has none, got %q", got)
    }
    if got := w.Header().Get("Last-Modified"); got != "" {
        t.Errorf("Last-Modified should be empty when upstream has none, got %q", got)
    }
}

func TestFetchOrCacheMetadata_TTL_ServesFreshFromCache(t *testing.T) {
    upstreamHits := 0
    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        upstreamHits++
        w.Header().Set("Content-Type", "application/json")
        _, _ = w.Write([]byte(`{"v":1}`))
    }))
    t.Cleanup(upstream.Close)

    proxy, _, _, _ := setupTestProxy(t)
    proxy.CacheMetadata = true
    proxy.MetadataTTL = 1 * time.Hour
    proxy.HTTPClient = upstream.Client()

    ctx := context.Background()

    // First request populates cache
    body, _, err := proxy.FetchOrCacheMetadata(ctx, "test", "ttl-pkg", upstream.URL+"/pkg")
    if err != nil {
        t.Fatalf("first fetch: %v", err)
    }
    if string(body) != `{"v":1}` {
        t.Errorf("body = %q, want %q", body, `{"v":1}`)
    }
    if upstreamHits != 1 {
        t.Fatalf("expected 1 upstream hit, got %d", upstreamHits)
    }

    // Second request within TTL should serve from cache without hitting upstream
    body, _, err = proxy.FetchOrCacheMetadata(ctx, "test", "ttl-pkg", upstream.URL+"/pkg")
    if err != nil {
        t.Fatalf("second fetch: %v", err)
    }
    if string(body) != `{"v":1}` {
        t.Errorf("body = %q, want %q", body, `{"v":1}`)
    }
    if upstreamHits != 1 {
        t.Errorf("expected upstream to still be hit only once, got %d", upstreamHits)
    }
}

func TestFetchOrCacheMetadata_TTL_Zero_AlwaysRevalidates(t *testing.T) {
    upstreamHits := 0
    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        upstreamHits++
        w.Header().Set("Content-Type", "application/json")
        _, _ = w.Write([]byte(`{"v":1}`))
    }))
    t.Cleanup(upstream.Close)

    proxy, _, _, _ := setupTestProxy(t)
    proxy.CacheMetadata = true
    proxy.MetadataTTL = 0 // always revalidate
    proxy.HTTPClient = upstream.Client()

    ctx := context.Background()

    _, _, err := proxy.FetchOrCacheMetadata(ctx, "test", "ttl0-pkg", upstream.URL+"/pkg")
    if err != nil {
        t.Fatalf("first fetch: %v", err)
    }

    _, _, err = proxy.FetchOrCacheMetadata(ctx, "test", "ttl0-pkg", upstream.URL+"/pkg")
    if err != nil {
        t.Fatalf("second fetch: %v", err)
    }

    if upstreamHits != 2 {
        t.Errorf("expected 2 upstream hits with TTL=0, got %d", upstreamHits)
    }
}

func TestProxyCached_StaleWarningHeader(t *testing.T) {
    requestCount := 0
    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        requestCount++
        if requestCount == 1 {
            // First request succeeds to populate cache
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write([]byte(`{"cached":true}`))
            return
        }
        // Subsequent requests fail to simulate upstream outage
        w.WriteHeader(http.StatusBadGateway)
    }))
    t.Cleanup(upstream.Close)

    proxy, _, _, _ := setupTestProxy(t)
    proxy.CacheMetadata = true
    proxy.MetadataTTL = 1 * time.Millisecond // very short TTL so it expires immediately
    proxy.HTTPClient = upstream.Client()

    // First request populates cache
    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "stale-key")
    if w.Code != http.StatusOK {
        t.Fatalf("initial request: status = %d, want 200", w.Code)
    }

    // Wait for TTL to expire
    time.Sleep(5 * time.Millisecond)

    // Second request: upstream fails, should serve stale cache with Warning header
    req = httptest.NewRequest(http.MethodGet, "/test", nil)
    w = httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "stale-key")

    if w.Code != http.StatusOK {
        t.Fatalf("stale request: status = %d, want 200", w.Code)
    }
    if w.Body.String() != `{"cached":true}` {
        t.Errorf("body = %q, want %q", w.Body.String(), `{"cached":true}`)
    }
    if got := w.Header().Get("Warning"); got != `110 - "Response is Stale"` {
        t.Errorf("Warning = %q, want %q", got, `110 - "Response is Stale"`)
    }
}

func TestProxyCached_FreshResponse_NoWarningHeader(t *testing.T) {
    proxy, upstream := setupCachedProxy(t, "", "")
    proxy.MetadataTTL = 1 * time.Hour

    req := httptest.NewRequest(http.MethodGet, "/test", nil)
    w := httptest.NewRecorder()
    proxy.ProxyCached(w, req, upstream.URL+"/test", "test-eco", "fresh-key")

    if w.Code != http.StatusOK {
        t.Fatalf("status = %d, want 200", w.Code)
    }
    if got := w.Header().Get("Warning"); got != "" {
        t.Errorf("Warning should be empty for fresh response, got %q", got)
    }
}

@@ -1,8 +1,17 @@
package handler

import (
    "bytes"
    "compress/gzip"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "strings"
    "time"

    "github.com/git-pkgs/purl"
    "google.golang.org/protobuf/encoding/protowire"
)

const (

@@ -32,10 +41,10 @@ func (h *HexHandler) Routes() http.Handler {
    // Package tarballs (cache these)
    mux.HandleFunc("GET /tarballs/{filename}", h.handleDownload)

    // Registry resources (proxy without caching)
    mux.HandleFunc("GET /names", h.proxyUpstream)
    mux.HandleFunc("GET /versions", h.proxyUpstream)
    mux.HandleFunc("GET /packages/{name}", h.proxyUpstream)
    // Registry resources (cached for offline)
    mux.HandleFunc("GET /names", h.proxyCached)
    mux.HandleFunc("GET /versions", h.proxyCached)
    mux.HandleFunc("GET /packages/{name}", h.handlePackages)

    // Public keys
    mux.HandleFunc("GET /public_key", h.proxyUpstream)

@ -85,6 +94,335 @@ func (h *HexHandler) parseTarballFilename(filename string) (name, version string
|
|||
return "", ""
|
||||
}
|
||||
|
||||
// hexAPIURL is the Hex HTTP API base URL for fetching package metadata with timestamps.
|
||||
const hexAPIURL = "https://hex.pm"
|
||||
|
||||
// handlePackages proxies the /packages/{name} endpoint, applying cooldown filtering
|
||||
// when enabled. Since the protobuf format has no timestamps, we fetch them from the
|
||||
// Hex HTTP API concurrently.
|
||||
func (h *HexHandler) handlePackages(w http.ResponseWriter, r *http.Request) {
|
||||
if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
|
||||
h.proxyCached(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
name := r.PathValue("name")
|
||||
if name == "" {
|
||||
h.proxyCached(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
h.proxy.Logger.Info("hex package request with cooldown", "name", name)
|
||||
|
||||
protoResp, filteredVersions, err := h.fetchPackageAndVersions(r, name)
|
||||
if err != nil {
|
||||
h.proxy.Logger.Error("upstream request failed", "error", err)
|
||||
http.Error(w, "upstream request failed", http.StatusBadGateway)
|
||||
return
|
||||
}
|
||||
defer func() { _ = protoResp.Body.Close() }()
|
||||
|
||||
if protoResp.StatusCode != http.StatusOK {
|
||||
for k, vv := range protoResp.Header {
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
w.WriteHeader(protoResp.StatusCode)
|
||||
_, _ = io.Copy(w, protoResp.Body)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(protoResp.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "failed to read response", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if len(filteredVersions) == 0 {
|
||||
// No versions to filter or couldn't get timestamps, pass through
|
||||
w.Header().Set("Content-Type", protoResp.Header.Get("Content-Type"))
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
_, _ = w.Write(body)
|
||||
return
|
||||
}
|
||||
|
||||
filtered, err := h.filterSignedPackage(body, filteredVersions)
|
||||
if err != nil {
|
||||
h.proxy.Logger.Warn("failed to filter hex package, proxying original", "error", err)
|
||||
w.Header().Set("Content-Type", protoResp.Header.Get("Content-Type"))
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
_, _ = w.Write(body)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
_, _ = w.Write(filtered)
|
||||
}
|
||||
|
||||
// fetchPackageAndVersions fetches the protobuf package and version timestamps concurrently.
|
||||
func (h *HexHandler) fetchPackageAndVersions(r *http.Request, name string) (*http.Response, map[string]bool, error) {
|
||||
type versionsResult struct {
|
||||
filtered map[string]bool
|
||||
err error
|
||||
}
|
||||
|
||||
versionsCh := make(chan versionsResult, 1)
|
||||
go func() {
|
||||
filtered, err := h.fetchFilteredVersions(r, name)
|
||||
versionsCh <- versionsResult{filtered: filtered, err: err}
|
||||
}()
|
||||
|
||||
protoResp, err := h.fetchUpstreamPackage(r, name)
|
||||
|
||||
versionsRes := <-versionsCh
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if versionsRes.err != nil {
|
||||
h.proxy.Logger.Warn("failed to fetch hex version timestamps, proxying unfiltered",
|
||||
"name", name, "error", versionsRes.err)
|
||||
return protoResp, nil, nil
|
||||
}
|
||||
|
||||
return protoResp, versionsRes.filtered, nil
|
||||
}
|
||||
|
||||
// fetchUpstreamPackage fetches the protobuf package from upstream.
|
||||
func (h *HexHandler) fetchUpstreamPackage(r *http.Request, name string) (*http.Response, error) {
|
||||
upstreamURL := h.upstreamURL + "/packages/" + name
|
||||
req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h.proxy.HTTPClient.Do(req)
|
||||
}
|
||||
|
||||
// hexRelease represents a version entry from the Hex API.
|
||||
type hexRelease struct {
|
||||
Version string `json:"version"`
|
||||
InsertedAt string `json:"inserted_at"`
|
||||
}
|
||||
|
||||
// hexPackageAPI represents the Hex API response for a package.
|
||||
type hexPackageAPI struct {
|
||||
Releases []hexRelease `json:"releases"`
|
||||
}
|
||||
|
||||
// fetchFilteredVersions fetches the Hex API and returns a set of version
|
||||
// strings that should be filtered out by cooldown.
|
||||
func (h *HexHandler) fetchFilteredVersions(r *http.Request, name string) (map[string]bool, error) {
|
||||
apiURL := fmt.Sprintf("%s/api/packages/%s", hexAPIURL, name)
|
||||
req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, apiURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
resp, err := h.proxy.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("hex API returned %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var pkg hexPackageAPI
|
||||
if err := json.NewDecoder(resp.Body).Decode(&pkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packagePURL := purl.MakePURLString("hex", name, "")
|
||||
filtered := make(map[string]bool)
|
||||
|
||||
for _, release := range pkg.Releases {
|
||||
insertedAt, err := time.Parse(time.RFC3339Nano, release.InsertedAt)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !h.proxy.Cooldown.IsAllowed("hex", packagePURL, insertedAt) {
|
||||
filtered[release.Version] = true
|
||||
h.proxy.Logger.Info("cooldown: filtering hex version",
|
||||
"package", name, "version", release.Version,
|
||||
"published", release.InsertedAt)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// filterSignedPackage decompresses gzipped data, decodes the Signed protobuf wrapper,
|
||||
// filters releases from the Package payload, and re-encodes as gzipped protobuf
|
||||
// (without the original signature since the payload has changed).
|
||||
func (h *HexHandler) filterSignedPackage(gzippedData []byte, filteredVersions map[string]bool) ([]byte, error) {
|
||||
// Decompress gzip
|
||||
gr, err := gzip.NewReader(bytes.NewReader(gzippedData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signed, err := io.ReadAll(gr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_ = gr.Close()
|
||||
|
||||
// Parse Signed message: field 1 = payload (bytes), field 2 = signature (bytes)
|
||||
payload, err := extractProtobufBytes(signed, 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("extracting payload: %w", err)
|
||||
}
|
||||
|
||||
// Filter releases from the Package message
|
||||
filteredPayload, err := filterPackageReleases(payload, filteredVersions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("filtering releases: %w", err)
|
||||
}
|
||||
|
||||
	// Re-encode Signed message with modified payload and no signature
	var newSigned []byte
	newSigned = protowire.AppendTag(newSigned, 1, protowire.BytesType)
	newSigned = protowire.AppendBytes(newSigned, filteredPayload)

	// Gzip compress
	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	if _, err := gw.Write(newSigned); err != nil {
		return nil, err
	}
	if err := gw.Close(); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

// filterPackageReleases filters releases from a Package protobuf message.
// Package: field 1 = releases (repeated), field 2 = name, field 3 = repository
func filterPackageReleases(payload []byte, filteredVersions map[string]bool) ([]byte, error) {
	var result []byte
	data := payload

	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			return nil, fmt.Errorf("invalid protobuf tag")
		}

		tagBytes := data[:n]
		data = data[n:]

		var fieldBytes []byte
		switch wtype {
		case protowire.BytesType:
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				return nil, fmt.Errorf("invalid protobuf bytes field")
			}
			fieldBytes = data[:vn]
			data = data[vn:]

			if num == 1 { // releases field
				version := extractReleaseVersion(v)
				if filteredVersions[version] {
					continue // skip this release
				}
			}
		case protowire.VarintType:
			_, vn := protowire.ConsumeVarint(data)
			if vn < 0 {
				return nil, fmt.Errorf("invalid protobuf varint")
			}
			fieldBytes = data[:vn]
			data = data[vn:]
		default:
			return nil, fmt.Errorf("unexpected wire type %d", wtype)
		}

		result = append(result, tagBytes...)
		result = append(result, fieldBytes...)
	}

	return result, nil
}

// extractReleaseVersion extracts the version string from a Release protobuf message.
// Release: field 1 = version (string)
func extractReleaseVersion(release []byte) string {
	data := release
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			return ""
		}
		data = data[n:]

		switch wtype {
		case protowire.BytesType:
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				return ""
			}
			if num == 1 {
				return string(v)
			}
			data = data[vn:]
		case protowire.VarintType:
			_, vn := protowire.ConsumeVarint(data)
			if vn < 0 {
				return ""
			}
			data = data[vn:]
		default:
			return ""
		}
	}
	return ""
}

// extractProtobufBytes extracts a bytes field from a protobuf message by field number.
func extractProtobufBytes(data []byte, fieldNum protowire.Number) ([]byte, error) {
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			return nil, fmt.Errorf("invalid protobuf tag")
		}
		data = data[n:]

		switch wtype {
		case protowire.BytesType:
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				return nil, fmt.Errorf("invalid protobuf bytes")
			}
			if num == fieldNum {
				return v, nil
			}
			data = data[vn:]
		case protowire.VarintType:
			_, vn := protowire.ConsumeVarint(data)
			if vn < 0 {
				return nil, fmt.Errorf("invalid protobuf varint")
			}
			data = data[vn:]
		default:
			return nil, fmt.Errorf("unexpected wire type %d", wtype)
		}
	}
	return nil, fmt.Errorf("field %d not found", fieldNum)
}

// proxyCached forwards a request with metadata caching.
func (h *HexHandler) proxyCached(w http.ResponseWriter, r *http.Request) {
	cacheKey := strings.TrimPrefix(r.URL.Path, "/")
	h.proxy.ProxyCached(w, r, h.upstreamURL+r.URL.Path, "hex", cacheKey, "*/*")
}

// proxyUpstream forwards a request to hex.pm without caching.
func (h *HexHandler) proxyUpstream(w http.ResponseWriter, r *http.Request) {
	h.proxy.ProxyUpstream(w, r, h.upstreamURL+r.URL.Path, []string{"Accept"})
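The helpers above all use one protowire pattern: consume a tag, branch on the wire type, then copy, skip, or extract the field. A minimal, self-contained round trip of that pattern (a sketch; the field numbers and values here are arbitrary, not Hex's actual schema):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode: field 1 = bytes "1.2.3", field 2 = varint 7.
	var msg []byte
	msg = protowire.AppendTag(msg, 1, protowire.BytesType)
	msg = protowire.AppendString(msg, "1.2.3")
	msg = protowire.AppendTag(msg, 2, protowire.VarintType)
	msg = protowire.AppendVarint(msg, 7)

	// Decode with the same consume loop the handlers use.
	data := msg
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			panic("invalid tag")
		}
		data = data[n:]
		switch wtype {
		case protowire.BytesType:
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				panic("invalid bytes")
			}
			fmt.Printf("field %d: %q\n", num, v) // field 1: "1.2.3"
			data = data[vn:]
		case protowire.VarintType:
			v, vn := protowire.ConsumeVarint(data)
			if vn < 0 {
				panic("invalid varint")
			}
			fmt.Printf("field %d: %d\n", num, v) // field 2: 7
			data = data[vn:]
		}
	}
}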
@@ -1,8 +1,18 @@
package handler

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"io"
	"log/slog"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/git-pkgs/proxy/internal/cooldown"
	"google.golang.org/protobuf/encoding/protowire"
)

func TestHexParseTarballFilename(t *testing.T) {

@@ -27,3 +37,290 @@ func TestHexParseTarballFilename(t *testing.T) {
		}
	}
}

// buildHexRelease encodes a Release protobuf message.
func buildHexRelease(version string) []byte {
	var release []byte
	// field 1 = version (string)
	release = protowire.AppendTag(release, 1, protowire.BytesType)
	release = protowire.AppendString(release, version)
	// field 2 = inner_checksum (bytes) - required
	release = protowire.AppendTag(release, 2, protowire.BytesType)
	release = protowire.AppendBytes(release, []byte("fakechecksum1234567890123456789012"))
	// field 5 = outer_checksum (bytes)
	release = protowire.AppendTag(release, 5, protowire.BytesType)
	release = protowire.AppendBytes(release, []byte("outerchecksum123456789012345678901"))
	return release
}

// buildHexPackage encodes a Package protobuf message.
func buildHexPackage(name string, versions []string) []byte {
	var pkg []byte
	for _, v := range versions {
		release := buildHexRelease(v)
		pkg = protowire.AppendTag(pkg, 1, protowire.BytesType)
		pkg = protowire.AppendBytes(pkg, release)
	}
	// field 2 = name
	pkg = protowire.AppendTag(pkg, 2, protowire.BytesType)
	pkg = protowire.AppendString(pkg, name)
	// field 3 = repository
	pkg = protowire.AppendTag(pkg, 3, protowire.BytesType)
	pkg = protowire.AppendString(pkg, "hexpm")
	return pkg
}

// buildHexSigned wraps a payload in a Signed protobuf message and gzips it.
func buildHexSigned(payload []byte) []byte {
	var signed []byte
	signed = protowire.AppendTag(signed, 1, protowire.BytesType)
	signed = protowire.AppendBytes(signed, payload)
	// field 2 = signature (optional, add a fake one)
	signed = protowire.AppendTag(signed, 2, protowire.BytesType)
	signed = protowire.AppendBytes(signed, []byte("fakesignature"))

	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	_, _ = gw.Write(signed)
	_ = gw.Close()
	return buf.Bytes()
}

func TestHexFilterPackageReleases(t *testing.T) {
	pkg := buildHexPackage("phoenix", []string{testVersion100, "2.0.0", "3.0.0"})

	filtered, err := filterPackageReleases(pkg, map[string]bool{"2.0.0": true})
	if err != nil {
		t.Fatal(err)
	}

	// Extract remaining versions
	var versions []string
	data := filtered
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			break
		}
		data = data[n:]
		switch wtype {
		case protowire.BytesType:
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				break
			}
			if num == 1 { // release field
				version := extractReleaseVersion(v)
				if version != "" {
					versions = append(versions, version)
				}
			}
			data = data[vn:]
		case protowire.VarintType:
			_, vn := protowire.ConsumeVarint(data)
			if vn < 0 {
				break
			}
			data = data[vn:]
		}
	}

	if len(versions) != 2 {
		t.Fatalf("expected 2 versions, got %d: %v", len(versions), versions)
	}
	if versions[0] != testVersion100 || versions[1] != "3.0.0" {
		t.Errorf("expected [1.0.0, 3.0.0], got %v", versions)
	}
}

func TestHexFilterSignedPackage(t *testing.T) {
	pkg := buildHexPackage("phoenix", []string{testVersion100, "2.0.0"})
	gzipped := buildHexSigned(pkg)

	h := &HexHandler{
		proxy:    testProxy(),
		proxyURL: "http://proxy.local",
	}

	filtered, err := h.filterSignedPackage(gzipped, map[string]bool{"2.0.0": true})
	if err != nil {
		t.Fatal(err)
	}

	// Decompress and check
	gr, err := gzip.NewReader(bytes.NewReader(filtered))
	if err != nil {
		t.Fatal(err)
	}
	signed, err := io.ReadAll(gr)
	if err != nil {
		t.Fatal(err)
	}

	payload, err := extractProtobufBytes(signed, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Check that only version 1.0.0 remains
	version := extractReleaseVersion(mustExtractFirstRelease(t, payload))
	if version != testVersion100 {
		t.Errorf("expected version 1.0.0, got %s", version)
	}

	// Verify no signature in the output
	_, err = extractProtobufBytes(signed, 2)
	if err == nil {
		t.Error("expected no signature in filtered output")
	}
}

func mustExtractFirstRelease(t *testing.T, payload []byte) []byte {
	t.Helper()
	data := payload
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			t.Fatal("invalid protobuf")
		}
		data = data[n:]
		if wtype == protowire.BytesType {
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				t.Fatal("invalid bytes")
			}
			if num == 1 {
				return v
			}
			data = data[vn:]
		}
	}
	t.Fatal("no release found")
	return nil
}

func TestHexExtractReleaseVersion(t *testing.T) {
	release := buildHexRelease("1.2.3")
	version := extractReleaseVersion(release)
	if version != "1.2.3" {
		t.Errorf("expected 1.2.3, got %s", version)
	}
}

func TestHexHandlePackagesWithCooldown(t *testing.T) {
	now := time.Now()
	oldTime := now.Add(-7 * 24 * time.Hour).Format(time.RFC3339Nano)
	recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339Nano)

	pkg := buildHexPackage("testpkg", []string{testVersion100, "2.0.0"})
	gzippedProto := buildHexSigned(pkg)

	apiJSON, _ := json.Marshal(hexPackageAPI{
		Releases: []hexRelease{
			{Version: testVersion100, InsertedAt: oldTime},
			{Version: "2.0.0", InsertedAt: recentTime},
		},
	})

	// Serve both the protobuf repo and the JSON API from the same test server
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/packages/testpkg":
			w.Header().Set("Content-Encoding", "gzip")
			_, _ = w.Write(gzippedProto)
		case "/api/packages/testpkg":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write(apiJSON)
		default:
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	defer upstream.Close()

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &HexHandler{
		proxy:       proxy,
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	// hexAPIURL is a const pointing at hex.pm, so the full handlePackages flow
	// can't be redirected to the test server; exercise the protobuf filtering
	// directly instead, since that is the core of the cooldown logic.
	filtered, err := h.filterSignedPackage(gzippedProto, map[string]bool{"2.0.0": true})
	if err != nil {
		t.Fatal(err)
	}

	// Verify only version 1.0.0 survives
	gr, _ := gzip.NewReader(bytes.NewReader(filtered))
	signed, _ := io.ReadAll(gr)
	payload, _ := extractProtobufBytes(signed, 1)

	var versions []string
	data := payload
	for len(data) > 0 {
		num, wtype, n := protowire.ConsumeTag(data)
		if n < 0 {
			break
		}
		data = data[n:]
		if wtype == protowire.BytesType {
			v, vn := protowire.ConsumeBytes(data)
			if vn < 0 {
				break
			}
			if num == 1 {
				if ver := extractReleaseVersion(v); ver != "" {
					versions = append(versions, ver)
				}
			}
			data = data[vn:]
		}
	}

	if len(versions) != 1 || versions[0] != testVersion100 {
		t.Errorf("expected [1.0.0], got %v", versions)
	}
}

func TestHexHandlePackagesWithoutCooldown(t *testing.T) {
	pkg := buildHexPackage("testpkg", []string{testVersion100})
	gzipped := buildHexSigned(pkg)

	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Encoding", "gzip")
		_, _ = w.Write(gzipped)
	}))
	defer upstream.Close()

	h := &HexHandler{
		proxy:       testProxy(), // no cooldown
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	req := httptest.NewRequest(http.MethodGet, "/packages/testpkg", nil)
	req.SetPathValue("name", "testpkg")
	w := httptest.NewRecorder()
	h.handlePackages(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
	}
}

@@ -51,8 +51,8 @@ func (h *MavenHandler) handleRequest(w http.ResponseWriter, r *http.Request) {
	filename := path.Base(urlPath)

	if h.isMetadataFile(filename) {
-		// Proxy metadata without caching
-		h.proxyUpstream(w, r)
+		cacheKey := strings.ReplaceAll(urlPath, "/", "_")
+		h.proxy.ProxyCached(w, r, h.upstreamURL+r.URL.Path, "maven", cacheKey, "*/*")
		return
	}

@@ -2,6 +2,7 @@ package handler

import (
	"encoding/json"
+	"errors"
	"fmt"
	"net/http"
	"net/url"

@@ -13,8 +14,9 @@ import (
)

const (
-	npmUpstream = "https://registry.npmjs.org"
-	scopedParts = 2 // scope + name in scoped packages
+	npmUpstream      = "https://registry.npmjs.org"
+	npmAbbreviatedCT = "application/vnd.npm.install-v1+json"
+	scopedParts      = 2 // scope + name in scoped packages
)

// NPMHandler handles npm registry protocol requests.
@@ -65,51 +67,37 @@ func (h *NPMHandler) handlePackageMetadata(w http.ResponseWriter, r *http.Request) {

	h.proxy.Logger.Info("npm metadata request", "package", packageName)

-	// Fetch metadata from upstream
	upstreamURL := fmt.Sprintf("%s/%s", h.upstreamURL, url.PathEscape(packageName))

-	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
-	if err != nil {
-		JSONError(w, http.StatusInternalServerError, "failed to create request")
-		return
+	// Use abbreviated metadata when cooldown is disabled — it's much smaller
+	// (e.g. drizzle-orm: 4MB vs 92MB) but lacks the time map needed for cooldown.
+	accept := npmAbbreviatedCT
+	if h.proxy.Cooldown != nil && h.proxy.Cooldown.Enabled() {
+		accept = contentTypeJSON
	}
-	req.Header.Set("Accept", "application/json")

-	resp, err := h.proxy.HTTPClient.Do(req)
+	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "npm", packageName, upstreamURL, accept)
	if err != nil {
-		h.proxy.Logger.Error("failed to fetch upstream metadata", "error", err)
+		if errors.Is(err, ErrUpstreamNotFound) {
+			JSONError(w, http.StatusNotFound, "package not found")
+			return
+		}
+		h.proxy.Logger.Error("failed to fetch npm metadata", "error", err)
		JSONError(w, http.StatusBadGateway, "failed to fetch from upstream")
		return
	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode == http.StatusNotFound {
-		JSONError(w, http.StatusNotFound, "package not found")
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		JSONError(w, http.StatusBadGateway, fmt.Sprintf("upstream returned %d", resp.StatusCode))
-		return
-	}
-
-	// Parse and rewrite tarball URLs
-	body, err := ReadMetadata(resp.Body)
-	if err != nil {
-		JSONError(w, http.StatusInternalServerError, "failed to read response")
-		return
-	}
-
	rewritten, err := h.rewriteMetadata(packageName, body)
	if err != nil {
		// If rewriting fails, just proxy the original
		h.proxy.Logger.Warn("failed to rewrite metadata, proxying original", "error", err)
-		w.Header().Set("Content-Type", "application/json")
+		w.Header().Set("Content-Type", contentTypeJSON)
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(body)
		return
	}

-	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Content-Type", contentTypeJSON)
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(rewritten)
}
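For context on the Accept switch above: the npm registry serves a slimmed-down document when a client asks for the install-v1 media type (the same value as npmAbbreviatedCT) and falls back to full JSON otherwise. A standalone sketch of opting in (the package name is an arbitrary example):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://registry.npmjs.org/lodash", nil)
	if err != nil {
		panic(err)
	}
	// Abbreviated metadata: just what installers need (dist tarball URLs,
	// dependencies), without readmes or the per-version "time" map that
	// cooldown filtering depends on.
	req.Header.Set("Accept", "application/vnd.npm.install-v1+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("got %d bytes (Content-Type %s)\n", len(body), resp.Header.Get("Content-Type"))
}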
@@ -32,9 +32,9 @@ func TestNPMExtractVersionFromFilename(t *testing.T) {
		{"@babel/core", "core-7.23.0.tgz", "7.23.0"},
		{"@types/node", "node-20.10.0.tgz", "20.10.0"},
		{"express", "express-4.18.2.tgz", "4.18.2"},
-		{"lodash", "lodash.tgz", ""}, // no version
-		{"lodash", "lodash-4.17.21.zip", ""}, // wrong extension
-		{"lodash", "other-4.17.21.tgz", ""}, // wrong package name
+		{"lodash", "lodash.tgz", ""},         // no version
+		{"lodash", "lodash-4.17.21.zip", ""}, // wrong extension
+		{"lodash", "other-4.17.21.tgz", ""},  // wrong package name
	}

	for _, tt := range tests {
@@ -293,6 +293,62 @@ func TestNPMRewriteMetadataCooldownExemptPackage(t *testing.T) {
	}
}

func TestNPMHandlerUsesAbbreviatedMetadata(t *testing.T) {
	var gotAccept string
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotAccept = r.Header.Get("Accept")
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{
			"name": "testpkg",
			"versions": {
				"1.0.0": {
					"name": "testpkg",
					"version": "1.0.0",
					"dist": {
						"tarball": "https://registry.npmjs.org/testpkg/-/testpkg-1.0.0.tgz"
					}
				}
			}
		}`))
	}))
	defer upstream.Close()

	t.Run("no cooldown uses abbreviated metadata", func(t *testing.T) {
		h := &NPMHandler{
			proxy:       testProxy(),
			upstreamURL: upstream.URL,
			proxyURL:    "http://proxy.local",
		}

		req := httptest.NewRequest(http.MethodGet, "/testpkg", nil)
		w := httptest.NewRecorder()
		h.handlePackageMetadata(w, req)

		if gotAccept != npmAbbreviatedCT {
			t.Errorf("Accept = %q, want abbreviated metadata header", gotAccept)
		}
	})

	t.Run("cooldown enabled uses full metadata", func(t *testing.T) {
		proxy := testProxy()
		proxy.Cooldown = &cooldown.Config{Default: "3d"}

		h := &NPMHandler{
			proxy:       proxy,
			upstreamURL: upstream.URL,
			proxyURL:    "http://proxy.local",
		}

		req := httptest.NewRequest(http.MethodGet, "/testpkg", nil)
		w := httptest.NewRecorder()
		h.handlePackageMetadata(w, req)

		if gotAccept == npmAbbreviatedCT {
			t.Error("cooldown enabled should use full metadata, not abbreviated")
		}
	})
}

func TestNPMHandlerMetadataNotFound(t *testing.T) {
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
@@ -2,10 +2,14 @@ package handler

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/git-pkgs/purl"
)

const (

@@ -40,7 +44,7 @@ func (h *NuGetHandler) Routes() http.Handler {
	mux.HandleFunc("GET /v3-flatcontainer/{id}/index.json", h.proxyUpstream)

	// Registration (package metadata) - use prefix matching since {version}.json isn't allowed
-	mux.HandleFunc("GET /v3/registration5-gz-semver2/", h.proxyUpstream)
+	mux.HandleFunc("GET /v3/registration5-gz-semver2/", h.handleRegistration)

	// Search
	mux.HandleFunc("GET /query", h.proxyUpstream)
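A note on the prefix route above: Go 1.22 ServeMux wildcards must cover a whole path segment, so a pattern like ".../{version}.json" is not valid and would panic at registration time, while a pattern ending in "/" matches by prefix. A minimal standalone sketch (handler bodies are illustrative only):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	mux := http.NewServeMux()

	// Valid: the wildcard spans a whole path segment.
	mux.HandleFunc("GET /v3-flatcontainer/{id}/index.json", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, r.PathValue("id"))
	})

	// A wildcard embedded inside a segment ("{version}.json") is rejected,
	// so a trailing-slash pattern (prefix match) is used instead.
	mux.HandleFunc("GET /v3/registration5-gz-semver2/", func(w http.ResponseWriter, r *http.Request) {
		rest := strings.TrimPrefix(r.URL.Path, "/v3/registration5-gz-semver2/")
		fmt.Fprintln(w, rest) // e.g. "testpkg/index.json"
	})

	_ = http.ListenAndServe(":8080", mux)
}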
@@ -57,31 +61,16 @@ func (h *NuGetHandler) handleServiceIndex(w http.ResponseWriter, r *http.Request) {

	upstreamURL := h.upstreamURL + "/v3/index.json"

-	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
-	if err != nil {
-		http.Error(w, "failed to create request", http.StatusInternalServerError)
-		return
-	}
-
-	resp, err := h.proxy.HTTPClient.Do(req)
+	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "nuget", "_service_index", upstreamURL)
	if err != nil {
+		if errors.Is(err, ErrUpstreamNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+			return
+		}
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != http.StatusOK {
-		w.WriteHeader(resp.StatusCode)
-		_, _ = io.Copy(w, resp.Body)
-		return
-	}
-
-	body, err := ReadMetadata(resp.Body)
-	if err != nil {
-		http.Error(w, "failed to read response", http.StatusInternalServerError)
-		return
-	}
-
	rewritten, err := h.rewriteServiceIndex(body)
	if err != nil {
@@ -152,10 +141,10 @@ func (h *NuGetHandler) shouldRewriteService(serviceType string) bool {
func (h *NuGetHandler) rewriteNuGetURL(origURL string) string {
	// Map known NuGet API endpoints to our proxy paths
	replacements := map[string]string{
-		"https://api.nuget.org/v3-flatcontainer/": h.proxyURL + "/nuget/v3-flatcontainer/",
+		"https://api.nuget.org/v3-flatcontainer/":            h.proxyURL + "/nuget/v3-flatcontainer/",
		"https://api.nuget.org/v3/registration5-gz-semver2/": h.proxyURL + "/nuget/v3/registration5-gz-semver2/",
-		"https://azuresearch-usnc.nuget.org/query": h.proxyURL + "/nuget/query",
-		"https://azuresearch-usnc.nuget.org/autocomplete": h.proxyURL + "/nuget/autocomplete",
+		"https://azuresearch-usnc.nuget.org/query":           h.proxyURL + "/nuget/query",
+		"https://azuresearch-usnc.nuget.org/autocomplete":    h.proxyURL + "/nuget/autocomplete",
	}

	for old, new := range replacements {
@@ -167,6 +156,140 @@ func (h *NuGetHandler) rewriteNuGetURL(origURL string) string {
	return origURL
}

// handleRegistration proxies NuGet registration pages, applying cooldown filtering.
func (h *NuGetHandler) handleRegistration(w http.ResponseWriter, r *http.Request) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		h.proxyUpstream(w, r)
		return
	}

	upstreamURL := h.buildUpstreamURL(r)

	h.proxy.Logger.Debug("fetching registration for cooldown filtering", "url", upstreamURL)

	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
	if err != nil {
		http.Error(w, "failed to create request", http.StatusInternalServerError)
		return
	}
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := h.proxy.HTTPClient.Do(req)
	if err != nil {
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		for k, vv := range resp.Header {
			for _, v := range vv {
				w.Header().Add(k, v)
			}
		}
		w.WriteHeader(resp.StatusCode)
		_, _ = io.Copy(w, resp.Body)
		return
	}

	body, err := ReadMetadata(resp.Body)
	if err != nil {
		http.Error(w, "failed to read response", http.StatusInternalServerError)
		return
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		h.proxy.Logger.Warn("failed to filter registration, proxying original", "error", err)
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write(body)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(filtered)
}

// applyCooldownFiltering filters versions from NuGet registration pages
// that are too recently published.
func (h *NuGetHandler) applyCooldownFiltering(body []byte) ([]byte, error) {
	if h.proxy.Cooldown == nil || !h.proxy.Cooldown.Enabled() {
		return body, nil
	}

	var registration map[string]any
	if err := json.Unmarshal(body, &registration); err != nil {
		return nil, err
	}

	pages, ok := registration["items"].([]any)
	if !ok {
		return body, nil
	}

	for _, page := range pages {
		pageMap, ok := page.(map[string]any)
		if !ok {
			continue
		}

		items, ok := pageMap["items"].([]any)
		if !ok {
			continue
		}

		filtered := items[:0]
		for _, item := range items {
			itemMap, ok := item.(map[string]any)
			if !ok {
				continue
			}

			catalogEntry, ok := itemMap["catalogEntry"].(map[string]any)
			if !ok {
				filtered = append(filtered, item)
				continue
			}

			version, _ := catalogEntry["version"].(string)
			id, _ := catalogEntry["id"].(string)
			publishedStr, _ := catalogEntry["published"].(string)

			if publishedStr == "" {
				filtered = append(filtered, item)
				continue
			}

			publishedAt, err := time.Parse(time.RFC3339, publishedStr)
			if err != nil {
				// NuGet uses a slightly non-standard format, try parsing with fractional seconds
				publishedAt, err = time.Parse("2006-01-02T15:04:05.999-07:00", publishedStr)
				if err != nil {
					filtered = append(filtered, item)
					continue
				}
			}

			packagePURL := purl.MakePURLString("nuget", strings.ToLower(id), "")

			if !h.proxy.Cooldown.IsAllowed("nuget", packagePURL, publishedAt) {
				h.proxy.Logger.Info("cooldown: filtering nuget version",
					"package", id, "version", version,
					"published", publishedStr)
				continue
			}

			filtered = append(filtered, item)
		}

		pageMap["items"] = filtered
		pageMap["count"] = len(filtered)
	}

	return json.Marshal(registration)
}

// handleDownload serves a package file, fetching and caching from upstream if needed.
func (h *NuGetHandler) handleDownload(w http.ResponseWriter, r *http.Request) {
	id := r.PathValue("id")
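One detail in applyCooldownFiltering worth calling out: `filtered := items[:0]` reuses the backing array of `items` instead of allocating a new slice, which is safe here because the write index never overtakes the read index. A standalone sketch of the idiom (the `keep` predicate is hypothetical):

package main

import "fmt"

func main() {
	items := []int{1, 2, 3, 4, 5}
	keep := func(n int) bool { return n%2 == 1 }

	// filtered shares items' backing array; each survivor is written at or
	// before the position it was read from.
	filtered := items[:0]
	for _, it := range items {
		if keep(it) {
			filtered = append(filtered, it)
		}
	}
	fmt.Println(filtered) // [1 3 5]
}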
@@ -8,6 +8,9 @@ import (
	"net/http/httptest"
	"strings"
	"testing"
+	"time"
+
+	"github.com/git-pkgs/proxy/internal/cooldown"
)

func nugetTestProxy() *Proxy {
@@ -66,11 +69,11 @@ func TestNuGetRewriteServiceIndex(t *testing.T) {
	}

	expectations := map[string]string{
-		"PackageBaseAddress/3.0.0": "http://localhost:8080/nuget/v3-flatcontainer/",
-		"RegistrationsBaseUrl/3.6.0": "http://localhost:8080/nuget/v3/registration5-gz-semver2/",
-		"SearchQueryService/3.5.0": "http://localhost:8080/nuget/query",
+		"PackageBaseAddress/3.0.0":        "http://localhost:8080/nuget/v3-flatcontainer/",
+		"RegistrationsBaseUrl/3.6.0":      "http://localhost:8080/nuget/v3/registration5-gz-semver2/",
+		"SearchQueryService/3.5.0":        "http://localhost:8080/nuget/query",
		"SearchAutocompleteService/3.5.0": "http://localhost:8080/nuget/autocomplete",
-		"SomeOtherService/1.0.0": "https://example.com/other-service",
+		"SomeOtherService/1.0.0":          "https://example.com/other-service",
	}

	for _, res := range resources {
@@ -227,8 +230,9 @@ func TestNuGetHandleServiceIndexUpstreamError(t *testing.T) {
	w := httptest.NewRecorder()
	h.handleServiceIndex(w, req)

-	if w.Code != http.StatusInternalServerError {
-		t.Errorf("status = %d, want %d", w.Code, http.StatusInternalServerError)
+	// With metadata caching, upstream 500 is reported as 502 (bad gateway)
+	if w.Code != http.StatusBadGateway {
+		t.Errorf("status = %d, want %d", w.Code, http.StatusBadGateway)
	}
}

@@ -768,3 +772,333 @@ func TestNuGetBuildUpstreamURLRegularPath(t *testing.T) {
		t.Errorf("buildUpstreamURL for registration = %q, want %q", got, want)
	}
}

func TestNuGetCooldownFiltering(t *testing.T) {
	now := time.Now()
	oldTime := now.Add(-7 * 24 * time.Hour).Format(time.RFC3339)
	recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)

	registration := map[string]any{
		"items": []any{
			map[string]any{
				"count": 2,
				"items": []any{
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "TestPackage",
							"version":   "1.0.0",
							"published": oldTime,
						},
					},
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "TestPackage",
							"version":   "2.0.0",
							"published": recentTime,
						},
					},
				},
			},
		},
	}

	body, err := json.Marshal(registration)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &NuGetHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	pages := result["items"].([]any)
	page := pages[0].(map[string]any)
	items := page["items"].([]any)

	if len(items) != 1 {
		t.Fatalf("expected 1 item after filtering, got %d", len(items))
	}

	entry := items[0].(map[string]any)["catalogEntry"].(map[string]any)
	if entry["version"] != testVersion100 {
		t.Errorf("expected version 1.0.0 to survive, got %s", entry["version"])
	}

	count := page["count"]
	if count != float64(1) {
		t.Errorf("expected page count to be 1, got %v", count)
	}
}

func TestNuGetCooldownFilteringWithPackageOverride(t *testing.T) {
	now := time.Now()
	recentTime := now.Add(-2 * time.Hour).Format(time.RFC3339)

	registration := map[string]any{
		"items": []any{
			map[string]any{
				"count": 1,
				"items": []any{
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "SpecialPackage",
							"version":   "1.0.0",
							"published": recentTime,
						},
					},
				},
			},
		},
	}

	body, err := json.Marshal(registration)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default:  "3d",
		Packages: map[string]string{"pkg:nuget/specialpackage": "1h"},
	}

	h := &NuGetHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	pages := result["items"].([]any)
	page := pages[0].(map[string]any)
	items := page["items"].([]any)

	if len(items) != 1 {
		t.Fatalf("expected 1 item (package override allows it), got %d", len(items))
	}
}

func TestNuGetCooldownNoCooldownConfig(t *testing.T) {
	registration := map[string]any{
		"items": []any{
			map[string]any{
				"count": 1,
				"items": []any{
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "Test",
							"version":   "1.0.0",
							"published": time.Now().Format(time.RFC3339),
						},
					},
				},
			},
		},
	}

	body, err := json.Marshal(registration)
	if err != nil {
		t.Fatal(err)
	}

	// No cooldown - applyCooldownFiltering still works, just doesn't filter
	h := &NuGetHandler{
		proxy:    testProxy(),
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	pages := result["items"].([]any)
	page := pages[0].(map[string]any)
	items := page["items"].([]any)

	// With a nil Cooldown, applyCooldownFiltering returns the body unchanged;
	// the caller (handleRegistration) also short-circuits to plain proxying.
	if len(items) != 1 {
		t.Fatalf("expected 1 item, got %d", len(items))
	}
}

func TestNuGetCooldownFilteringNuGetTimestamp(t *testing.T) {
	// NuGet uses timestamps like "2024-09-07T01:37:52.233+00:00" which
	// have fractional seconds - verify these parse correctly
	now := time.Now()
	oldTime := now.Add(-7 * 24 * time.Hour).Format("2006-01-02T15:04:05.000-07:00")

	registration := map[string]any{
		"items": []any{
			map[string]any{
				"count": 1,
				"items": []any{
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "Test",
							"version":   "1.0.0",
							"published": oldTime,
						},
					},
				},
			},
		},
	}

	body, err := json.Marshal(registration)
	if err != nil {
		t.Fatal(err)
	}

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &NuGetHandler{
		proxy:    proxy,
		proxyURL: "http://localhost:8080",
	}

	filtered, err := h.applyCooldownFiltering(body)
	if err != nil {
		t.Fatal(err)
	}

	var result map[string]any
	if err := json.Unmarshal(filtered, &result); err != nil {
		t.Fatal(err)
	}

	pages := result["items"].([]any)
	page := pages[0].(map[string]any)
	items := page["items"].([]any)

	if len(items) != 1 {
		t.Fatalf("expected 1 item (old enough to pass cooldown), got %d", len(items))
	}
}

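On the timestamp handling tested above: Go's time.Parse accepts a fractional second in the input even when the layout omits one, so the RFC3339 attempt in applyCooldownFiltering already covers values like "2024-09-07T01:37:52.233+00:00"; the explicit fallback layout is a defensive second try. A standalone sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := "2024-09-07T01:37:52.233+00:00"

	// The RFC3339 layout has no fractional seconds, but Parse accepts them anyway.
	t1, err := time.Parse(time.RFC3339, ts)
	fmt.Println(t1, err) // parses successfully, err == nil

	// The explicit fallback layout used by applyCooldownFiltering.
	t2, err := time.Parse("2006-01-02T15:04:05.999-07:00", ts)
	fmt.Println(t2, err) // also parses successfully
}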
func TestNuGetHandleRegistrationWithCooldown(t *testing.T) {
	now := time.Now()
	oldTime := now.Add(-7 * 24 * time.Hour).Format(time.RFC3339)
	recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)

	registrationJSON, _ := json.Marshal(map[string]any{
		"items": []any{
			map[string]any{
				"count": 2,
				"items": []any{
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "TestPkg",
							"version":   "1.0.0",
							"published": oldTime,
						},
					},
					map[string]any{
						"catalogEntry": map[string]any{
							"id":        "TestPkg",
							"version":   "2.0.0",
							"published": recentTime,
						},
					},
				},
			},
		},
	})

	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write(registrationJSON)
	}))
	defer upstream.Close()

	proxy := testProxy()
	proxy.Cooldown = &cooldown.Config{
		Default: "3d",
	}

	h := &NuGetHandler{
		proxy:       proxy,
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	req := httptest.NewRequest(http.MethodGet, "/v3/registration5-gz-semver2/testpkg/index.json", nil)
	w := httptest.NewRecorder()
	h.handleRegistration(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
	}

	var result map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &result); err != nil {
		t.Fatal(err)
	}

	pages := result["items"].([]any)
	page := pages[0].(map[string]any)
	items := page["items"].([]any)

	if len(items) != 1 {
		t.Fatalf("expected 1 item after cooldown filtering, got %d", len(items))
	}
}

func TestNuGetHandleRegistrationWithoutCooldown(t *testing.T) {
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"items":[]}`))
	}))
	defer upstream.Close()

	h := &NuGetHandler{
		proxy:       nugetTestProxy(), // no cooldown configured
		upstreamURL: upstream.URL,
		proxyURL:    "http://proxy.local",
	}

	req := httptest.NewRequest(http.MethodGet, "/v3/registration5-gz-semver2/testpkg/index.json", nil)
	w := httptest.NewRecorder()
	h.handleRegistration(w, req)

	// Without cooldown, should proxy directly
	if w.Code != http.StatusOK {
		t.Fatalf("status = %d, want %d", w.Code, http.StatusOK)
	}
}

@@ -2,8 +2,8 @@ package handler

import (
	"encoding/json"
+	"errors"
	"fmt"
-	"io"
	"net/http"
	"strings"
	"time"

@@ -89,32 +89,16 @@ func (h *PubHandler) handlePackageMetadata(w http.ResponseWriter, r *http.Request) {

	upstreamURL := fmt.Sprintf("%s/api/packages/%s", h.upstreamURL, name)

-	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
-	if err != nil {
-		http.Error(w, "failed to create request", http.StatusInternalServerError)
-		return
-	}
-	req.Header.Set("Accept", "application/json")
-
-	resp, err := h.proxy.HTTPClient.Do(req)
+	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "pub", name, upstreamURL)
	if err != nil {
+		if errors.Is(err, ErrUpstreamNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+			return
+		}
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != http.StatusOK {
-		w.WriteHeader(resp.StatusCode)
-		_, _ = io.Copy(w, resp.Body)
-		return
-	}
-
-	body, err := ReadMetadata(resp.Body)
-	if err != nil {
-		http.Error(w, "failed to read response", http.StatusInternalServerError)
-		return
-	}
-
	rewritten, err := h.rewriteMetadata(name, body)
	if err != nil {

@@ -4,6 +4,7 @@ import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
+	"errors"
	"fmt"
	"io"
	"net/http"

@@ -16,11 +17,11 @@ import (
)

const (
-	pypiUpstream = "https://pypi.org"
-	minWheelParts = 5 // name + version + python + abi + platform
-	minSubmatchParts = 2 // full match + first capture group
-	minPyPIPathParts = 3 // hash_prefix + hash + filename
-	minPythonTagLen = 2 // minimum length for a python tag (e.g., "py")
+	pypiUpstream     = "https://pypi.org"
+	minWheelParts    = 5 // name + version + python + abi + platform
+	minSubmatchParts = 2 // full match + first capture group
+	minPyPIPathParts = 3 // hash_prefix + hash + filename
+	minPythonTagLen  = 2 // minimum length for a python tag (e.g., "py")
)

// PyPIHandler handles PyPI registry protocol requests.
@@ -74,33 +75,18 @@ func (h *PyPIHandler) handleSimplePackage(w http.ResponseWriter, r *http.Request) {
	h.proxy.Logger.Info("pypi simple request", "package", name)

	upstreamURL := fmt.Sprintf("%s/simple/%s/", h.upstreamURL, name)
+	cacheKey := name + "/simple"

-	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
-	if err != nil {
-		http.Error(w, "failed to create request", http.StatusInternalServerError)
-		return
-	}
-	req.Header.Set("Accept", "text/html")
-
-	resp, err := h.proxy.HTTPClient.Do(req)
+	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "pypi", cacheKey, upstreamURL, "text/html")
	if err != nil {
+		if errors.Is(err, ErrUpstreamNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+			return
+		}
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != http.StatusOK {
-		w.WriteHeader(resp.StatusCode)
-		_, _ = io.Copy(w, resp.Body)
-		return
-	}
-
-	body, err := ReadMetadata(resp.Body)
-	if err != nil {
-		http.Error(w, "failed to read response", http.StatusInternalServerError)
-		return
-	}
-
	// When cooldown is enabled, fetch JSON metadata to get version timestamps
	var filteredVersions map[string]bool
@@ -221,7 +207,7 @@ func (h *PyPIHandler) handleJSON(w http.ResponseWriter, r *http.Request) {
	h.proxy.Logger.Info("pypi json request", "package", name)

	upstreamURL := fmt.Sprintf("%s/pypi/%s/json", h.upstreamURL, name)
-	h.proxyAndRewriteJSON(w, r, upstreamURL)
+	h.proxyAndRewriteJSON(w, r, upstreamURL, name+"/json")
}

// handleVersionJSON serves the JSON API version metadata.
@@ -237,37 +223,21 @@ func (h *PyPIHandler) handleVersionJSON(w http.ResponseWriter, r *http.Request) {
	h.proxy.Logger.Info("pypi version json request", "package", name, "version", version)

	upstreamURL := fmt.Sprintf("%s/pypi/%s/%s/json", h.upstreamURL, name, version)
-	h.proxyAndRewriteJSON(w, r, upstreamURL)
+	h.proxyAndRewriteJSON(w, r, upstreamURL, name+"/"+version)
}

// proxyAndRewriteJSON fetches JSON metadata and rewrites download URLs.
-func (h *PyPIHandler) proxyAndRewriteJSON(w http.ResponseWriter, r *http.Request, upstreamURL string) {
-	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
-	if err != nil {
-		http.Error(w, "failed to create request", http.StatusInternalServerError)
-		return
-	}
-	req.Header.Set("Accept", "application/json")
-
-	resp, err := h.proxy.HTTPClient.Do(req)
+func (h *PyPIHandler) proxyAndRewriteJSON(w http.ResponseWriter, r *http.Request, upstreamURL, cacheKey string) {
+	body, _, err := h.proxy.FetchOrCacheMetadata(r.Context(), "pypi", cacheKey, upstreamURL)
	if err != nil {
+		if errors.Is(err, ErrUpstreamNotFound) {
+			http.Error(w, "not found", http.StatusNotFound)
+			return
+		}
		h.proxy.Logger.Error("upstream request failed", "error", err)
		http.Error(w, "upstream request failed", http.StatusBadGateway)
		return
	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != http.StatusOK {
-		w.WriteHeader(resp.StatusCode)
-		_, _ = io.Copy(w, resp.Body)
-		return
-	}
-
-	body, err := ReadMetadata(resp.Body)
-	if err != nil {
-		http.Error(w, "failed to read response", http.StatusInternalServerError)
-		return
-	}
-
	rewritten, err := h.rewriteJSONMetadata(body)
	if err != nil {

@@ -2,6 +2,7 @@ package handler

import (
	"bytes"
+	"errors"
	"testing"
)

@@ -17,9 +18,8 @@ func TestReadMetadata(t *testing.T) {
		}
	})

-	t.Run("truncates at limit", func(t *testing.T) {
-		// Create a reader slightly larger than maxMetadataSize
-		data := make([]byte, maxMetadataSize+100)
+	t.Run("exactly at limit", func(t *testing.T) {
+		data := make([]byte, maxMetadataSize)
		for i := range data {
			data[i] = 'x'
		}

@@ -31,4 +31,15 @@ func TestReadMetadata(t *testing.T) {
			t.Errorf("got length %d, want %d", len(got), maxMetadataSize)
		}
	})
+
+	t.Run("over limit returns error", func(t *testing.T) {
+		data := make([]byte, maxMetadataSize+100)
+		for i := range data {
+			data[i] = 'x'
+		}
+		_, err := ReadMetadata(bytes.NewReader(data))
+		if !errors.Is(err, ErrMetadataTooLarge) {
+			t.Errorf("got error %v, want ErrMetadataTooLarge", err)
+		}
+	})
}

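ReadMetadata itself is not shown in this diff, but the subtests pin its contract: reads up to maxMetadataSize succeed, and anything larger yields ErrMetadataTooLarge. An implementation matching that contract might look roughly like this (a sketch under those assumptions, not the actual code; it reuses the package's maxMetadataSize and ErrMetadataTooLarge and needs the io import):

// Sketch only: reads at most maxMetadataSize bytes, erroring on larger input.
func readMetadataSketch(r io.Reader) ([]byte, error) {
	// Read one byte past the limit so "exactly at the limit" and
	// "over the limit" can be told apart.
	data, err := io.ReadAll(io.LimitReader(r, int64(maxMetadataSize)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > maxMetadataSize {
		return nil, ErrMetadataTooLarge
	}
	return data, nil
}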
@@ -95,7 +95,8 @@ func (h *RPMHandler) handlePackageDownload(w http.ResponseWriter, r *http.Request) {
// handleMetadata proxies repository metadata files (repomd.xml, primary.xml.gz, etc.).
// These change frequently so we don't cache them.
func (h *RPMHandler) handleMetadata(w http.ResponseWriter, r *http.Request, path string) {
-	h.proxy.ProxyMetadata(w, r, fmt.Sprintf("%s/%s", h.upstreamURL, path), "rpm")
+	cacheKey := strings.ReplaceAll(path, "/", "_")
+	h.proxy.ProxyCached(w, r, fmt.Sprintf("%s/%s", h.upstreamURL, path), "rpm", cacheKey, "*/*")
}

// proxyFile proxies any file directly without caching.

@@ -48,7 +48,7 @@ func TestRecordStorageOperations(t *testing.T) {

func TestUpdateCacheStats(t *testing.T) {
	UpdateCacheStats(1024*1024*1024, 100) // 1GB, 100 artifacts
-	UpdateCacheStats(0, 0) // Empty cache
+	UpdateCacheStats(0, 0)                // Empty cache

	// No panics = success
}

207 internal/mirror/job.go Normal file
@@ -0,0 +1,207 @@
package mirror

import (
	"context"
	"crypto/rand"
	"fmt"
	"sync"
	"time"
)

// JobState represents the current state of a mirror job.
type JobState string

const (
	JobStatePending  JobState = "pending"
	JobStateRunning  JobState = "running"
	JobStateComplete JobState = "complete"
	JobStateFailed   JobState = "failed"
	JobStateCanceled JobState = "canceled"
)

const jobTTL = 1 * time.Hour
const cleanupInterval = 5 * time.Minute //nolint:mnd // cleanup ticker

// Job represents an async mirror operation.
type Job struct {
	ID        string    `json:"id"`
	State     JobState  `json:"state"`
	Progress  Progress  `json:"progress"`
	CreatedAt time.Time `json:"created_at"`
	Error     string    `json:"error,omitempty"`

	cancel context.CancelFunc
}

// JobRequest is the JSON body for starting a mirror job via the API.
type JobRequest struct {
	PURLs    []string `json:"purls,omitempty"`
	Registry string   `json:"registry,omitempty"`
}

// JobStore manages in-memory mirror jobs.
type JobStore struct {
	mu        sync.RWMutex
	jobs      map[string]*Job
	mirror    *Mirror
	parentCtx context.Context
}

// NewJobStore creates a new job store. The parent context is used as the base
// for all job contexts so that jobs are canceled when the server shuts down.
func NewJobStore(ctx context.Context, m *Mirror) *JobStore {
	return &JobStore{
		jobs:      make(map[string]*Job),
		mirror:    m,
		parentCtx: ctx,
	}
}

// Create starts a new mirror job and returns its ID.
func (js *JobStore) Create(req JobRequest) (string, error) {
	source, err := js.sourceFromRequest(req)
	if err != nil {
		return "", err
	}

	id := newJobID()
	ctx, cancel := context.WithCancel(js.parentCtx)

	job := &Job{
		ID:        id,
		State:     JobStatePending,
		CreatedAt: time.Now(),
		cancel:    cancel,
	}

	js.mu.Lock()
	js.jobs[id] = job
	js.mu.Unlock()

	go js.runJob(ctx, cancel, job, source)

	return id, nil
}

// Get returns a snapshot of a job by ID. The returned copy is safe to
// serialize without holding the lock.
func (js *JobStore) Get(id string) *Job {
	js.mu.RLock()
	defer js.mu.RUnlock()
	job := js.jobs[id]
	if job == nil {
		return nil
	}
	snapshot := *job
	snapshot.cancel = nil // don't leak cancel func
	if len(job.Progress.Errors) > 0 {
		snapshot.Progress.Errors = make([]MirrorError, len(job.Progress.Errors))
		copy(snapshot.Progress.Errors, job.Progress.Errors)
	}
	return &snapshot
}

// Cancel cancels a running job.
func (js *JobStore) Cancel(id string) bool {
	js.mu.Lock()
	defer js.mu.Unlock()

	job := js.jobs[id]
	if job == nil || job.cancel == nil {
		return false
	}

	if job.State != JobStatePending && job.State != JobStateRunning {
		return false
	}

	job.cancel()
	job.State = JobStateCanceled
	return true
}

// Cleanup removes completed/failed/canceled jobs older than jobTTL.
func (js *JobStore) Cleanup() {
	js.mu.Lock()
	defer js.mu.Unlock()
	for id, job := range js.jobs {
		if job.State == JobStateComplete || job.State == JobStateFailed || job.State == JobStateCanceled {
			if time.Since(job.CreatedAt) > jobTTL {
				delete(js.jobs, id)
			}
		}
	}
}

// StartCleanup runs periodic cleanup of old jobs until the context is canceled.
func (js *JobStore) StartCleanup(ctx context.Context) {
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			js.Cleanup()
		}
	}
}

func (js *JobStore) runJob(ctx context.Context, cancel context.CancelFunc, job *Job, source Source) {
	defer cancel()

	js.mu.Lock()
	if job.State == JobStateCanceled {
		js.mu.Unlock()
		return
	}
	job.State = JobStateRunning
	js.mu.Unlock()

	progress, err := js.mirror.Run(ctx, source, func(p Progress) {
		js.mu.Lock()
		defer js.mu.Unlock()
		if job.State == JobStateRunning {
			job.Progress = p
		}
	})

	js.mu.Lock()
	defer js.mu.Unlock()

	// Cancel() may have already set the state; don't overwrite it.
	if job.State == JobStateCanceled {
		return
	}

	if err != nil {
		job.State = JobStateFailed
		job.Error = err.Error()
		return
	}

	job.Progress = *progress
	if progress.Failed > 0 && progress.Completed == 0 {
		job.State = JobStateFailed
	} else {
		job.State = JobStateComplete
	}
}

func (js *JobStore) sourceFromRequest(req JobRequest) (Source, error) { //nolint:ireturn // interface return is the design
	switch {
	case len(req.PURLs) > 0:
		return &PURLSource{PURLs: req.PURLs}, nil
	case req.Registry != "":
		return nil, fmt.Errorf("registry mirroring is not yet implemented; use purls instead")
	default:
		return nil, fmt.Errorf("request must include purls")
	}
}

// newJobID generates a random hex job ID.
func newJobID() string {
	b := make([]byte, 16) //nolint:mnd // 128-bit ID
	_, _ = rand.Read(b)
	return fmt.Sprintf("%x", b)
}
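Create returns as soon as the job is registered and runJob proceeds on its own goroutine, so callers are expected to poll. A sketch of a caller driving a job to completion (a hypothetical helper, not part of the package; the poll interval is arbitrary):

// Hypothetical helper illustrating the polling pattern.
func waitForJob(ctx context.Context, js *JobStore, id string) (*Job, error) {
	for {
		job := js.Get(id) // snapshot copy, safe to read without the store lock
		if job == nil {
			return nil, fmt.Errorf("job %s not found (expired?)", id)
		}
		switch job.State {
		case JobStateComplete, JobStateFailed, JobStateCanceled:
			return job, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(500 * time.Millisecond):
		}
	}
}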
183 internal/mirror/job_test.go Normal file
@@ -0,0 +1,183 @@
package mirror

import (
	"context"
	"testing"
	"time"
)

func TestJobStoreCreateAndGet(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	id, err := js.Create(JobRequest{
		PURLs: []string{"pkg:npm/lodash@4.17.21"},
	})
	if err != nil {
		t.Fatalf("Create() error = %v", err)
	}

	if id == "" {
		t.Fatal("expected non-empty job ID")
	}

	// Wait for the job to start (it runs async)
	time.Sleep(100 * time.Millisecond)

	job := js.Get(id)
	if job == nil {
		t.Fatal("Get() returned nil")
	}
	if job.ID != id {
		t.Errorf("job ID = %q, want %q", job.ID, id)
	}
}

func TestJobStoreGetNotFound(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	job := js.Get("nonexistent")
	if job != nil {
		t.Errorf("expected nil for nonexistent job, got %v", job)
	}
}

func TestJobStoreCancelNotFound(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	if js.Cancel("nonexistent") {
		t.Error("expected Cancel to return false for nonexistent job")
	}
}

func TestJobStoreCreateInvalidRequest(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	_, err := js.Create(JobRequest{})
	if err == nil {
		t.Fatal("expected error for empty request")
	}
}

func TestJobStoreMultipleJobs(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	id1, err := js.Create(JobRequest{PURLs: []string{"pkg:npm/lodash@4.17.21"}})
	if err != nil {
		t.Fatalf("Create() error = %v", err)
	}

	id2, err := js.Create(JobRequest{PURLs: []string{"pkg:cargo/serde@1.0.0"}})
	if err != nil {
		t.Fatalf("Create() error = %v", err)
	}

	if id1 == id2 {
		t.Error("expected different job IDs")
	}

	job1 := js.Get(id1)
	job2 := js.Get(id2)
	if job1 == nil || job2 == nil {
		t.Fatal("expected both jobs to exist")
	}
}

func TestSourceFromRequestPURLs(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	source, err := js.sourceFromRequest(JobRequest{PURLs: []string{"pkg:npm/lodash@1.0.0"}})
	if err != nil {
		t.Fatalf("sourceFromRequest() error = %v", err)
	}
	if _, ok := source.(*PURLSource); !ok {
		t.Errorf("expected *PURLSource, got %T", source)
	}
}

func TestSourceFromRequestRegistryRejected(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	_, err := js.sourceFromRequest(JobRequest{Registry: "npm"})
	if err == nil {
		t.Fatal("expected error for registry request")
	}
}

func TestJobStoreCleanup(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	// Add a completed job with old CreatedAt
	js.mu.Lock()
	js.jobs["old-job"] = &Job{
		ID:        "old-job",
		State:     JobStateComplete,
		CreatedAt: time.Now().Add(-2 * time.Hour),
	}
	js.jobs["recent-job"] = &Job{
		ID:        "recent-job",
		State:     JobStateComplete,
		CreatedAt: time.Now(),
	}
	js.jobs["running-job"] = &Job{
		ID:        "running-job",
		State:     JobStateRunning,
		CreatedAt: time.Now().Add(-2 * time.Hour),
	}
	js.mu.Unlock()

	js.Cleanup()

	if js.Get("old-job") != nil {
		t.Error("expected old completed job to be cleaned up")
	}
	if js.Get("recent-job") == nil {
		t.Error("expected recent completed job to be kept")
	}
	if js.Get("running-job") == nil {
		t.Error("expected running job to be kept regardless of age")
	}
}

func TestJobStoreCancelPreservesStateAfterRunJob(t *testing.T) {
	m := setupTestMirror(t, 1)
	js := NewJobStore(context.Background(), m)

	// Create a job with a PURL that will fail (no real upstream in test)
	id, err := js.Create(JobRequest{PURLs: []string{"pkg:npm/nonexistent-pkg@0.0.0"}})
	if err != nil {
		t.Fatalf("Create() error = %v", err)
	}

	// Cancel immediately -- the job may already be running
	js.Cancel(id)

	// Wait for runJob goroutine to finish
	time.Sleep(200 * time.Millisecond)

	job := js.Get(id)
	if job == nil {
		t.Fatal("Get() returned nil")
	}
	if job.State != JobStateCanceled {
		t.Errorf("state = %q, want %q (cancel should not be overwritten by runJob)", job.State, JobStateCanceled)
	}
}

func TestNewJobIDUnique(t *testing.T) {
	ids := make(map[string]bool)
	for range 100 {
		id := newJobID()
		if ids[id] {
			t.Fatalf("duplicate job ID: %s", id)
		}
		ids[id] = true
	}
}
228 internal/mirror/mirror.go Normal file
@ -0,0 +1,228 @@
|
|||
// Package mirror provides selective package mirroring for pre-populating the proxy cache.
package mirror

import (
	"context"
	"fmt"
	"log/slog"
	"sync"
	"sync/atomic"
	"time"

	"github.com/git-pkgs/proxy/internal/database"
	"github.com/git-pkgs/proxy/internal/handler"
	"github.com/git-pkgs/proxy/internal/storage"
	"golang.org/x/sync/errgroup"
)

// Mirror pre-populates the proxy cache from various input sources.
type Mirror struct {
	proxy   *handler.Proxy
	db      *database.DB
	storage storage.Storage
	logger  *slog.Logger
	workers int
}

// New creates a new Mirror with the given dependencies.
func New(proxy *handler.Proxy, db *database.DB, store storage.Storage, logger *slog.Logger, workers int) *Mirror {
	if workers < 1 {
		workers = 1
	}
	return &Mirror{
		proxy:   proxy,
		db:      db,
		storage: store,
		logger:  logger,
		workers: workers,
	}
}

// Progress tracks the state of a mirror operation.
type Progress struct {
	Total     int64         `json:"total"`
	Completed int64         `json:"completed"`
	Skipped   int64         `json:"skipped"`
	Failed    int64         `json:"failed"`
	Bytes     int64         `json:"bytes"`
	Errors    []MirrorError `json:"errors,omitempty"`
	StartedAt time.Time     `json:"started_at"`
	Phase     string        `json:"phase"`
}

// MirrorError records a single failed mirror attempt.
type MirrorError struct {
	Ecosystem string `json:"ecosystem"`
	Name      string `json:"name"`
	Version   string `json:"version"`
	Error     string `json:"error"`
}

type progressTracker struct {
	total     atomic.Int64
	completed atomic.Int64
	skipped   atomic.Int64
	failed    atomic.Int64
	bytes     atomic.Int64
	mu        sync.Mutex
	errors    []MirrorError
	startedAt time.Time
	phase     atomic.Value // string
}

func newProgressTracker() *progressTracker {
	pt := &progressTracker{
		startedAt: time.Now(),
	}
	pt.phase.Store("resolving")
	return pt
}

const maxTrackedErrors = 1000
const progressReportInterval = 500 * time.Millisecond //nolint:mnd // progress update frequency

func (pt *progressTracker) addError(eco, name, version, err string) {
	pt.mu.Lock()
	if len(pt.errors) < maxTrackedErrors {
		pt.errors = append(pt.errors, MirrorError{
			Ecosystem: eco,
			Name:      name,
			Version:   version,
			Error:     err,
		})
	}
	pt.mu.Unlock()
}

func (pt *progressTracker) snapshot() Progress {
	pt.mu.Lock()
	errs := make([]MirrorError, len(pt.errors))
	copy(errs, pt.errors)
	pt.mu.Unlock()

	phase, _ := pt.phase.Load().(string)
	return Progress{
		Total:     pt.total.Load(),
		Completed: pt.completed.Load(),
		Skipped:   pt.skipped.Load(),
		Failed:    pt.failed.Load(),
		Bytes:     pt.bytes.Load(),
		Errors:    errs,
		StartedAt: pt.startedAt,
		Phase:     phase,
	}
}

// ProgressFunc is called periodically with a snapshot of the current progress.
type ProgressFunc func(Progress)

// Run mirrors all packages from the source using a bounded worker pool.
// It returns the final progress when complete. If onProgress is non-nil,
// it is called with progress snapshots as work proceeds.
func (m *Mirror) Run(ctx context.Context, source Source, onProgress ...ProgressFunc) (*Progress, error) {
	tracker := newProgressTracker()

	// Collect items from source
	var items []PackageVersion
	tracker.phase.Store("resolving")
	err := source.Enumerate(ctx, func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("enumerating packages: %w", err)
	}

	tracker.total.Store(int64(len(items)))
	tracker.phase.Store("downloading")

	// Start periodic progress reporting if a callback was provided
	var progressFn ProgressFunc
	if len(onProgress) > 0 && onProgress[0] != nil {
		progressFn = onProgress[0]
	}
	progressDone := make(chan struct{})
	if progressFn != nil {
		progressFn(tracker.snapshot()) // initial snapshot with total set
		go func() {
			ticker := time.NewTicker(progressReportInterval)
			defer ticker.Stop()
			for {
				select {
				case <-progressDone:
					return
				case <-ticker.C:
					progressFn(tracker.snapshot())
				}
			}
		}()
	}

	// Process items with bounded concurrency
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(m.workers)

	for _, item := range items {
		g.Go(func() (err error) {
			defer func() {
				if r := recover(); r != nil {
					m.logger.Error("panic in mirror worker", "recover", r,
						"ecosystem", item.Ecosystem, "name", item.Name, "version", item.Version)
					tracker.failed.Add(1)
					tracker.addError(item.Ecosystem, item.Name, item.Version, fmt.Sprintf("panic: %v", r))
				}
			}()
			m.mirrorOne(gctx, item, tracker)
			return nil // never fail the group; errors are tracked
		})
	}

	_ = g.Wait()

	close(progressDone) // stop the progress reporter goroutine

	tracker.phase.Store("complete")
	p := tracker.snapshot()

	// Send final snapshot
	if progressFn != nil {
		progressFn(p)
	}

	return &p, nil
}

// RunDryRun enumerates what would be mirrored without downloading.
func (m *Mirror) RunDryRun(ctx context.Context, source Source) ([]PackageVersion, error) {
	var items []PackageVersion
	err := source.Enumerate(ctx, func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	return items, err
}

func (m *Mirror) mirrorOne(ctx context.Context, pv PackageVersion, tracker *progressTracker) {
	result, err := m.proxy.GetOrFetchArtifact(ctx, pv.Ecosystem, pv.Name, pv.Version, "")
	if err != nil {
		tracker.failed.Add(1)
		tracker.addError(pv.Ecosystem, pv.Name, pv.Version, err.Error())
		m.logger.Warn("mirror failed",
			"ecosystem", pv.Ecosystem, "name", pv.Name, "version", pv.Version, "error", err)
		return
	}

	_ = result.Reader.Close()

	if result.Cached {
		tracker.skipped.Add(1)
		m.logger.Debug("already cached",
			"ecosystem", pv.Ecosystem, "name", pv.Name, "version", pv.Version)
	} else {
		tracker.completed.Add(1)
		tracker.bytes.Add(result.Size)
		m.logger.Info("mirrored",
			"ecosystem", pv.Ecosystem, "name", pv.Name, "version", pv.Version,
			"size", result.Size)
	}
}
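
A minimal sketch of how a caller might drive Run with a progress callback, based only on the signatures above; the PURL list, print statements, and function name are illustrative assumptions, not code from this change:

// Sketch: mirror two pinned packages and log progress snapshots.
// Assumes a Mirror m constructed via New(...) as defined above.
func mirrorExample(ctx context.Context, m *Mirror) error {
	src := &PURLSource{PURLs: []string{
		"pkg:npm/lodash@4.17.21",
		"pkg:cargo/serde@1.0.0",
	}}
	final, err := m.Run(ctx, src, func(p Progress) {
		fmt.Printf("%s: %d/%d done, %d failed\n",
			p.Phase, p.Completed+p.Skipped, p.Total, p.Failed)
	})
	if err != nil {
		return err
	}
	fmt.Printf("mirrored %d bytes\n", final.Bytes)
	return nil
}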

195
internal/mirror/mirror_test.go
Normal file

@@ -0,0 +1,195 @@
package mirror

import (
	"context"
	"log/slog"
	"os"
	"testing"
	"time"

	"github.com/git-pkgs/proxy/internal/database"
	"github.com/git-pkgs/proxy/internal/handler"
	"github.com/git-pkgs/proxy/internal/storage"
	"github.com/git-pkgs/registries/fetch"
)

// setupTestMirror creates a Mirror with real DB and filesystem storage for integration tests.
func setupTestMirror(t *testing.T, workers int) *Mirror {
	t.Helper()

	dbPath := t.TempDir() + "/test.db"
	db, err := database.Create(dbPath)
	if err != nil {
		t.Fatalf("creating database: %v", err)
	}
	if err := db.MigrateSchema(); err != nil {
		t.Fatalf("migrating schema: %v", err)
	}
	t.Cleanup(func() { _ = db.Close() })

	storeDir := t.TempDir()
	store, err := storage.OpenBucket(context.Background(), "file://"+storeDir)
	if err != nil {
		t.Fatalf("opening storage: %v", err)
	}

	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
	fetcher := fetch.NewFetcher()
	resolver := fetch.NewResolver()
	proxy := handler.NewProxy(db, store, fetcher, resolver, logger)

	return New(proxy, db, store, logger, workers)
}

const testPackageLodash = "lodash"

func TestMirrorRunEmptySource(t *testing.T) {
	m := setupTestMirror(t, 2)

	source := &PURLSource{PURLs: []string{}}
	progress, err := m.Run(context.Background(), source)
	if err != nil {
		t.Fatalf("Run() error = %v", err)
	}

	if progress.Total != 0 {
		t.Errorf("total = %d, want 0", progress.Total)
	}
	if progress.Phase != "complete" {
		t.Errorf("phase = %q, want %q", progress.Phase, "complete")
	}
}

func TestMirrorRunDryRun(t *testing.T) {
	m := setupTestMirror(t, 1)

	source := &PURLSource{
		PURLs: []string{
			"pkg:npm/lodash@4.17.21",
			"pkg:cargo/serde@1.0.0",
		},
	}

	items, err := m.RunDryRun(context.Background(), source)
	if err != nil {
		t.Fatalf("RunDryRun() error = %v", err)
	}

	if len(items) != 2 {
		t.Fatalf("got %d items, want 2", len(items))
	}

	// Dry run should not modify the database
	stats, err := m.db.GetCacheStats()
	if err != nil {
		t.Fatalf("GetCacheStats() error = %v", err)
	}
	if stats.TotalArtifacts != 0 {
		t.Errorf("artifacts = %d, want 0 (dry run should not cache)", stats.TotalArtifacts)
	}
}

func TestMirrorRunCanceled(t *testing.T) {
	m := setupTestMirror(t, 1)

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel immediately

	// Use a source that produces items but they'll all fail due to canceled context
	source := &PURLSource{
		PURLs: []string{"pkg:npm/lodash@4.17.21"},
	}

	progress, err := m.Run(ctx, source)
	if err != nil {
		t.Fatalf("Run() error = %v", err)
	}

	// With a canceled context, the fetch should fail
	if progress.Failed != 1 {
		t.Errorf("failed = %d, want 1", progress.Failed)
	}
}

func TestProgressTrackerSnapshot(t *testing.T) {
	pt := newProgressTracker()
	pt.total.Store(10)
	pt.completed.Store(5)
	pt.skipped.Store(3)
	pt.failed.Store(2)
	pt.bytes.Store(1024)
	pt.phase.Store("downloading")
	pt.addError("npm", testPackageLodash, "4.17.21", "fetch failed")

	snap := pt.snapshot()
	if snap.Total != 10 {
		t.Errorf("total = %d, want 10", snap.Total)
	}
	if snap.Completed != 5 {
		t.Errorf("completed = %d, want 5", snap.Completed)
	}
	if snap.Skipped != 3 {
		t.Errorf("skipped = %d, want 3", snap.Skipped)
	}
	if snap.Failed != 2 {
		t.Errorf("failed = %d, want 2", snap.Failed)
	}
	if snap.Bytes != 1024 {
		t.Errorf("bytes = %d, want 1024", snap.Bytes)
	}
	if snap.Phase != "downloading" {
		t.Errorf("phase = %q, want %q", snap.Phase, "downloading")
	}
	if len(snap.Errors) != 1 {
		t.Fatalf("errors = %d, want 1", len(snap.Errors))
	}
	if snap.Errors[0].Name != testPackageLodash {
		t.Errorf("error name = %q, want %q", snap.Errors[0].Name, testPackageLodash)
	}
	if snap.StartedAt.IsZero() {
		t.Error("started_at should not be zero")
	}
}

func TestProgressTrackerConcurrentAccess(t *testing.T) {
	pt := newProgressTracker()
	done := make(chan struct{})

	for range 10 {
		go func() {
			pt.completed.Add(1)
			pt.addError("npm", "test", "1.0.0", "error")
			_ = pt.snapshot()
			done <- struct{}{}
		}()
	}

	timeout := time.After(5 * time.Second)
	for range 10 {
		select {
		case <-done:
		case <-timeout:
			t.Fatal("timed out waiting for goroutines")
		}
	}

	snap := pt.snapshot()
	if snap.Completed != 10 {
		t.Errorf("completed = %d, want 10", snap.Completed)
	}
	if len(snap.Errors) != 10 {
		t.Errorf("errors = %d, want 10", len(snap.Errors))
	}
}

func TestNewMirrorDefaultWorkers(t *testing.T) {
	m := New(nil, nil, nil, slog.Default(), 0)
	if m.workers != 1 {
		t.Errorf("workers = %d, want 1 (minimum)", m.workers)
	}

	m = New(nil, nil, nil, slog.Default(), -5)
	if m.workers != 1 {
		t.Errorf("workers = %d, want 1 (minimum)", m.workers)
	}
}

16
internal/mirror/registry.go
Normal file

@@ -0,0 +1,16 @@
package mirror

import (
	"context"
	"fmt"
)

// RegistrySource enumerates all packages in a registry for full mirroring.
// Registry enumeration is not yet implemented for any ecosystem.
type RegistrySource struct {
	Ecosystem string
}

func (s *RegistrySource) Enumerate(_ context.Context, _ func(PackageVersion) error) error {
	return fmt.Errorf("registry enumeration is not yet implemented for ecosystem %q", s.Ecosystem)
}

46
internal/mirror/registry_test.go
Normal file

@@ -0,0 +1,46 @@
package mirror

import (
	"context"
	"testing"
)

func TestRegistrySourceUnsupported(t *testing.T) {
	source := &RegistrySource{Ecosystem: "golang"}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected error for unsupported ecosystem")
	}
}

func TestRegistrySourceNPMNotImplemented(t *testing.T) {
	source := &RegistrySource{Ecosystem: "npm"}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected not-implemented error")
	}
}

func TestRegistrySourcePyPINotImplemented(t *testing.T) {
	source := &RegistrySource{Ecosystem: "pypi"}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected not-implemented error")
	}
}

func TestRegistrySourceCargoNotImplemented(t *testing.T) {
	source := &RegistrySource{Ecosystem: "cargo"}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected not-implemented error")
	}
}

190
internal/mirror/source.go
Normal file

@@ -0,0 +1,190 @@
package mirror

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os"

	cdx "github.com/CycloneDX/cyclonedx-go"
	"github.com/git-pkgs/purl"
	"github.com/git-pkgs/registries"
	_ "github.com/git-pkgs/registries/all"
	spdxjson "github.com/spdx/tools-golang/json"
	"github.com/spdx/tools-golang/spdx"
	spdxtv "github.com/spdx/tools-golang/tagvalue"
)

// PackageVersion identifies a specific package version to mirror.
type PackageVersion struct {
	Ecosystem string
	Name      string
	Version   string
}

func (pv PackageVersion) String() string {
	return fmt.Sprintf("pkg:%s/%s@%s", pv.Ecosystem, pv.Name, pv.Version)
}

// Source produces PackageVersion items for mirroring.
type Source interface {
	Enumerate(ctx context.Context, fn func(PackageVersion) error) error
}

// PURLSource yields packages from PURL strings.
// Versioned PURLs produce a single item. Unversioned PURLs look up all versions from the registry.
type PURLSource struct {
	PURLs     []string
	RegClient *registries.Client
}

func (s *PURLSource) Enumerate(ctx context.Context, fn func(PackageVersion) error) error {
	client := s.RegClient
	if client == nil {
		client = registries.DefaultClient()
	}

	for _, purlStr := range s.PURLs {
		p, err := purl.Parse(purlStr)
		if err != nil {
			return fmt.Errorf("parsing PURL %q: %w", purlStr, err)
		}

		ecosystem := purl.PURLTypeToEcosystem(p.Type)
		name := p.Name
		if p.Namespace != "" {
			name = p.Namespace + "/" + p.Name
		}

		if p.Version != "" {
			if err := fn(PackageVersion{Ecosystem: ecosystem, Name: name, Version: p.Version}); err != nil {
				return err
			}
			continue
		}

		// Unversioned: enumerate all versions
		versions, err := s.fetchVersions(ctx, client, ecosystem, name)
		if err != nil {
			return fmt.Errorf("fetching versions for %s/%s: %w", ecosystem, name, err)
		}
		for _, v := range versions {
			if err := fn(PackageVersion{Ecosystem: ecosystem, Name: name, Version: v}); err != nil {
				return err
			}
		}
	}
	return nil
}

func (s *PURLSource) fetchVersions(ctx context.Context, client *registries.Client, ecosystem, name string) ([]string, error) {
	reg, err := registries.New(purl.EcosystemToPURLType(ecosystem), "", client)
	if err != nil {
		return nil, err
	}
	versions, err := reg.FetchVersions(ctx, name)
	if err != nil {
		return nil, err
	}
	result := make([]string, len(versions))
	for i, v := range versions {
		result[i] = v.Number
	}
	return result, nil
}

// SBOMSource extracts package versions from a CycloneDX or SPDX SBOM file.
type SBOMSource struct {
	Path      string
	RegClient *registries.Client
}

func (s *SBOMSource) Enumerate(ctx context.Context, fn func(PackageVersion) error) error {
	purls, err := s.extractPURLs()
	if err != nil {
		return fmt.Errorf("reading SBOM %s: %w", s.Path, err)
	}

	inner := &PURLSource{PURLs: purls, RegClient: s.RegClient}
	return inner.Enumerate(ctx, fn)
}

func (s *SBOMSource) extractPURLs() ([]string, error) {
	data, err := os.ReadFile(s.Path)
	if err != nil {
		return nil, err
	}

	// Try CycloneDX first
	if purls, err := extractCycloneDXPURLs(data); err == nil && len(purls) > 0 {
		return purls, nil
	}

	// Try SPDX JSON
	if purls, err := extractSPDXJSONPURLs(data); err == nil && len(purls) > 0 {
		return purls, nil
	}

	// Try SPDX tag-value
	if purls, err := extractSPDXTVPURLs(data); err == nil && len(purls) > 0 {
		return purls, nil
	}

	return nil, fmt.Errorf("could not parse SBOM as CycloneDX or SPDX")
}

func extractCycloneDXPURLs(data []byte) ([]string, error) {
	bom := new(cdx.BOM)
	if err := json.Unmarshal(data, bom); err != nil {
		// Try XML
		decoder := cdx.NewBOMDecoder(bytes.NewReader(data), cdx.BOMFileFormatXML)
		bom = new(cdx.BOM)
		if err := decoder.Decode(bom); err != nil {
			return nil, err
		}
	}

	if bom.Components == nil {
		return nil, nil
	}

	var purls []string
	for _, c := range *bom.Components {
		if c.PackageURL != "" {
			purls = append(purls, c.PackageURL)
		}
	}
	return purls, nil
}

func extractSPDXJSONPURLs(data []byte) ([]string, error) {
	doc, err := spdxjson.Read(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	return extractSPDXDocPURLs(doc), nil
}

func extractSPDXTVPURLs(data []byte) ([]string, error) {
	doc, err := spdxtv.Read(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	return extractSPDXDocPURLs(doc), nil
}

func extractSPDXDocPURLs(doc *spdx.Document) []string {
	if doc == nil {
		return nil
	}
	var purls []string
	for _, pkg := range doc.Packages {
		for _, ref := range pkg.PackageExternalReferences {
			if ref.RefType == "purl" {
				purls = append(purls, ref.Locator)
			}
		}
	}
	return purls
}
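
A minimal sketch of enumerating an SBOM through the types above; only SBOMSource, PackageVersion, and Enumerate come from this change, while the file path and print loop are illustrative assumptions:

// Sketch: list every package version referenced by an SBOM file.
// "./bom.json" is a placeholder path, not taken from the source.
func listSBOM(ctx context.Context) error {
	src := &SBOMSource{Path: "./bom.json"}
	return src.Enumerate(ctx, func(pv PackageVersion) error {
		fmt.Println(pv.String()) // e.g. pkg:npm/lodash@4.17.21
		return nil
	})
}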

243
internal/mirror/source_test.go
Normal file

@@ -0,0 +1,243 @@
package mirror

import (
	"context"
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
)

func TestPURLSourceVersioned(t *testing.T) {
	source := &PURLSource{
		PURLs: []string{
			"pkg:npm/lodash@4.17.21",
			"pkg:cargo/serde@1.0.0",
			"pkg:pypi/requests@2.31.0",
		},
	}

	var items []PackageVersion
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	if err != nil {
		t.Fatalf("Enumerate() error = %v", err)
	}

	if len(items) != 3 {
		t.Fatalf("got %d items, want 3", len(items))
	}

	expected := []PackageVersion{
		{Ecosystem: "npm", Name: "lodash", Version: "4.17.21"},
		{Ecosystem: "cargo", Name: "serde", Version: "1.0.0"},
		{Ecosystem: "pypi", Name: "requests", Version: "2.31.0"},
	}

	for i, want := range expected {
		got := items[i]
		if got.Ecosystem != want.Ecosystem || got.Name != want.Name || got.Version != want.Version {
			t.Errorf("items[%d] = %v, want %v", i, got, want)
		}
	}
}

func TestPURLSourceScopedPackage(t *testing.T) {
	source := &PURLSource{
		PURLs: []string{"pkg:npm/%40babel/core@7.23.0"},
	}

	var items []PackageVersion
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	if err != nil {
		t.Fatalf("Enumerate() error = %v", err)
	}

	if len(items) != 1 {
		t.Fatalf("got %d items, want 1", len(items))
	}

	if items[0].Name != "@babel/core" {
		t.Errorf("name = %q, want %q", items[0].Name, "@babel/core")
	}
	if items[0].Version != "7.23.0" {
		t.Errorf("version = %q, want %q", items[0].Version, "7.23.0")
	}
}

func TestPURLSourceInvalid(t *testing.T) {
	source := &PURLSource{
		PURLs: []string{"not-a-purl"},
	}

	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected error for invalid PURL")
	}
}

func TestPURLSourceCallbackError(t *testing.T) {
	source := &PURLSource{
		PURLs: []string{"pkg:npm/lodash@4.17.21"},
	}

	wantErr := context.Canceled
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return wantErr
	})
	if err != wantErr {
		t.Fatalf("got error %v, want %v", err, wantErr)
	}
}

func TestPackageVersionString(t *testing.T) {
	pv := PackageVersion{Ecosystem: "npm", Name: "lodash", Version: "4.17.21"}
	got := pv.String()
	want := "pkg:npm/lodash@4.17.21"
	if got != want {
		t.Errorf("String() = %q, want %q", got, want)
	}
}

func TestSBOMSourceCycloneDXJSON(t *testing.T) {
	bom := map[string]any{
		"bomFormat":   "CycloneDX",
		"specVersion": "1.4",
		"components": []map[string]any{
			{"type": "library", "name": "lodash", "version": "4.17.21", "purl": "pkg:npm/lodash@4.17.21"},
			{"type": "library", "name": "serde", "version": "1.0.0", "purl": "pkg:cargo/serde@1.0.0"},
		},
	}

	path := writeTempJSON(t, bom)
	source := &SBOMSource{Path: path}

	var items []PackageVersion
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	if err != nil {
		t.Fatalf("Enumerate() error = %v", err)
	}

	if len(items) != 2 {
		t.Fatalf("got %d items, want 2", len(items))
	}

	if items[0].Ecosystem != "npm" || items[0].Name != "lodash" || items[0].Version != "4.17.21" {
		t.Errorf("items[0] = %v", items[0])
	}
	if items[1].Ecosystem != "cargo" || items[1].Name != "serde" || items[1].Version != "1.0.0" {
		t.Errorf("items[1] = %v", items[1])
	}
}

func TestSBOMSourceSPDXJSON(t *testing.T) {
	doc := map[string]any{
		"spdxVersion":       "SPDX-2.3",
		"dataLicense":       "CC0-1.0",
		"SPDXID":            "SPDXRef-DOCUMENT",
		"name":              "test",
		"documentNamespace": "https://example.com/test",
		"packages": []map[string]any{
			{
				"SPDXID":           "SPDXRef-Package",
				"name":             "lodash",
				"version":          "4.17.21",
				"downloadLocation": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
				"externalRefs": []map[string]any{
					{
						"referenceCategory": "PACKAGE-MANAGER",
						"referenceType":     "purl",
						"referenceLocator":  "pkg:npm/lodash@4.17.21",
					},
				},
			},
		},
	}

	path := writeTempJSON(t, doc)
	source := &SBOMSource{Path: path}

	var items []PackageVersion
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		items = append(items, pv)
		return nil
	})
	if err != nil {
		t.Fatalf("Enumerate() error = %v", err)
	}

	if len(items) != 1 {
		t.Fatalf("got %d items, want 1", len(items))
	}

	if items[0].Name != "lodash" || items[0].Version != "4.17.21" {
		t.Errorf("items[0] = %v", items[0])
	}
}

func TestSBOMSourceNonexistentFile(t *testing.T) {
	source := &SBOMSource{Path: "/nonexistent/sbom.json"}

	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected error for nonexistent file")
	}
}

func TestSBOMSourceInvalidFormat(t *testing.T) {
	path := filepath.Join(t.TempDir(), "invalid.txt")
	if err := os.WriteFile(path, []byte("this is not an SBOM"), 0644); err != nil {
		t.Fatal(err)
	}

	source := &SBOMSource{Path: path}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected error for invalid SBOM")
	}
}

func TestSBOMSourceEmptyCycloneDX(t *testing.T) {
	bom := map[string]any{
		"bomFormat":   "CycloneDX",
		"specVersion": "1.4",
	}
	path := writeTempJSON(t, bom)

	// This should fall through to SPDX parsing, which will also fail,
	// resulting in an error about not being able to parse
	source := &SBOMSource{Path: path}
	err := source.Enumerate(context.Background(), func(pv PackageVersion) error {
		return nil
	})
	if err == nil {
		t.Fatal("expected error for empty SBOM")
	}
}

func writeTempJSON(t *testing.T, v any) string {
	t.Helper()
	data, err := json.Marshal(v)
	if err != nil {
		t.Fatal(err)
	}
	path := filepath.Join(t.TempDir(), "sbom.json")
	if err := os.WriteFile(path, data, 0644); err != nil {
		t.Fatal(err)
	}
	return path
}

@@ -135,36 +135,59 @@ type BulkResponse struct {
	Packages map[string]*PackageResponse `json:"packages"`
}

// HandleGetPackage handles GET /api/package/{ecosystem}/{name}
// @Summary Get package metadata
// @Description Returns enriched package metadata. URL-encode scoped names (e.g. @scope/name -> %40scope%2Fname).
// @Tags api
// @Produce json
// @Param ecosystem path string true "Ecosystem"
// @Param name path string true "Package name"
// @Success 200 {object} PackageResponse
// @Failure 400 {string} string
// @Failure 404 {string} string
// @Failure 500 {string} string
// @Router /api/package/{ecosystem}/{name} [get]
func (h *APIHandler) HandleGetPackage(w http.ResponseWriter, r *http.Request) {
// HandlePackagePath dispatches /api/package/{ecosystem}/* to the appropriate handler.
// Resolves namespaced package names (Composer vendor/name, npm @scope/name) from the path.
func (h *APIHandler) HandlePackagePath(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	wildcard := chi.URLParam(r, "*")
	segments := splitWildcardPath(wildcard)

	if ecosystem == "" || name == "" {
	if ecosystem == "" || len(segments) == 0 {
		http.Error(w, "ecosystem and name are required", http.StatusBadRequest)
		return
	}

	// Handle scoped npm packages (e.g., @scope/name)
	if strings.HasPrefix(name, "@") {
		// The path is split, so we need to get the rest
		rest := chi.URLParam(r, "rest")
		if rest != "" {
			name = name + "/" + rest
		}
	// For the API, we don't have a DB to resolve names, so we use a heuristic:
	// the last segment that looks like a version (contains a digit) is the version,
	// everything before it is the name. If no version-like segment, it's all name.
	//
	// With 1 segment: package lookup (name only)
	// With 2+ segments: last segment is version, rest is name
	// Exception: if this is a namespaced ecosystem and we have exactly 2 segments,
	// it could be vendor/name with no version. The enrichment service handles
	// both cases (it will try to look up the package either way).
	if len(segments) == 1 {
		h.getPackage(w, r, ecosystem, segments[0])
		return
	}

	// Try the full path as a package name first via enrichment.
	// If it resolves, this is a package-only lookup.
	fullName := strings.Join(segments, "/")
	info, err := h.enrichment.EnrichPackage(r.Context(), ecosystem, fullName)
	if err == nil && info != nil {
		resp := &PackageResponse{
			Ecosystem:       info.Ecosystem,
			Name:            info.Name,
			LatestVersion:   info.LatestVersion,
			License:         info.License,
			LicenseCategory: string(h.enrichment.CategorizeLicense(info.License)),
			Description:     info.Description,
			Homepage:        info.Homepage,
			Repository:      info.Repository,
			RegistryURL:     info.RegistryURL,
		}
		writeJSON(w, resp)
		return
	}

	// Otherwise, last segment is the version.
	name := strings.Join(segments[:len(segments)-1], "/")
	version := segments[len(segments)-1]
	h.getVersion(w, r, ecosystem, name, version)
}

func (h *APIHandler) getPackage(w http.ResponseWriter, r *http.Request, ecosystem, name string) {
	info, err := h.enrichment.EnrichPackage(r.Context(), ecosystem, name)
	if err != nil {
		http.Error(w, "failed to enrich package", http.StatusInternalServerError)

@@ -191,28 +214,7 @@ func (h *APIHandler) HandleGetPackage(w http.ResponseWriter, r *http.Request) {
	writeJSON(w, resp)
}

// HandleGetVersion handles GET /api/package/{ecosystem}/{name}/{version}
// @Summary Get version metadata and vulnerabilities
// @Description Returns enriched package+version metadata and vulnerability data.
// @Tags api
// @Produce json
// @Param ecosystem path string true "Ecosystem"
// @Param name path string true "Package name"
// @Param version path string true "Version"
// @Success 200 {object} EnrichmentResponse
// @Failure 400 {string} string
// @Failure 500 {string} string
// @Router /api/package/{ecosystem}/{name}/{version} [get]
func (h *APIHandler) HandleGetVersion(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	version := chi.URLParam(r, "version")

	if ecosystem == "" || name == "" || version == "" {
		http.Error(w, "ecosystem, name, and version are required", http.StatusBadRequest)
		return
	}

func (h *APIHandler) getVersion(w http.ResponseWriter, r *http.Request, ecosystem, name, version string) {
	result, err := h.enrichment.EnrichFull(r.Context(), ecosystem, name, version)
	if err != nil {
		http.Error(w, "failed to enrich version", http.StatusInternalServerError)

@@ -267,32 +269,31 @@ func (h *APIHandler) HandleGetVersion(w http.ResponseWriter, r *http.Request) {
	writeJSON(w, resp)
}

// HandleGetVulns handles GET /api/vulns/{ecosystem}/{name}
// @Summary Get vulnerabilities for a package or version
// @Description Returns vulnerabilities for a package across versions, or for a specific version if provided.
// @Tags api
// @Produce json
// @Param ecosystem path string true "Ecosystem"
// @Param name path string true "Package name"
// @Param version path string false "Version"
// @Success 200 {object} VulnsResponse
// @Failure 400 {string} string
// @Failure 500 {string} string
// @Router /api/vulns/{ecosystem}/{name} [get]
// @Router /api/vulns/{ecosystem}/{name}/{version} [get]
func (h *APIHandler) HandleGetVulns(w http.ResponseWriter, r *http.Request) {
// HandleVulnsPath dispatches /api/vulns/{ecosystem}/* to the vulns handler.
// Supports both {name} and {name}/{version} paths with namespaced package names.
func (h *APIHandler) HandleVulnsPath(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	version := chi.URLParam(r, "version")
	wildcard := chi.URLParam(r, "*")
	segments := splitWildcardPath(wildcard)

	if ecosystem == "" || name == "" {
	if ecosystem == "" || len(segments) == 0 {
		http.Error(w, "ecosystem and name are required", http.StatusBadRequest)
		return
	}

	// If no version specified, use "0" to get all vulnerabilities
	if version == "" {
		version = "0"
	// Last segment could be a version. Try full path as name first,
	// then split off the last segment as version.
	name := strings.Join(segments, "/")
	version := "0"

	if len(segments) > 1 {
		// Try enrichment with the full path as name.
		// If it doesn't resolve, assume last segment is version.
		info, err := h.enrichment.EnrichPackage(r.Context(), ecosystem, name)
		if err != nil || info == nil {
			name = strings.Join(segments[:len(segments)-1], "/")
			version = segments[len(segments)-1]
		}
	}

	vulns, err := h.enrichment.CheckVulnerabilities(r.Context(), ecosystem, name, version)

@@ -584,11 +585,11 @@ func (h *APIHandler) HandlePackagesList(w http.ResponseWriter, r *http.Request)
	validSorts := map[string]bool{
		defaultSortBy: true,
		"name":        true,
		"size":        true,
		"cached_at":   true,
		"ecosystem":   true,
		"vulns":       true,
		"name":      true,
		"size":      true,
		"cached_at": true,
		"ecosystem": true,
		"vulns":     true,
	}
	if !validSorts[sortBy] {
		http.Error(w, "invalid sort parameter", http.StatusBadRequest)

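The name/version heuristic in HandlePackagePath above is easiest to see in isolation. A minimal sketch under stated assumptions: splitWildcardPath is only referenced in this diff, so the implementation here is assumed, and nameAndVersion is a hypothetical helper restating the fallback rule (the enrichment lookup for namespaced packages is omitted):

// Sketch: split a chi wildcard into non-empty path segments.
// Assumed implementation; the real splitWildcardPath is not shown in this diff.
func splitWildcardPath(wildcard string) []string {
	var segs []string
	for _, s := range strings.Split(wildcard, "/") {
		if s != "" {
			segs = append(segs, s)
		}
	}
	return segs
}

// Hypothetical helper: one segment is a bare package name; with more
// segments the last one is treated as the version, unless the full path
// first resolves as a package name (the vendor/name case handled above).
func nameAndVersion(segments []string) (name, version string) {
	if len(segments) == 1 {
		return segments[0], ""
	}
	return strings.Join(segments[:len(segments)-1], "/"), segments[len(segments)-1]
}
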
@@ -31,55 +31,37 @@ func TestNewAPIHandler(t *testing.T) {
	}
}

func TestHandleGetPackage_MissingParams(t *testing.T) {
func TestHandlePackagePath_MissingParams(t *testing.T) {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	svc := enrichment.New(logger)
	h := NewAPIHandler(svc, nil)

	r := chi.NewRouter()
	r.Get("/api/package/{ecosystem}/*", h.HandlePackagePath)

	req := httptest.NewRequest("GET", "/api/package//", nil)
	req.SetPathValue("ecosystem", "")
	req.SetPathValue("name", "")

	w := httptest.NewRecorder()
	h.HandleGetPackage(w, req)
	r.ServeHTTP(w, req)

	if w.Code != http.StatusBadRequest {
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code)
	if w.Code != http.StatusBadRequest && w.Code != http.StatusNotFound {
		t.Errorf("expected status 400 or 404, got %d", w.Code)
	}
}

func TestHandleGetVersion_MissingParams(t *testing.T) {
func TestHandleVulnsPath_MissingParams(t *testing.T) {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	svc := enrichment.New(logger)
	h := NewAPIHandler(svc, nil)

	req := httptest.NewRequest("GET", "/api/package///", nil)
	req.SetPathValue("ecosystem", "")
	req.SetPathValue("name", "")
	req.SetPathValue("version", "")

	w := httptest.NewRecorder()
	h.HandleGetVersion(w, req)

	if w.Code != http.StatusBadRequest {
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code)
	}
}

func TestHandleGetVulns_MissingParams(t *testing.T) {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	svc := enrichment.New(logger)
	h := NewAPIHandler(svc, nil)
	r := chi.NewRouter()
	r.Get("/api/vulns/{ecosystem}/*", h.HandleVulnsPath)

	req := httptest.NewRequest("GET", "/api/vulns//", nil)
	req.SetPathValue("ecosystem", "")
	req.SetPathValue("name", "")

	w := httptest.NewRecorder()
	h.HandleGetVulns(w, req)
	r.ServeHTTP(w, req)

	if w.Code != http.StatusBadRequest {
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code)
	if w.Code != http.StatusBadRequest && w.Code != http.StatusNotFound {
		t.Errorf("expected status 400 or 404, got %d", w.Code)
	}
}

@@ -1,6 +1,7 @@
package server

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"

@@ -17,15 +18,72 @@

const contentTypePlainText = "text/plain; charset=utf-8"

// getStripPrefix returns the path prefix to strip for a given ecosystem.
// npm packages wrap content in a "package/" directory.
func getStripPrefix(ecosystem string) string {
	switch ecosystem {
	case "npm":
		return "package/"
	default:
// archiveFilename returns a filename suitable for archive format detection.
// Some ecosystems (e.g. composer) store artifacts with bare hash filenames
// that have no extension. This adds .zip when the original has no extension
// and the content is likely a zip archive.
func archiveFilename(filename string) string {
	if path.Ext(filename) == "" {
		return filename + ".zip"
	}
	return filename
}

// detectSingleRootDir returns the single top-level directory name if all files
// in the archive live under one common directory (e.g. GitHub zipballs use
// "repo-hash/"). Returns "" if there's no single root or the archive is flat.
func detectSingleRootDir(reader archives.Reader) string {
	files, err := reader.List()
	if err != nil || len(files) == 0 {
		return ""
	}

	var root string
	for _, f := range files {
		parts := strings.SplitN(f.Path, "/", 2) //nolint:mnd // split into dir + rest
		if len(parts) == 0 {
			continue
		}
		dir := parts[0]
		if root == "" {
			root = dir
		} else if dir != root {
			return ""
		}
	}

	if root == "" {
		return ""
	}
	return root + "/"
}

// openArchive opens a cached artifact as an archive reader, auto-detecting
// and stripping a single top-level directory prefix (like GitHub zipballs).
// For npm, the hardcoded "package/" prefix takes precedence.
func openArchive(filename string, content io.Reader, ecosystem string) (archives.Reader, error) { //nolint:ireturn // wraps multiple archive implementations
	fname := archiveFilename(filename)

	// npm always uses package/ prefix
	if ecosystem == "npm" {
		return archives.OpenWithPrefix(fname, content, "package/")
	}

	// Read content into memory so we can scan then wrap with prefix
	data, err := io.ReadAll(content)
	if err != nil {
		return nil, fmt.Errorf("reading artifact: %w", err)
	}

	// Open once to detect root prefix
	probe, err := archives.Open(fname, bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	prefix := detectSingleRootDir(probe)
	_ = probe.Close()

	return archives.OpenWithPrefix(fname, bytes.NewReader(data), prefix)
}

// BrowseListResponse contains the file listing for a directory in an archive.

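As a quick illustration of the detection logic above: a GitHub-style zipball whose entries all live under one root directory comes back with that prefix stripped. A minimal sketch, assuming only the openArchive helper and archives.Reader methods shown in this hunk; the hash filename and "composer" ecosystem are example inputs:

// Sketch: open a cached zipball and list paths with the root dir stripped.
// f is any io.Reader over the artifact bytes; "composer" takes the
// auto-detection branch rather than npm's fixed "package/" prefix.
func listArtifact(f io.Reader) error {
	reader, err := openArchive("d2e2f014ccd6ec9fae8dbe6336a4164346a2a856", f, "composer")
	if err != nil {
		return err
	}
	defer func() { _ = reader.Close() }()

	files, err := reader.List()
	if err != nil {
		return err
	}
	for _, file := range files {
		fmt.Println(file.Path) // e.g. "README.md", not "repo-abc123/README.md"
	}
	return nil
}
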
@@ -57,10 +115,85 @@ type BrowseFileInfo struct {
// @Failure 404 {string} string
// @Failure 500 {string} string
// @Router /api/browse/{ecosystem}/{name}/{version} [get]
func (s *Server) handleBrowseList(w http.ResponseWriter, r *http.Request) {
// handleBrowsePath dispatches /api/browse/{ecosystem}/* to the appropriate browse handler.
// It resolves namespaced package names by consulting the database.
//
// Supported paths:
//
//	{name}/{version}             -> browse list
//	{name}/{version}/file/{path} -> browse file
func (s *Server) handleBrowsePath(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	version := chi.URLParam(r, "version")
	wildcard := chi.URLParam(r, "*")
	segments := splitWildcardPath(wildcard)

	if ecosystem == "" || len(segments) < 2 {
		http.Error(w, "ecosystem, name, and version required", http.StatusBadRequest)
		return
	}

	// Check for /file/ in the path for browse file requests.
	fileIdx := -1
	for i, seg := range segments {
		if seg == "file" && i > 0 {
			fileIdx = i
			break
		}
	}

	if fileIdx >= 0 {
		// Everything before "file" is name+version, everything after is the file path.
		nameVersionSegments := segments[:fileIdx]
		filePath := strings.Join(segments[fileIdx+1:], "/")

		name, rest := resolvePackageName(s.db, ecosystem, nameVersionSegments)
		if name == "" && len(nameVersionSegments) >= 2 {
			name = strings.Join(nameVersionSegments[:len(nameVersionSegments)-1], "/")
			rest = nameVersionSegments[len(nameVersionSegments)-1:]
		}
		if len(rest) != 1 {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		s.browseFile(w, r, ecosystem, name, rest[0], filePath)
		return
	}

	// No /file/ segment: this is a browse list.
	name, rest := resolvePackageName(s.db, ecosystem, segments)
	if name == "" && len(segments) >= 2 {
		name = strings.Join(segments[:len(segments)-1], "/")
		rest = segments[len(segments)-1:]
	}
	if len(rest) != 1 {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	s.browseList(w, r, ecosystem, name, rest[0])
}

// handleComparePath dispatches /api/compare/{ecosystem}/* to the compare handler.
// Supported paths: {name}/{fromVersion}/{toVersion}
func (s *Server) handleComparePath(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	wildcard := chi.URLParam(r, "*")
	segments := splitWildcardPath(wildcard)

	if ecosystem == "" || len(segments) < 3 {
		http.Error(w, "ecosystem, name, fromVersion, and toVersion required", http.StatusBadRequest)
		return
	}

	// The last two segments are fromVersion and toVersion.
	// Everything before that is the package name.
	name := strings.Join(segments[:len(segments)-2], "/")
	fromVersion := segments[len(segments)-2]
	toVersion := segments[len(segments)-1]

	s.compareDiff(w, r, ecosystem, name, fromVersion, toVersion)
}

func (s *Server) browseList(w http.ResponseWriter, r *http.Request, ecosystem, name, version string) {
	dirPath := r.URL.Query().Get("path")

	// Get the artifact for this version

@@ -99,9 +232,8 @@ func (s *Server) handleBrowseList(w http.ResponseWriter, r *http.Request) {
	}
	defer func() { _ = artifactReader.Close() }()

	// Open archive with appropriate prefix stripping
	stripPrefix := getStripPrefix(ecosystem)
	archiveReader, err := archives.OpenWithPrefix(cachedArtifact.Filename, artifactReader, stripPrefix)
	// Open archive with auto-detected prefix stripping
	archiveReader, err := openArchive(cachedArtifact.Filename, artifactReader, ecosystem)
	if err != nil {
		s.logger.Error("failed to open archive", "error", err, "filename", cachedArtifact.Filename)
		http.Error(w, "failed to open archive", http.StatusInternalServerError)

@@ -152,13 +284,7 @@ func (s *Server) handleBrowseList(w http.ResponseWriter, r *http.Request) {
// @Failure 404 {string} string
// @Failure 500 {string} string
// @Router /api/browse/{ecosystem}/{name}/{version}/file/{filepath} [get]
func (s *Server) handleBrowseFile(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	version := chi.URLParam(r, "version")

	// Get the wildcard path
	filePath := chi.URLParam(r, "*")
func (s *Server) browseFile(w http.ResponseWriter, r *http.Request, ecosystem, name, version, filePath string) {
	if filePath == "" {
		http.Error(w, "file path required", http.StatusBadRequest)
		return

@@ -200,9 +326,8 @@ func (s *Server) handleBrowseFile(w http.ResponseWriter, r *http.Request) {
	}
	defer func() { _ = artifactReader.Close() }()

	// Open archive with appropriate prefix stripping
	stripPrefix := getStripPrefix(ecosystem)
	archiveReader, err := archives.OpenWithPrefix(cachedArtifact.Filename, artifactReader, stripPrefix)
	// Open archive with auto-detected prefix stripping
	archiveReader, err := openArchive(cachedArtifact.Filename, artifactReader, ecosystem)
	if err != nil {
		s.logger.Error("failed to open archive", "error", err, "filename", cachedArtifact.Filename)
		http.Error(w, "failed to open archive", http.StatusInternalServerError)

@@ -345,24 +470,7 @@ type BrowseSourceData struct {
	Version string
}

// handleBrowseSource renders the source code browser UI.
// GET /package/{ecosystem}/{name}/{version}/browse
func (s *Server) handleBrowseSource(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	version := chi.URLParam(r, "version")

	data := BrowseSourceData{
		Ecosystem:   ecosystem,
		PackageName: name,
		Version:     version,
	}

	if err := s.templates.Render(w, "browse_source", data); err != nil {
		s.logger.Error("failed to render browse source page", "error", err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
	}
}
// handleBrowseSource is now showBrowseSource in server.go, dispatched via handlePackagePath.

// handleCompareDiff compares two versions and returns a diff.
// GET /api/compare/{ecosystem}/{name}/{fromVersion}/{toVersion}

@@ -378,12 +486,7 @@ func (s *Server) handleBrowseSource(w http.ResponseWriter, r *http.Request) {
// @Failure 404 {string} string
// @Failure 500 {string} string
// @Router /api/compare/{ecosystem}/{name}/{fromVersion}/{toVersion} [get]
func (s *Server) handleCompareDiff(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	fromVersion := chi.URLParam(r, "fromVersion")
	toVersion := chi.URLParam(r, "toVersion")

func (s *Server) compareDiff(w http.ResponseWriter, r *http.Request, ecosystem, name, fromVersion, toVersion string) {
	// Get artifacts for both versions
	fromPURL := purl.MakePURLString(ecosystem, name, fromVersion)
	toPURL := purl.MakePURLString(ecosystem, name, toVersion)

@@ -437,9 +540,7 @@ func (s *Server) handleCompareDiff(w http.ResponseWriter, r *http.Request) {
	}
	defer func() { _ = toReader.Close() }()

	stripPrefix := getStripPrefix(ecosystem)

	fromArchive, err := archives.OpenWithPrefix(fromArtifact.Filename, fromReader, stripPrefix)
	fromArchive, err := openArchive(fromArtifact.Filename, fromReader, ecosystem)
	if err != nil {
		s.logger.Error("failed to open from archive", "error", err)
		http.Error(w, "failed to open from archive", http.StatusInternalServerError)

@@ -447,7 +548,7 @@ func (s *Server) handleCompareDiff(w http.ResponseWriter, r *http.Request) {
	}
	defer func() { _ = fromArchive.Close() }()

	toArchive, err := archives.OpenWithPrefix(toArtifact.Filename, toReader, stripPrefix)
	toArchive, err := openArchive(toArtifact.Filename, toReader, ecosystem)
	if err != nil {
		s.logger.Error("failed to open to archive", "error", err)
		http.Error(w, "failed to open to archive", http.StatusInternalServerError)

@@ -475,34 +576,4 @@ type ComparePageData struct {
	ToVersion string
}

// handleComparePage renders the version comparison UI.
// GET /package/{ecosystem}/{name}/compare/{versions}
// where {versions} is in format "fromVersion...toVersion"
func (s *Server) handleComparePage(w http.ResponseWriter, r *http.Request) {
	ecosystem := chi.URLParam(r, "ecosystem")
	name := chi.URLParam(r, "name")
	versions := chi.URLParam(r, "versions")

	// Parse versions (format: "1.0.0...2.0.0")
	const compareVersionParts = 2
	parts := strings.Split(versions, "...")
	if len(parts) != compareVersionParts {
		http.Error(w, "invalid version format, use: version1...version2", http.StatusBadRequest)
		return
	}

	fromVersion := parts[0]
	toVersion := parts[1]

	data := ComparePageData{
		Ecosystem:   ecosystem,
		PackageName: name,
		FromVersion: fromVersion,
		ToVersion:   toVersion,
	}

	if err := s.templates.Render(w, "compare_versions", data); err != nil {
		s.logger.Error("failed to render compare page", "error", err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
	}
}
// handleComparePage is now showComparePage in server.go, dispatched via handlePackagePath.

@ -2,6 +2,7 @@ package server
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
|
|
@ -168,8 +169,8 @@ func TestHandleBrowseFile(t *testing.T) {
|
|||
|
||||
func TestDetectContentType(t *testing.T) {
|
||||
tests := []struct {
|
||||
filename string
|
||||
expectedCT string
|
||||
filename string
|
||||
expectedCT string
|
||||
}{
|
||||
{"file.txt", contentTypePlainText},
|
||||
{"file.md", contentTypePlainText},
|
||||
|
|
@ -590,3 +591,195 @@ func TestHandleComparePage(t *testing.T) {
|
|||
t.Errorf("expected status 400 for invalid separator, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchiveFilename(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"package.tar.gz", "package.tar.gz"},
|
||||
{"d2e2f014ccd6ec9fae8dbe6336a4164346a2a856", "d2e2f014ccd6ec9fae8dbe6336a4164346a2a856.zip"},
|
||||
{"file.zip", "file.zip"},
|
||||
{"archive.tgz", "archive.tgz"},
|
||||
{"noext", "noext.zip"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got := archiveFilename(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("archiveFilename(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenArchiveStripsSingleRootDir(t *testing.T) {
|
||||
data := createZipArchive(t, map[string]string{
|
||||
"repo-abc123/README.md": "hello",
|
||||
"repo-abc123/src/main.go": "package main",
|
||||
"repo-abc123/go.mod": "module test",
|
||||
})
|
||||
reader, err := openArchive("test.zip", bytes.NewReader(data), "composer")
    if err != nil {
        t.Fatalf("openArchive failed: %v", err)
    }
    defer func() { _ = reader.Close() }()

    files, err := reader.List()
    if err != nil {
        t.Fatalf("List failed: %v", err)
    }
    for _, f := range files {
        if strings.HasPrefix(f.Path, "repo-abc123/") {
            t.Errorf("file %q still has root prefix after stripping", f.Path)
        }
    }
}

func TestOpenArchiveMultipleRootDirs(t *testing.T) {
    data := createZipArchive(t, map[string]string{
        "src/main.go":    "package main",
        "docs/README.md": "hello",
    })
    reader, err := openArchive("test.zip", bytes.NewReader(data), "composer")
    if err != nil {
        t.Fatalf("openArchive failed: %v", err)
    }
    defer func() { _ = reader.Close() }()

    files, err := reader.List()
    if err != nil {
        t.Fatalf("List failed: %v", err)
    }
    paths := make(map[string]bool)
    for _, f := range files {
        paths[f.Path] = true
    }
    if !paths["src/main.go"] {
        t.Error("expected src/main.go to remain unchanged")
    }
    if !paths["docs/README.md"] {
        t.Error("expected docs/README.md to remain unchanged")
    }
}

func TestOpenArchiveFlatNoSubdirs(t *testing.T) {
    data := createZipArchive(t, map[string]string{
        "README.md": "hello",
        "main.go":   "package main",
    })
    reader, err := openArchive("test.zip", bytes.NewReader(data), "composer")
    if err != nil {
        t.Fatalf("openArchive failed: %v", err)
    }
    defer func() { _ = reader.Close() }()

    files, err := reader.List()
    if err != nil {
        t.Fatalf("List failed: %v", err)
    }
    paths := make(map[string]bool)
    for _, f := range files {
        paths[f.Path] = true
    }
    if !paths["README.md"] {
        t.Error("expected README.md at root")
    }
}

func TestOpenArchiveNpmUsesPackagePrefix(t *testing.T) {
    data := createTarGzArchive(t, map[string]string{
        "package/README.md": "hello",
        "package/index.js":  "module.exports = {}",
    })
    reader, err := openArchive("pkg.tgz", bytes.NewReader(data), "npm")
    if err != nil {
        t.Fatalf("openArchive failed: %v", err)
    }
    defer func() { _ = reader.Close() }()

    files, err := reader.List()
    if err != nil {
        t.Fatalf("List failed: %v", err)
    }
    for _, f := range files {
        if strings.HasPrefix(f.Path, "package/") {
            t.Errorf("file %q still has package/ prefix", f.Path)
        }
    }
}

func TestOpenArchiveExtensionlessFilename(t *testing.T) {
    data := createZipArchive(t, map[string]string{
        "repo-hash/README.md": "hello",
    })
    reader, err := openArchive("d2e2f014ccd6ec9fae8dbe6336a4164346a2a856", bytes.NewReader(data), "composer")
    if err != nil {
        t.Fatalf("openArchive failed: %v", err)
    }
    defer func() { _ = reader.Close() }()

    files, err := reader.List()
    if err != nil {
        t.Fatalf("List failed: %v", err)
    }
    if len(files) == 0 {
        t.Fatal("expected files in archive")
    }
    for _, f := range files {
        if strings.HasPrefix(f.Path, "repo-hash/") {
            t.Errorf("file %q still has root prefix", f.Path)
        }
    }
}

func createZipArchive(t *testing.T, files map[string]string) []byte {
    t.Helper()
    buf := new(bytes.Buffer)
    w := zip.NewWriter(buf)

    for name, content := range files {
        f, err := w.Create(name)
        if err != nil {
            t.Fatalf("failed to create zip entry: %v", err)
        }
        if _, err := f.Write([]byte(content)); err != nil {
            t.Fatalf("failed to write zip content: %v", err)
        }
    }

    if err := w.Close(); err != nil {
        t.Fatalf("failed to close zip writer: %v", err)
    }
    return buf.Bytes()
}

func createTarGzArchive(t *testing.T, files map[string]string) []byte {
    t.Helper()
    buf := new(bytes.Buffer)
    gw := gzip.NewWriter(buf)
    tw := tar.NewWriter(gw)

    for name, content := range files {
        header := &tar.Header{
            Name: name,
            Size: int64(len(content)),
            Mode: 0644,
        }
        if err := tw.WriteHeader(header); err != nil {
            t.Fatalf("failed to write tar header: %v", err)
        }
        if _, err := tw.Write([]byte(content)); err != nil {
            t.Fatalf("failed to write tar content: %v", err)
        }
    }

    if err := tw.Close(); err != nil {
        t.Fatalf("failed to close tar writer: %v", err)
    }
    if err := gw.Close(); err != nil {
        t.Fatalf("failed to close gzip writer: %v", err)
    }
    return buf.Bytes()
}

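For orientation, the behavior these tests pin down (strip a single shared top-level directory; leave flat and multi-root archives untouched) comes down to a helper along these lines. Illustrative sketch only: commonRoot and its package are hypothetical names, not code from this branch.

package archive

import "strings"

// commonRoot returns the single top-level directory shared by every entry
// path, or "" when the archive is flat or spans multiple root directories.
// Hypothetical helper; it mirrors what the tests above assert, not
// necessarily how openArchive is implemented.
func commonRoot(paths []string) string {
    root := ""
    for _, p := range paths {
        i := strings.Index(p, "/")
        if i < 0 {
            return "" // a file at the archive root: nothing to strip
        }
        top := p[:i]
        if root == "" {
            root = top
        } else if root != top {
            return "" // more than one root directory: leave paths unchanged
        }
    }
    return root
}
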
@@ -114,22 +114,25 @@ type PackagesListPageData struct {
}

func supportedEcosystems() []string {
    // this list should be kept sorted in lexicographic order so
    // that the 'select' list in the UI will be in the expected
    // order
    return []string{
        "npm",
        "cargo",
        "gem",
        "go",
        "hex",
        "pub",
        "pypi",
        "maven",
        "nuget",
        "composer",
        "conan",
        "conda",
        "cran",
        "oci",
        "deb",
        "gem",
        "golang",
        "hex",
        "maven",
        "npm",
        "nuget",
        "oci",
        "pub",
        "pypi",
        "rpm",
    }
}

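Since the comment relies on contributors keeping the slice sorted, a small guard test can enforce it mechanically. Illustration only, not part of this branch:

package server

import (
    "sort"
    "testing"
)

// Fails if a new ecosystem is appended out of lexicographic order,
// keeping the UI's select list in the expected order.
func TestSupportedEcosystemsSorted(t *testing.T) {
    if !sort.StringsAreSorted(supportedEcosystems()) {
        t.Error("supportedEcosystems() must be kept sorted")
    }
}
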
70 internal/server/mirror_api.go Normal file

@@ -0,0 +1,70 @@
package server

import (
    "encoding/json"
    "net/http"

    "github.com/git-pkgs/proxy/internal/mirror"
    "github.com/go-chi/chi/v5"
)

// MirrorAPIHandler handles mirror API requests.
type MirrorAPIHandler struct {
    jobs *mirror.JobStore
}

// NewMirrorAPIHandler creates a new mirror API handler.
func NewMirrorAPIHandler(jobs *mirror.JobStore) *MirrorAPIHandler {
    return &MirrorAPIHandler{jobs: jobs}
}

// HandleCreate starts a new mirror job.
func (h *MirrorAPIHandler) HandleCreate(w http.ResponseWriter, r *http.Request) {
    var req mirror.JobRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusBadRequest)
        writeJSON(w, map[string]string{"error": "invalid request body"})
        return
    }

    id, err := h.jobs.Create(req)
    if err != nil {
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusBadRequest)
        writeJSON(w, map[string]string{"error": err.Error()})
        return
    }

    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(http.StatusAccepted)
    writeJSON(w, map[string]string{"id": id})
}

// HandleGet returns the status of a mirror job.
func (h *MirrorAPIHandler) HandleGet(w http.ResponseWriter, r *http.Request) {
    id := chi.URLParam(r, "id")
    job := h.jobs.Get(id)
    if job == nil {
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusNotFound)
        writeJSON(w, map[string]string{"error": "job not found"})
        return
    }

    w.Header().Set("Content-Type", "application/json")
    writeJSON(w, job)
}

// HandleCancel cancels a running mirror job.
func (h *MirrorAPIHandler) HandleCancel(w http.ResponseWriter, r *http.Request) {
    id := chi.URLParam(r, "id")
    if h.jobs.Cancel(id) {
        w.Header().Set("Content-Type", "application/json")
        writeJSON(w, map[string]string{"status": "canceled"})
    } else {
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusNotFound)
        writeJSON(w, map[string]string{"error": "job not found or not running"})
    }
}

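Paired with the routes this branch registers later (POST /api/mirror, GET /api/mirror/{id}), a client round-trip might look like the sketch below. The base URL is a placeholder, and the lowercase "purls" JSON key is an assumption inferred from the PURLs field used in the tests; the diff does not show the struct tags.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Assumed request shape for mirror.JobRequest; the JSON field name is a guess.
    body, _ := json.Marshal(map[string][]string{
        "purls": {"pkg:npm/lodash@4.17.21"},
    })

    // http://localhost:8080 is a placeholder for the proxy's listen address.
    resp, err := http.Post("http://localhost:8080/api/mirror", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var created map[string]string
    _ = json.NewDecoder(resp.Body).Decode(&created)
    fmt.Println("job id:", created["id"]) // then poll GET /api/mirror/{id} for status
}
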
163 internal/server/mirror_api_test.go Normal file

@@ -0,0 +1,163 @@
package server

import (
    "bytes"
    "context"
    "encoding/json"
    "log/slog"
    "net/http"
    "net/http/httptest"
    "os"
    "testing"

    "github.com/git-pkgs/proxy/internal/database"
    "github.com/git-pkgs/proxy/internal/handler"
    "github.com/git-pkgs/proxy/internal/mirror"
    "github.com/git-pkgs/proxy/internal/storage"
    "github.com/git-pkgs/registries/fetch"
    "github.com/go-chi/chi/v5"
)

func setupMirrorAPI(t *testing.T) *MirrorAPIHandler {
    t.Helper()

    dbPath := t.TempDir() + "/test.db"
    db, err := database.Create(dbPath)
    if err != nil {
        t.Fatalf("creating database: %v", err)
    }
    if err := db.MigrateSchema(); err != nil {
        t.Fatalf("migrating schema: %v", err)
    }
    t.Cleanup(func() { _ = db.Close() })

    storeDir := t.TempDir()
    store, err := storage.OpenBucket(context.Background(), "file://"+storeDir)
    if err != nil {
        t.Fatalf("opening storage: %v", err)
    }

    logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
    fetcher := fetch.NewFetcher()
    resolver := fetch.NewResolver()
    proxy := handler.NewProxy(db, store, fetcher, resolver, logger)

    m := mirror.New(proxy, db, store, logger, 1)
    js := mirror.NewJobStore(context.Background(), m)
    return NewMirrorAPIHandler(js)
}

func TestMirrorAPICreateJob(t *testing.T) {
    h := setupMirrorAPI(t)

    body, _ := json.Marshal(mirror.JobRequest{
        PURLs: []string{"pkg:npm/lodash@4.17.21"},
    })

    req := httptest.NewRequest("POST", "/api/mirror", bytes.NewReader(body))
    w := httptest.NewRecorder()
    h.HandleCreate(w, req)

    if w.Code != http.StatusAccepted {
        t.Errorf("status = %d, want %d", w.Code, http.StatusAccepted)
    }

    var resp map[string]string
    if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
        t.Fatalf("decoding response: %v", err)
    }
    if resp["id"] == "" {
        t.Error("expected non-empty job ID")
    }
}

func TestMirrorAPICreateInvalidBody(t *testing.T) {
    h := setupMirrorAPI(t)

    req := httptest.NewRequest("POST", "/api/mirror", bytes.NewReader([]byte("not json")))
    w := httptest.NewRecorder()
    h.HandleCreate(w, req)

    if w.Code != http.StatusBadRequest {
        t.Errorf("status = %d, want %d", w.Code, http.StatusBadRequest)
    }
}

func TestMirrorAPICreateEmptyRequest(t *testing.T) {
    h := setupMirrorAPI(t)

    body, _ := json.Marshal(mirror.JobRequest{})
    req := httptest.NewRequest("POST", "/api/mirror", bytes.NewReader(body))
    w := httptest.NewRecorder()
    h.HandleCreate(w, req)

    if w.Code != http.StatusBadRequest {
        t.Errorf("status = %d, want %d", w.Code, http.StatusBadRequest)
    }
}

func TestMirrorAPIGetNotFound(t *testing.T) {
    h := setupMirrorAPI(t)

    r := chi.NewRouter()
    r.Get("/api/mirror/{id}", h.HandleGet)

    req := httptest.NewRequest("GET", "/api/mirror/nonexistent", nil)
    w := httptest.NewRecorder()
    r.ServeHTTP(w, req)

    if w.Code != http.StatusNotFound {
        t.Errorf("status = %d, want %d", w.Code, http.StatusNotFound)
    }
}

func TestMirrorAPICancelNotFound(t *testing.T) {
    h := setupMirrorAPI(t)

    r := chi.NewRouter()
    r.Delete("/api/mirror/{id}", h.HandleCancel)

    req := httptest.NewRequest("DELETE", "/api/mirror/nonexistent", nil)
    w := httptest.NewRecorder()
    r.ServeHTTP(w, req)

    if w.Code != http.StatusNotFound {
        t.Errorf("status = %d, want %d", w.Code, http.StatusNotFound)
    }
}

func TestMirrorAPICreateAndGetJob(t *testing.T) {
    h := setupMirrorAPI(t)

    // Create a job
    body, _ := json.Marshal(mirror.JobRequest{
        PURLs: []string{"pkg:npm/lodash@4.17.21"},
    })
    createReq := httptest.NewRequest("POST", "/api/mirror", bytes.NewReader(body))
    createW := httptest.NewRecorder()
    h.HandleCreate(createW, createReq)

    var createResp map[string]string
    _ = json.NewDecoder(createW.Body).Decode(&createResp)
    jobID := createResp["id"]

    // Get the job
    r := chi.NewRouter()
    r.Get("/api/mirror/{id}", h.HandleGet)

    getReq := httptest.NewRequest("GET", "/api/mirror/"+jobID, nil)
    getW := httptest.NewRecorder()
    r.ServeHTTP(getW, getReq)

    if getW.Code != http.StatusOK {
        t.Errorf("status = %d, want %d", getW.Code, http.StatusOK)
    }

    var job mirror.Job
    if err := json.NewDecoder(getW.Body).Decode(&job); err != nil {
        t.Fatalf("decoding job: %v", err)
    }
    if job.ID != jobID {
        t.Errorf("job ID = %q, want %q", job.ID, jobID)
    }
}

41 internal/server/resolve.go Normal file

@@ -0,0 +1,41 @@
package server

import (
    "strings"

    "github.com/git-pkgs/proxy/internal/database"
)

// resolvePackageName determines the package name from a wildcard path by
// checking the database. This handles namespaced packages like Composer's
// vendor/name format where the package name contains a slash.
//
// It tries the full path as a package name first. If not found, it splits
// off the last segment as a non-name suffix (version, action, etc.) and
// tries again, working backwards until a match is found or segments run out.
//
// Returns the package name and the remaining path segments after the name.
// If no package is found, returns empty name and the original segments.
func resolvePackageName(db *database.DB, ecosystem string, segments []string) (name string, rest []string) {
    // Try successively shorter prefixes as the package name.
    // Start with the longest possible name (all segments) and work down.
    for i := len(segments); i >= 1; i-- {
        candidate := strings.Join(segments[:i], "/")
        pkg, err := db.GetPackageByEcosystemName(ecosystem, candidate)
        if err == nil && pkg != nil {
            return candidate, segments[i:]
        }
    }

    return "", segments
}

// splitWildcardPath splits a chi wildcard path value into segments,
// trimming any leading/trailing slashes.
func splitWildcardPath(path string) []string {
    path = strings.Trim(path, "/")
    if path == "" {
        return nil
    }
    return strings.Split(path, "/")
}

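A standalone trace of the longest-prefix-first order the loop above tries, with the database lookup left out (illustration only):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Candidate order for the wildcard path "symfony/console/6.0.0/browse".
    segments := strings.Split("symfony/console/6.0.0/browse", "/")
    for i := len(segments); i >= 1; i-- {
        fmt.Printf("try name=%q rest=%v\n", strings.Join(segments[:i], "/"), segments[i:])
    }
    // try name="symfony/console/6.0.0/browse" rest=[]
    // try name="symfony/console/6.0.0" rest=[browse]
    // try name="symfony/console" rest=[6.0.0 browse]  <- first DB hit wins
    // try name="symfony" rest=[console 6.0.0 browse]
}
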
120 internal/server/resolve_test.go Normal file

@@ -0,0 +1,120 @@
package server

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/git-pkgs/proxy/internal/database"
)

func newTestDB(t *testing.T) (*database.DB, func()) {
    t.Helper()
    dir, err := os.MkdirTemp("", "resolve-test-*")
    if err != nil {
        t.Fatal(err)
    }
    db, err := database.Create(filepath.Join(dir, "test.db"))
    if err != nil {
        _ = os.RemoveAll(dir)
        t.Fatal(err)
    }
    return db, func() { _ = db.Close(); _ = os.RemoveAll(dir) }
}

func seedPackage(t *testing.T, db *database.DB, ecosystem, name, purl string) {
    t.Helper()
    if err := db.UpsertPackage(&database.Package{
        PURL: purl, Ecosystem: ecosystem, Name: name,
    }); err != nil {
        t.Fatalf("failed to upsert package %s: %v", name, err)
    }
}

func TestResolvePackageName(t *testing.T) {
    db, cleanup := newTestDB(t)
    defer cleanup()

    seedPackage(t, db, "npm", "lodash", "pkg:npm/lodash")
    seedPackage(t, db, "composer", "monolog/monolog", "pkg:composer/monolog/monolog")
    seedPackage(t, db, "composer", "symfony/console", "pkg:composer/symfony/console")

    tests := []struct {
        name      string
        ecosystem string
        segments  []string
        wantName  string
        wantRest  []string
    }{
        {
            name: "simple package", ecosystem: "npm",
            segments: []string{"lodash"}, wantName: "lodash", wantRest: nil,
        },
        {
            name: "simple package with version", ecosystem: "npm",
            segments: []string{"lodash", "4.17.21"}, wantName: "lodash", wantRest: []string{"4.17.21"},
        },
        {
            name: "namespaced package", ecosystem: "composer",
            segments: []string{"monolog", "monolog"}, wantName: "monolog/monolog", wantRest: nil,
        },
        {
            name: "namespaced package with version", ecosystem: "composer",
            segments: []string{"symfony", "console", "6.0.0"}, wantName: "symfony/console", wantRest: []string{"6.0.0"},
        },
        {
            name: "namespaced with version and action", ecosystem: "composer",
            segments: []string{"symfony", "console", "6.0.0", "browse"},
            wantName: "symfony/console", wantRest: []string{"6.0.0", "browse"},
        },
        {
            name: "not found", ecosystem: "npm",
            segments: []string{"nonexistent"}, wantName: "", wantRest: []string{"nonexistent"},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            name, rest := resolvePackageName(db, tt.ecosystem, tt.segments)
            if name != tt.wantName {
                t.Errorf("name = %q, want %q", name, tt.wantName)
            }
            if len(rest) != len(tt.wantRest) {
                t.Errorf("rest = %v, want %v", rest, tt.wantRest)
            } else {
                for i := range rest {
                    if rest[i] != tt.wantRest[i] {
                        t.Errorf("rest[%d] = %q, want %q", i, rest[i], tt.wantRest[i])
                    }
                }
            }
        })
    }
}

func TestSplitWildcardPath(t *testing.T) {
    tests := []struct {
        input string
        want  []string
    }{
        {"lodash", []string{"lodash"}},
        {"lodash/4.17.21", []string{"lodash", "4.17.21"}},
        {"monolog/monolog", []string{"monolog", "monolog"}},
        {"symfony/console/6.0.0/browse", []string{"symfony", "console", "6.0.0", "browse"}},
        {"", nil},
        {"/", nil},
    }

    for _, tt := range tests {
        got := splitWildcardPath(tt.input)
        if len(got) != len(tt.want) {
            t.Errorf("splitWildcardPath(%q) = %v, want %v", tt.input, got, tt.want)
            continue
        }
        for i := range got {
            if got[i] != tt.want[i] {
                t.Errorf("splitWildcardPath(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i])
            }
        }
    }
}

@@ -43,6 +43,7 @@ import (
    "log/slog"
    "net/http"
    "strconv"
    "strings"
    "time"

    swaggerdoc "github.com/git-pkgs/proxy/docs/swagger"

@@ -52,6 +53,7 @@ import (
    "github.com/git-pkgs/proxy/internal/enrichment"
    "github.com/git-pkgs/proxy/internal/handler"
    "github.com/git-pkgs/proxy/internal/metrics"
    "github.com/git-pkgs/proxy/internal/mirror"
    "github.com/git-pkgs/proxy/internal/storage"
    "github.com/git-pkgs/purl"
    "github.com/git-pkgs/registries/fetch"

@@ -76,6 +78,7 @@ type Server struct {
    logger    *slog.Logger
    http      *http.Server
    templates *Templates
    cancel    context.CancelFunc
}

// New creates a new Server with the given configuration.

@@ -121,20 +124,12 @@ func New(cfg *config.Config, logger *slog.Logger) (*Server, error) {
        return nil, fmt.Errorf("verifying storage connectivity: %w", err)
    }

    // Load templates
    templates, err := NewTemplates()
    if err != nil {
        _ = store.Close()
        _ = db.Close()
        return nil, fmt.Errorf("loading templates: %w", err)
    }

    return &Server{
        cfg:       cfg,
        db:        db,
        storage:   store,
        logger:    logger,
        templates: templates,
        templates: &Templates{},
    }, nil
}

@@ -151,6 +146,8 @@ func (s *Server) Start() error {
    }
    proxy := handler.NewProxy(s.db, s.storage, fetcher, resolver, s.logger)
    proxy.Cooldown = cd
    proxy.CacheMetadata = s.cfg.CacheMetadata
    proxy.MetadataTTL = s.cfg.ParseMetadataTTL()

    // Create router with Chi
    r := chi.NewRouter()

@@ -218,30 +215,37 @@ func (s *Server) Start() error {
    r.Get("/install", s.handleInstall)
    r.Get("/search", s.handleSearch)
    r.Get("/packages", s.handlePackagesList)
    r.Get("/package/{ecosystem}/{name}", s.handlePackageShow)
    r.Get("/package/{ecosystem}/{name}/{version}", s.handleVersionShow)
    r.Get("/package/{ecosystem}/{name}/{version}/browse", s.handleBrowseSource)
    r.Get("/package/{ecosystem}/*", s.handlePackagePath)

    // API endpoints for enrichment data
    enrichSvc := enrichment.New(s.logger)
    apiHandler := NewAPIHandler(enrichSvc, s.db)

    r.Get("/api/package/{ecosystem}/{name}", apiHandler.HandleGetPackage)
    r.Get("/api/package/{ecosystem}/{name}/{version}", apiHandler.HandleGetVersion)
    r.Get("/api/vulns/{ecosystem}/{name}", apiHandler.HandleGetVulns)
    r.Get("/api/vulns/{ecosystem}/{name}/{version}", apiHandler.HandleGetVulns)
    r.Get("/api/package/{ecosystem}/*", apiHandler.HandlePackagePath)
    r.Get("/api/vulns/{ecosystem}/*", apiHandler.HandleVulnsPath)
    r.Post("/api/outdated", apiHandler.HandleOutdated)
    r.Post("/api/bulk", apiHandler.HandleBulkLookup)
    r.Get("/api/search", apiHandler.HandleSearch)
    r.Get("/api/packages", apiHandler.HandlePackagesList)

    // Archive browsing endpoints
    r.Get("/api/browse/{ecosystem}/{name}/{version}", s.handleBrowseList)
    r.Get("/api/browse/{ecosystem}/{name}/{version}/file/*", s.handleBrowseFile)
    // Archive browsing and comparison endpoints also use wildcard for namespaced packages
    r.Get("/api/browse/{ecosystem}/*", s.handleBrowsePath)
    r.Get("/api/compare/{ecosystem}/*", s.handleComparePath)

    // Version comparison endpoints
    r.Get("/api/compare/{ecosystem}/{name}/{fromVersion}/{toVersion}", s.handleCompareDiff)
    r.Get("/package/{ecosystem}/{name}/compare/{versions}", s.handleComparePage)
    // Start background context (used by mirror jobs and cleanup)
    bgCtx, bgCancel := context.WithCancel(context.Background())
    s.cancel = bgCancel

    // Mirror API endpoints (opt-in via mirror_api config or PROXY_MIRROR_API env)
    if s.cfg.MirrorAPI {
        mirrorSvc := mirror.New(proxy, s.db, s.storage, s.logger, 4) //nolint:mnd // default concurrency
        jobStore := mirror.NewJobStore(bgCtx, mirrorSvc)
        mirrorAPI := NewMirrorAPIHandler(jobStore)
        r.Post("/api/mirror", mirrorAPI.HandleCreate)
        r.Get("/api/mirror/{id}", mirrorAPI.HandleGet)
        r.Delete("/api/mirror/{id}", mirrorAPI.HandleCancel)
        go jobStore.StartCleanup(bgCtx)
    }

    s.http = &http.Server{
        Addr: s.cfg.Listen,

@@ -256,8 +260,6 @@ func (s *Server) Start() error {
        "base_url", s.cfg.BaseURL,
        "storage", s.storage.URL(),
        "database", s.cfg.Database.Path)

    // Start background goroutine to update cache stats metrics
    go s.updateCacheStatsMetrics()

    return s.http.ListenAndServe()

@@ -289,6 +291,10 @@ func (s *Server) updateCacheStats() {
func (s *Server) Shutdown(ctx context.Context) error {
    s.logger.Info("shutting down server")

    if s.cancel != nil {
        s.cancel()
    }

    var errs []error

    if s.http != nil {

@@ -592,15 +598,71 @@ func (s *Server) handlePackagesList(w http.ResponseWriter, r *http.Request) {
    }
}

func (s *Server) handlePackageShow(w http.ResponseWriter, r *http.Request) {
// handlePackagePath dispatches wildcard package routes to the appropriate handler.
// It resolves namespaced package names (e.g., Composer vendor/name) by consulting
// the database to determine which path segments are part of the package name.
//
// Supported paths:
//
//	{name}                     -> package show
//	{name}/{version}           -> version show
//	{name}/{version}/browse    -> browse source
//	{name}/compare/{v1}...{v2} -> compare versions
func (s *Server) handlePackagePath(w http.ResponseWriter, r *http.Request) {
    ecosystem := chi.URLParam(r, "ecosystem")
    name := chi.URLParam(r, "name")
    wildcard := chi.URLParam(r, "*")
    segments := splitWildcardPath(wildcard)

    if ecosystem == "" || name == "" {
        http.Error(w, "ecosystem and name required", http.StatusBadRequest)
    if ecosystem == "" || len(segments) == 0 {
        http.Error(w, "ecosystem and package name required", http.StatusBadRequest)
        return
    }

    // Check for compare route: {name}/compare/{versions}
    for i, seg := range segments {
        if seg == "compare" && i > 0 && i < len(segments)-1 {
            name := strings.Join(segments[:i], "/")
            versions := strings.Join(segments[i+1:], "/")
            s.showComparePage(w, ecosystem, name, versions)
            return
        }
    }

    // Check for browse suffix
    browse := false
    if len(segments) > 1 && segments[len(segments)-1] == "browse" {
        browse = true
        segments = segments[:len(segments)-1]
    }

    // Resolve package name from the remaining segments using DB lookup.
    name, rest := resolvePackageName(s.db, ecosystem, segments)

    if name == "" {
        // No package found in DB. Fall back to heuristic: assume the last
        // segment is a version (if present) and everything else is the name.
        if len(segments) == 1 {
            // Single segment, no DB match: try package show (will 404).
            s.showPackage(w, ecosystem, segments[0])
            return
        }
        name = strings.Join(segments[:len(segments)-1], "/")
        rest = segments[len(segments)-1:]
    }

    switch {
    case len(rest) == 0 && !browse:
        s.showPackage(w, ecosystem, name)
    case len(rest) == 1 && browse:
        s.showBrowseSource(w, ecosystem, name, rest[0])
    case len(rest) == 1:
        s.showVersion(w, ecosystem, name, rest[0])
    default:
        http.Error(w, "not found", http.StatusNotFound)
    }
}

func (s *Server) showPackage(w http.ResponseWriter, ecosystem, name string) {
    pkg, err := s.db.GetPackageByEcosystemName(ecosystem, name)
    if err != nil {
        s.logger.Error("failed to get package", "error", err, "ecosystem", ecosystem, "name", name)

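A standalone sketch of just the compare-route detection above, with a hardcoded path (illustration only):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // The first interior "compare" segment splits the wildcard into a
    // package name and a versions spec, exactly as handlePackagePath does.
    segments := strings.Split("symfony/console/compare/6.0.0...6.4.0", "/")
    for i, seg := range segments {
        if seg == "compare" && i > 0 && i < len(segments)-1 {
            name := strings.Join(segments[:i], "/")
            versions := strings.Join(segments[i+1:], "/")
            fmt.Printf("name=%q versions=%q\n", name, versions)
            // name="symfony/console" versions="6.0.0...6.4.0"
        }
    }
}
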
@@ -636,16 +698,7 @@ func (s *Server) handlePackageShow(w http.ResponseWriter, r *http.Request) {
    }
}

func (s *Server) handleVersionShow(w http.ResponseWriter, r *http.Request) {
    ecosystem := chi.URLParam(r, "ecosystem")
    name := chi.URLParam(r, "name")
    version := chi.URLParam(r, "version")

    if ecosystem == "" || name == "" || version == "" {
        http.Error(w, "ecosystem, name, and version required", http.StatusBadRequest)
        return
    }

func (s *Server) showVersion(w http.ResponseWriter, ecosystem, name, version string) {
    pkg, err := s.db.GetPackageByEcosystemName(ecosystem, name)
    if err != nil || pkg == nil {
        s.logger.Error("failed to get package", "error", err)

@@ -675,7 +728,6 @@ func (s *Server) handleVersionShow(w http.ResponseWriter, r *http.Request) {
    isOutdated := pkg.LatestVersion.Valid && pkg.LatestVersion.String != version

    // Check if any artifact is cached
    hasCached := false
    for _, art := range artifacts {
        if art.StoragePath.Valid {

@@ -699,6 +751,40 @@ func (s *Server) handleVersionShow(w http.ResponseWriter, r *http.Request) {
    }
}

func (s *Server) showBrowseSource(w http.ResponseWriter, ecosystem, name, version string) {
    data := BrowseSourceData{
        Ecosystem:   ecosystem,
        PackageName: name,
        Version:     version,
    }

    if err := s.templates.Render(w, "browse_source", data); err != nil {
        s.logger.Error("failed to render browse source page", "error", err)
        http.Error(w, "internal server error", http.StatusInternalServerError)
    }
}

func (s *Server) showComparePage(w http.ResponseWriter, ecosystem, name, versions string) {
    const compareVersionParts = 2
    parts := strings.Split(versions, "...")
    if len(parts) != compareVersionParts {
        http.Error(w, "invalid version format, use: version1...version2", http.StatusBadRequest)
        return
    }

    data := ComparePageData{
        Ecosystem:   ecosystem,
        PackageName: name,
        FromVersion: parts[0],
        ToVersion:   parts[1],
    }

    if err := s.templates.Render(w, "compare_versions", data); err != nil {
        s.logger.Error("failed to render compare page", "error", err)
        http.Error(w, "internal server error", http.StatusInternalServerError)
    }
}

// handleHealth responds with a simple health check.
// @Summary Health check
// @Tags meta

@@ -79,21 +79,13 @@ func newTestServer(t *testing.T) *testServer {
    r.Mount("/go", http.StripPrefix("/go", goHandler.Routes()))
    r.Mount("/pypi", http.StripPrefix("/pypi", pypiHandler.Routes()))

    // Load templates
    templates, err := NewTemplates()
    if err != nil {
        _ = db.Close()
        _ = os.RemoveAll(tempDir)
        t.Fatalf("failed to load templates: %v", err)
    }

    // Create a minimal server struct for the handlers
    s := &Server{
        cfg:       cfg,
        db:        db,
        storage:   store,
        logger:    logger,
        templates: templates,
        templates: &Templates{},
    }

    r.Get("/health", s.handleHealth)

@@ -101,13 +93,9 @@ func newTestServer(t *testing.T) *testServer {
    r.Get("/openapi.json", s.handleOpenAPIJSON)
    r.Mount("/static", http.StripPrefix("/static/", staticHandler()))
    r.Get("/search", s.handleSearch)
    r.Get("/package/{ecosystem}/{name}", s.handlePackageShow)
    r.Get("/package/{ecosystem}/{name}/{version}", s.handleVersionShow)
    r.Get("/package/{ecosystem}/{name}/{version}/browse", s.handleBrowseSource)
    r.Get("/api/browse/{ecosystem}/{name}/{version}", s.handleBrowseList)
    r.Get("/api/browse/{ecosystem}/{name}/{version}/file/*", s.handleBrowseFile)
    r.Get("/api/compare/{ecosystem}/{name}/{fromVersion}/{toVersion}", s.handleCompareDiff)
    r.Get("/package/{ecosystem}/{name}/compare/{versions}", s.handleComparePage)
    r.Get("/package/{ecosystem}/*", s.handlePackagePath)
    r.Get("/api/browse/{ecosystem}/*", s.handleBrowsePath)
    r.Get("/api/compare/{ecosystem}/*", s.handleComparePath)
    r.Get("/", s.handleRoot)
    r.Get("/install", s.handleInstall)
    r.Get("/packages", s.handlePackagesList)

@@ -332,10 +320,14 @@ func TestGoList(t *testing.T) {
    w := httptest.NewRecorder()
    ts.handler.ServeHTTP(w, req)

    // The handler is mounted if we get a Go proxy error (not a generic 404)
    body := w.Body.String()
    if w.Code == http.StatusNotFound && !strings.Contains(body, "example.com") {
        t.Errorf("go handler should be mounted, got status %d, body: %s", w.Code, body)
    // The handler is mounted if we get a response from the proxy (404 from upstream
    // or 502 from connection failure), not a chi router 404.
    // With metadata caching, upstream 404 is cleanly returned as our own 404.
    if w.Code == http.StatusNotFound {
        body := w.Body.String()
        if !strings.Contains(body, "not found") {
            t.Errorf("go handler should be mounted, got status %d, body: %s", w.Code, body)
        }
    }
}

@@ -701,6 +693,113 @@ func TestPackageShowPage_WithLicense(t *testing.T) {
    }
}

func TestComposerNamespacedPackageRoutes(t *testing.T) {
    ts := newTestServer(t)
    defer ts.close()

    // Seed two Composer packages with vendor/name format.
    for _, p := range []struct {
        purl, name, versionPURL string
    }{
        {"pkg:composer/monolog/monolog", "monolog/monolog", "pkg:composer/monolog/monolog@3.0.0"},
        {"pkg:composer/symfony/console", "symfony/console", "pkg:composer/symfony/console@6.0.0"},
    } {
        if err := ts.db.UpsertPackage(&database.Package{
            PURL: p.purl, Ecosystem: "composer", Name: p.name,
        }); err != nil {
            t.Fatalf("failed to upsert package %s: %v", p.name, err)
        }
        if err := ts.db.UpsertVersion(&database.Version{
            PURL: p.versionPURL, PackagePURL: p.purl,
        }); err != nil {
            t.Fatalf("failed to upsert version for %s: %v", p.name, err)
        }
    }

    tests := []struct {
        name string
        url  string
        want string
    }{
        {"package show", "/package/composer/monolog/monolog", "monolog/monolog"},
        {"version show", "/package/composer/symfony/console/6.0.0", "symfony/console"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            req := httptest.NewRequest("GET", tt.url, nil)
            w := httptest.NewRecorder()
            ts.handler.ServeHTTP(w, req)

            if w.Code != http.StatusOK {
                t.Errorf("GET %s: expected status 200, got %d", tt.url, w.Code)
            }
            if !strings.Contains(w.Body.String(), tt.want) {
                t.Errorf("GET %s: expected body to contain %q", tt.url, tt.want)
            }
        })
    }
}

func TestNamespacedPackageRoutes(t *testing.T) {
    ts := newTestServer(t)
    defer ts.close()

    // Seed packages from ecosystems that use slashes in package names.
    pkgs := []struct {
        purl, ecosystem, name, versionPURL string
    }{
        // npm scoped packages
        {"pkg:npm/%40babel/core", "npm", "@babel/core", "pkg:npm/%40babel/core@7.24.0"},
        // Go modules (multi-segment paths)
        {"pkg:golang/github.com/stretchr/testify", "golang", "github.com/stretchr/testify", "pkg:golang/github.com/stretchr/testify@1.9.0"},
        // OCI/container images
        {"pkg:oci/library/nginx", "oci", "library/nginx", "pkg:oci/library/nginx@sha256:abc123"},
        // Conda (channel/name)
        {"pkg:conda/conda-forge/numpy", "conda", "conda-forge/numpy", "pkg:conda/conda-forge/numpy@1.26.4"},
        // Conan (name/version@user/channel)
        {"pkg:conan/zlib/1.2.13@demo/stable", "conan", "zlib/1.2.13@demo/stable", "pkg:conan/zlib/1.2.13@demo/stable@rev1"},
    }

    for _, p := range pkgs {
        if err := ts.db.UpsertPackage(&database.Package{
            PURL: p.purl, Ecosystem: p.ecosystem, Name: p.name,
        }); err != nil {
            t.Fatalf("failed to upsert package %s: %v", p.name, err)
        }
        if err := ts.db.UpsertVersion(&database.Version{
            PURL: p.versionPURL, PackagePURL: p.purl,
        }); err != nil {
            t.Fatalf("failed to upsert version for %s: %v", p.name, err)
        }
    }

    tests := []struct {
        name string
        url  string
        want int
    }{
        {"npm scoped package show", "/package/npm/@babel/core", http.StatusOK},
        {"golang module show", "/package/golang/github.com/stretchr/testify", http.StatusOK},
        {"oci image show", "/package/oci/library/nginx", http.StatusOK},
        {"conda package show", "/package/conda/conda-forge/numpy", http.StatusOK},
        {"conan package show", "/package/conan/zlib/1.2.13@demo/stable", http.StatusOK},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            req := httptest.NewRequest("GET", tt.url, nil)
            w := httptest.NewRecorder()
            ts.handler.ServeHTTP(w, req)

            if w.Code != tt.want {
                t.Errorf("GET %s: expected status %d, got %d (body: %s)",
                    tt.url, tt.want, w.Code, w.Body.String())
            }
        })
    }
}

func TestSearchPage_WithSeededResults(t *testing.T) {
    ts := newTestServer(t)
    defer ts.close()

@@ -894,10 +993,10 @@ func TestNewServer_StorageConnectivityCheck(t *testing.T) {
    // On Windows, OpenBucket normalises to file:///C:/path; on Unix the
    // absolute path already starts with /, so file:// + /path == file:///path.
    wantPrefix := "file://"
    wantSuffix := filepath.ToSlash(storagePath)
    wantPath := filepath.ToSlash(storagePath)
    got := srv.storage.URL()
    if !strings.HasPrefix(got, wantPrefix) || !strings.HasSuffix(got, wantSuffix) {
        t.Errorf("expected storage URL ending with %s, got %s", wantSuffix, got)
    if !strings.HasPrefix(got, wantPrefix) || !strings.Contains(got, wantPath) {
        t.Errorf("expected storage URL containing %s, got %s", wantPath, got)
    }

    _ = srv.db.Close()

@@ -5,61 +5,70 @@ import (
    "html/template"
    "net/http"
    "path/filepath"
    "sync"
)

//go:embed templates/**/*.html
var templatesFS embed.FS

// Templates holds parsed templates for each page.
// Templates holds lazily-parsed templates for each page.
type Templates struct {
    once  sync.Once
    pages map[string]*template.Template
    err   error
}

// NewTemplates loads and parses all templates from the embedded filesystem.
func NewTemplates() (*Templates, error) {
    pages := make(map[string]*template.Template)
// load parses all templates from the embedded filesystem on first call.
func (t *Templates) load() error {
    t.once.Do(func() {
        pages := make(map[string]*template.Template)

    // Define custom template functions
    funcMap := template.FuncMap{
        "add":                 func(a, b int) int { return a + b },
        "sub":                 func(a, b int) int { return a - b },
        "supportedEcosystems": supportedEcosystems,
        "ecosystemBadgeClass": ecosystemBadgeClasses,
        "ecosystemBadgeLabel": ecosystemBadgeLabel,
    }

    // Get all page files
    pageFiles, err := templatesFS.ReadDir("templates/pages")
    if err != nil {
        return nil, err
    }

    for _, pageFile := range pageFiles {
        if pageFile.IsDir() {
            continue
        funcMap := template.FuncMap{
            "add":                 func(a, b int) int { return a + b },
            "sub":                 func(a, b int) int { return a - b },
            "supportedEcosystems": supportedEcosystems,
            "ecosystemBadgeClass": ecosystemBadgeClasses,
            "ecosystemBadgeLabel": ecosystemBadgeLabel,
        }

        pageName := pageFile.Name()
        pageName = pageName[:len(pageName)-len(filepath.Ext(pageName))]

        // Parse all layout files + components + this page with custom functions
        tmpl, err := template.New("").Funcs(funcMap).ParseFS(templatesFS,
            "templates/layout/*.html",
            "templates/components/*.html",
            "templates/pages/"+pageFile.Name(),
        )
        pageFiles, err := templatesFS.ReadDir("templates/pages")
        if err != nil {
            return nil, err
            t.err = err
            return
        }

        pages[pageName] = tmpl
    }
        for _, pageFile := range pageFiles {
            if pageFile.IsDir() {
                continue
            }

    return &Templates{pages: pages}, nil
            pageName := pageFile.Name()
            pageName = pageName[:len(pageName)-len(filepath.Ext(pageName))]

            tmpl, err := template.New("").Funcs(funcMap).ParseFS(templatesFS,
                "templates/layout/*.html",
                "templates/components/*.html",
                "templates/pages/"+pageFile.Name(),
            )
            if err != nil {
                t.err = err
                return
            }

            pages[pageName] = tmpl
        }

        t.pages = pages
    })
    return t.err
}

// Render renders a page template with the given data.
func (t *Templates) Render(w http.ResponseWriter, pageName string, data any) error {
    if err := t.load(); err != nil {
        return err
    }

    w.Header().Set("Content-Type", "text/html; charset=utf-8")

    tmpl, ok := t.pages[pageName]

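The change above is the standard sync.Once lazy-initialization pattern: the first Render pays the parse cost exactly once, concurrent callers block until it finishes, and the stored error is replayed on every later call. A minimal standalone sketch of the same shape:

package main

import (
    "fmt"
    "sync"
)

// lazy caches the result of an expensive initialization. once.Do runs the
// function at most once, even under concurrent callers; both value and err
// are then fixed for the lifetime of the struct.
type lazy struct {
    once  sync.Once
    value string
    err   error
}

func (l *lazy) get() (string, error) {
    l.once.Do(func() {
        l.value = "parsed" // expensive work (template parsing) happens here once
    })
    return l.value, l.err
}

func main() {
    var l lazy
    for i := 0; i < 2; i++ {
        v, _ := l.get()
        fmt.Println(v) // "parsed" both times; initialization ran once
    }
}
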
@@ -11,10 +11,7 @@ import (
)

func TestTemplatesRenderAllPages(t *testing.T) {
    templates, err := NewTemplates()
    if err != nil {
        t.Fatalf("failed to load templates: %v", err)
    }
    templates := &Templates{}

    tests := []struct {
        page string

@@ -156,14 +153,26 @@ func TestTemplatesRenderAllPages(t *testing.T) {
    }
}

func TestTemplatesRenderUnknownPage(t *testing.T) {
    templates, err := NewTemplates()
    if err != nil {
        t.Fatalf("failed to load templates: %v", err)
func TestTemplatesLazyLoading(t *testing.T) {
    templates := &Templates{}

    if templates.pages != nil {
        t.Fatal("expected pages to be nil before first Render call")
    }

    w := httptest.NewRecorder()
    err = templates.Render(w, "nonexistent_page", nil)
    _ = templates.Render(w, "dashboard", DashboardData{})

    if templates.pages == nil {
        t.Fatal("expected pages to be populated after first Render call")
    }
}

func TestTemplatesRenderUnknownPage(t *testing.T) {
    templates := &Templates{}

    w := httptest.NewRecorder()
    err := templates.Render(w, "nonexistent_page", nil)
    if err == nil {
        t.Error("expected error for unknown page")
    }

@@ -326,26 +335,6 @@ func TestSearchPage_EcosystemFilter(t *testing.T) {
    }
}

func TestGetStripPrefix(t *testing.T) {
    tests := []struct {
        ecosystem string
        want      string
    }{
        {"npm", "package/"},
        {"cargo", ""},
        {"pypi", ""},
        {"gem", ""},
        {"", ""},
    }

    for _, tt := range tests {
        got := getStripPrefix(tt.ecosystem)
        if got != tt.want {
            t.Errorf("getStripPrefix(%q) = %q, want %q", tt.ecosystem, got, tt.want)
        }
    }
}

func TestEcosystemBadgeLabel(t *testing.T) {
    tests := []struct {
        ecosystem string

@@ -424,3 +413,30 @@ func TestCategorizeLicense(t *testing.T) {
        }
    }
}

func BenchmarkTemplatesParse(b *testing.B) {
    for b.Loop() {
        t := &Templates{}
        if err := t.load(); err != nil {
            b.Fatal(err)
        }
    }
}

func BenchmarkServerCreate(b *testing.B) {
    for b.Loop() {
        _ = &Server{
            templates: &Templates{},
        }
    }
}

func BenchmarkFirstRender(b *testing.B) {
    for b.Loop() {
        t := &Templates{}
        w := httptest.NewRecorder()
        if err := t.Render(w, "dashboard", DashboardData{}); err != nil {
            b.Fatal(err)
        }
    }
}

@@ -70,6 +70,12 @@ func OpenBucket(ctx context.Context, urlStr string) (*Blob, error) {
        } else {
            urlStr = "file://" + urlPath
        }

        // Create temp files next to the final path instead of in os.TempDir.
        // This avoids "invalid cross-device link" errors from os.Rename when
        // the bucket directory and os.TempDir are on different filesystems
        // (e.g. Docker volume mounts).
        urlStr += "?no_tmp_dir=true"
    }

    bucket, err := blob.OpenBucket(ctx, urlStr)

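The failure mode the new comment describes is easy to reproduce: os.Rename cannot move a file between filesystems and fails with EXDEV ("invalid cross-device link"). A standalone sketch, with placeholder paths that only trigger the error when the two directories really sit on different mounts:

package main

import (
    "errors"
    "fmt"
    "os"
    "syscall"
)

func main() {
    // "/tmp/src" and "/mnt/volume/dst" are placeholders; the rename only
    // fails with EXDEV when they live on different filesystems, e.g. when
    // the destination is a Docker volume mount.
    err := os.Rename("/tmp/src", "/mnt/volume/dst")
    if errors.Is(err, syscall.EXDEV) {
        fmt.Println("invalid cross-device link: write the temp file on the target filesystem instead")
    } else if err != nil {
        fmt.Println("rename failed:", err)
    }
}
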
@@ -217,6 +217,44 @@ func TestBlobOverwrite(t *testing.T) {
    }
}

func TestOpenBucketSetsNoTmpDir(t *testing.T) {
    dir := t.TempDir()
    ctx := context.Background()

    b, err := OpenBucket(ctx, fileURLFromPath(dir))
    if err != nil {
        t.Fatalf("OpenBucket failed: %v", err)
    }
    defer func() { _ = b.Close() }()

    // fileblob uses os.TempDir() by default for temp files, then os.Rename to
    // the final path. This fails with "invalid cross-device link" when the bucket
    // dir and os.TempDir() are on different filesystems (e.g. Docker volumes).
    // OpenBucket must set no_tmp_dir=true so temp files are created next to the
    // final path instead.
    if !strings.Contains(b.URL(), "no_tmp_dir=true") {
        t.Errorf("URL should contain no_tmp_dir=true to avoid cross-device rename errors, got %q", b.URL())
    }

    // Verify Store still works with the parameter set
    content := "cross-device test"
    _, _, err = b.Store(ctx, "test/cross-device.txt", strings.NewReader(content))
    if err != nil {
        t.Fatalf("Store failed with no_tmp_dir=true: %v", err)
    }

    r, err := b.Open(ctx, "test/cross-device.txt")
    if err != nil {
        t.Fatalf("Open failed: %v", err)
    }
    defer func() { _ = r.Close() }()

    data, _ := io.ReadAll(r)
    if string(data) != content {
        t.Errorf("content = %q, want %q", string(data), content)
    }
}

func createTestBlob(t *testing.T) *Blob {
    t.Helper()
    dir := t.TempDir()

@@ -219,9 +219,9 @@ func TestFilesystemUsedSpace(t *testing.T) {
    }

    // Add some files
    _, _, _ = fs.Store(ctx, "a.txt", strings.NewReader("aaaa")) // 4 bytes
    _, _, _ = fs.Store(ctx, "b.txt", strings.NewReader("bbbbbb")) // 6 bytes
    _, _, _ = fs.Store(ctx, "c/d.txt", strings.NewReader("ccccc")) // 5 bytes
    _, _, _ = fs.Store(ctx, "a.txt", strings.NewReader("aaaa"))    // 4 bytes
    _, _, _ = fs.Store(ctx, "b.txt", strings.NewReader("bbbbbb"))  // 6 bytes
    _, _, _ = fs.Store(ctx, "c/d.txt", strings.NewReader("ccccc")) // 5 bytes

    used, err = fs.UsedSpace(ctx)
    if err != nil {