diff --git a/NOTICE.txt b/NOTICE.txt index ad0de293db9..8ded3b2cdbe 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -73,189 +73,40 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/dgraph-io/badger/v2 -Version: v2.2007.4 -Licence type (autodetected): Apache-2.0 +Dependency : github.com/cockroachdb/pebble/v2 +Version: v2.0.2 +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/badger/v2@v2.2007.4/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/pebble/v2@v2.0.2/LICENSE: - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. 
However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - END OF TERMS AND CONDITIONS +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- @@ -3474,6 +3325,37 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.10.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.10.0/LICENSE: + +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : go.elastic.co/apm/module/apmelasticsearch/v2 Version: v2.6.3 @@ -6283,77 +6165,256 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +Dependency : github.com/beorn7/perks +Version: v1.0.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/cockroachdb/crlib +Version: v0.0.0-20241015224233-894974b3ad94 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/crlib@v0.0.0-20241015224233-894974b3ad94/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and --------------------------------------------------------------------------------- -Dependency : github.com/beorn7/perks -Version: v1.0.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
-Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -Copyright (C) 2013 Blake Mizerany + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. --------------------------------------------------------------------------------- -Dependency : github.com/cespare/xxhash -Version: v1.1.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + END OF TERMS AND CONDITIONS -Contents of probable licence file $GOMODCACHE/github.com/cespare/xxhash@v1.1.0/LICENSE.txt: + APPENDIX: How to apply the Apache License to your work. -Copyright (c) 2016 Caleb Spare + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -MIT License + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- @@ -7238,14 +7299,14 @@ Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/redact@v1.1 -------------------------------------------------------------------------------- -Dependency : github.com/cockroachdb/tokenbucket -Version: v0.0.0-20230807174530-cc333fc44b06 +Dependency : github.com/cockroachdb/swiss +Version: v0.0.0-20240612210725-f4de07ae6964 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/tokenbucket@v0.0.0-20230807174530-cc333fc44b06/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/swiss@v0.0.0-20240612210725-f4de07ae6964/LICENSE: - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -7425,7 +7486,7 @@ Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/tokenbucket APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -7433,7 +7494,7 @@ Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/tokenbucket same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -7448,38 +7509,14 @@ Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/tokenbucket limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/davecgh/go-spew -Version: v1.1.2-0.20180830191138-d8f796af33cc -Licence type (autodetected): ISC --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.2-0.20180830191138-d8f796af33cc/LICENSE: - -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -------------------------------------------------------------------------------- -Dependency : github.com/dgraph-io/ristretto -Version: v0.2.0 +Dependency : github.com/cockroachdb/tokenbucket +Version: v0.0.0-20230807174530-cc333fc44b06 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/cockroachdb/tokenbucket@v0.0.0-20230807174530-cc333fc44b06/LICENSE: Apache License Version 2.0, January 2004 @@ -7658,36 +7695,55 @@ Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0. END OF TERMS AND CONDITIONS + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + -------------------------------------------------------------------------------- -Dependency : github.com/dgryski/go-farm -Version: v0.0.0-20200201041132-a6ae2369ad13 -Licence type (autodetected): MIT +Dependency : github.com/davecgh/go-spew +Version: v1.1.2-0.20180830191138-d8f796af33cc +Licence type (autodetected): ISC -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE: - -Copyright (c) 2014-2017 Damian Gryski -Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.2-0.20180830191138-d8f796af33cc/LICENSE: -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +ISC License -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
+Copyright (c) 2012-2016 Dave Collins -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -------------------------------------------------------------------------------- @@ -9993,11 +10049,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/golang/snappy -Version: v0.0.4 +Version: v0.0.5-0.20231225225746-43d5d4cd4e0e Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.5-0.20231225225746-43d5d4cd4e0e/LICENSE: Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. @@ -13543,6 +13599,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/pmezard/go-difflib +Version: v1.0.1-0.20181226105442-5d4384ee4fb2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pmezard/go-difflib@v1.0.1-0.20181226105442-5d4384ee4fb2/LICENSE: + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/prometheus/client_golang Version: v1.20.5 diff --git a/go.mod b/go.mod index b71ee729bfb..3cf035494d1 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.23.0 require ( github.com/KimMachineGun/automemlimit v0.7.0 github.com/cespare/xxhash/v2 v2.3.0 - github.com/dgraph-io/badger/v2 v2.2007.4 + github.com/cockroachdb/pebble/v2 v2.0.2 github.com/dustin/go-humanize v1.0.1 github.com/elastic/apm-aggregation v1.2.0 github.com/elastic/apm-data v1.16.0 @@ -60,20 +60,18 @@ require ( require ( github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/axiomhq/hyperloglog v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v1.1.2 // indirect github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dgraph-io/ristretto v0.2.0 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 // indirect github.com/dlclark/regexp2 v1.8.1 // indirect github.com/docker/go-connections v0.5.0 // indirect @@ -97,7 +95,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/gomodule/redigo v1.8.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect diff --git a/go.sum b/go.sum index 34f89952fe9..c1bf5ef0118 100644 --- a/go.sum +++ b/go.sum @@ -21,10 +21,8 @@ github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlD github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= -github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= 
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= @@ -61,44 +59,38 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/pebble/v2 v2.0.2 h1:PmmN7V/rDK+xgp5HiPV9e7ycAalyMjKwSIrcj/4HQz4= +github.com/cockroachdb/pebble/v2 v2.0.2/go.mod h1:NgxgNcWwyG/uxkLUZGM2aelshaLIZvc0hCX7SCfaO8s= github.com/cockroachdb/redact 
v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964 h1:Ew0znI2JatzKy52N1iS5muUsHkf2UJuhocH7uFW7jjs= +github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= -github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 h1:y7y0Oa6UawqTFPCDw9JG6pdKt4F9pAhHv0B7FMGaGD0= github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -113,7 +105,6 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnjXhL/t10BSP1mcr/0Ldea2uMyuBr2SWk= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= @@ -180,9 +171,10 
@@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA= github.com/getsentry/sentry-go v0.29.1/go.mod h1:x3AtIzN01d6SiWkderzaH28Tm0lgkafpJ5Bm3li39O0= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -214,14 +206,12 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -257,10 +247,8 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap 
v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -284,7 +272,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -302,7 +289,6 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -312,10 +298,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mileusna/useragent v1.3.4 h1:MiuRRuvGjEie1+yZHO88UBYg8YBC/ddF6T7F56i3PCk= github.com/mileusna/useragent v1.3.4/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -335,7 +319,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -369,26 +352,16 @@ 
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI= github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -408,7 +381,6 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -421,7 +393,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -483,7 +454,6 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -538,11 +508,9 @@ golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -615,7 +583,6 @@ google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNI google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/beater/config/config_test.go b/internal/beater/config/config_test.go index 9b136f95b72..fbc8c08d3d3 100644 --- a/internal/beater/config/config_test.go +++ b/internal/beater/config/config_test.go @@ -362,7 +362,6 @@ func TestUnpackConfig(t *testing.T) { ESConfig: elasticsearch.DefaultConfig(), 
Interval: 1 * time.Minute, IngestRateDecayFactor: 0.25, - StorageGCInterval: 5 * time.Minute, StorageLimit: "3GB", StorageLimitParsed: 3000000000, TTL: 30 * time.Minute, @@ -494,7 +493,6 @@ func TestUnpackConfig(t *testing.T) { ESConfig: elasticsearch.DefaultConfig(), Interval: 2 * time.Minute, IngestRateDecayFactor: 1.0, - StorageGCInterval: 5 * time.Minute, StorageLimit: "1GB", StorageLimitParsed: 1000000000, TTL: 30 * time.Minute, diff --git a/internal/beater/config/sampling.go b/internal/beater/config/sampling.go index 0e00e1fd0c0..07bff0d1a23 100644 --- a/internal/beater/config/sampling.go +++ b/internal/beater/config/sampling.go @@ -48,7 +48,6 @@ type TailSamplingConfig struct { ESConfig *elasticsearch.Config `config:"elasticsearch"` Interval time.Duration `config:"interval" validate:"min=1s"` IngestRateDecayFactor float64 `config:"ingest_rate_decay" validate:"min=0, max=1"` - StorageGCInterval time.Duration `config:"storage_gc_interval" validate:"min=1s"` TTL time.Duration `config:"ttl" validate:"min=1s"` StorageLimit string `config:"storage_limit"` StorageLimitParsed uint64 @@ -151,7 +150,6 @@ func defaultTailSamplingConfig() TailSamplingConfig { ESConfig: elasticsearch.DefaultConfig(), Interval: 1 * time.Minute, IngestRateDecayFactor: 0.25, - StorageGCInterval: 5 * time.Minute, TTL: 30 * time.Minute, StorageLimit: "3GB", DiscardOnWriteFailure: false, diff --git a/x-pack/apm-server/main.go b/x-pack/apm-server/main.go index 9001eb3c96e..c56d8a2ecf7 100644 --- a/x-pack/apm-server/main.go +++ b/x-pack/apm-server/main.go @@ -40,12 +40,9 @@ var ( // will hopefully disappear in the future, when agents no longer send unsampled transactions. samplingMonitoringRegistry = monitoring.Default.GetRegistry("apm-server.sampling") - // badgerDB holds the badger database to use when tail-based sampling is configured. - badgerMu sync.Mutex - badgerDB *eventstorage.StorageManager - - storageMu sync.Mutex - storage *eventstorage.ManagedReadWriter + // db holds the database to use when tail-based sampling is configured. + dbMu sync.Mutex + db *eventstorage.StorageManager // samplerUUID is a UUID used to identify sampled trace ID documents // published by this process. 
@@ -117,11 +114,10 @@ func newTailSamplingProcessor(args beater.ServerParams) (*sampling.Processor, er } storageDir := paths.Resolve(paths.Data, tailSamplingStorageDir) - badgerDB, err = getBadgerDB(storageDir) + db, err := getDB(storageDir) if err != nil { - return nil, fmt.Errorf("failed to get Badger database: %w", err) + return nil, fmt.Errorf("failed to get TBS database: %w", err) } - readWriter := getStorage(badgerDB) policies := make([]sampling.Policy, len(tailSamplingConfig.Policies)) for i, in := range tailSamplingConfig.Policies { @@ -155,10 +151,7 @@ func newTailSamplingProcessor(args beater.ServerParams) (*sampling.Processor, er UUID: samplerUUID.String(), }, StorageConfig: sampling.StorageConfig{ - DB: badgerDB, - Storage: readWriter, - StorageDir: storageDir, - StorageGCInterval: tailSamplingConfig.StorageGCInterval, + DB: db, StorageLimit: tailSamplingConfig.StorageLimitParsed, TTL: tailSamplingConfig.TTL, DiscardOnWriteFailure: tailSamplingConfig.DiscardOnWriteFailure, @@ -166,26 +159,17 @@ func newTailSamplingProcessor(args beater.ServerParams) (*sampling.Processor, er }) } -func getBadgerDB(storageDir string) (*eventstorage.StorageManager, error) { - badgerMu.Lock() - defer badgerMu.Unlock() - if badgerDB == nil { +func getDB(storageDir string) (*eventstorage.StorageManager, error) { + dbMu.Lock() + defer dbMu.Unlock() + if db == nil { sm, err := eventstorage.NewStorageManager(storageDir) if err != nil { return nil, err } - badgerDB = sm - } - return badgerDB, nil -} - -func getStorage(sm *eventstorage.StorageManager) *eventstorage.ManagedReadWriter { - storageMu.Lock() - defer storageMu.Unlock() - if storage == nil { - storage = sm.NewReadWriter() + db = sm } - return storage + return db, nil } // runServerWithProcessors runs the APM Server and the given list of processors. @@ -249,19 +233,19 @@ func wrapServer(args beater.ServerParams, runServer beater.RunServerFunc) (beate return args, wrappedRunServer, nil } -// closeBadger is called at process exit time to close the badger.DB opened +// closeDB is called at process exit time to close the StorageManager opened // by the tail-based sampling processor constructor, if any. This is never -// called concurrently with opening badger.DB/accessing the badgerDB global, -// so it does not need to hold badgerMu. -func closeBadger() error { - if badgerDB != nil { - return badgerDB.Close() +// called concurrently with opening DB/accessing the db global, +// so it does not need to hold dbMu. +func closeDB() error { + if db != nil { + return db.Close() } return nil } func cleanup() error { - return closeBadger() + return closeDB() } func Main() error { diff --git a/x-pack/apm-server/main_test.go b/x-pack/apm-server/main_test.go index fad69edc008..e79f5c6dd8c 100644 --- a/x-pack/apm-server/main_test.go +++ b/x-pack/apm-server/main_test.go @@ -33,7 +33,7 @@ func TestMonitoring(t *testing.T) { home := t.TempDir() err := paths.InitPaths(&paths.Path{Home: home}) require.NoError(t, err) - defer closeBadger() // close badger.DB so data dir can be deleted on Windows + defer closeDB() // close DB so data dir can be deleted on Windows cfg := config.DefaultConfig() cfg.Sampling.Tail.Enabled = true diff --git a/x-pack/apm-server/sampling/config.go b/x-pack/apm-server/sampling/config.go index b6d4d6ce252..1e74a4c70f6 100644 --- a/x-pack/apm-server/sampling/config.go +++ b/x-pack/apm-server/sampling/config.go @@ -95,24 +95,16 @@ type DataStreamConfig struct { // StorageConfig holds Processor configuration related to event storage. 
type StorageConfig struct { - // DB holds the badger database in which event storage will be maintained. + // DB holds the StorageManager in which event storage will be maintained. // // DB will not be closed when the processor is closed. DB *eventstorage.StorageManager - // Storage holds the read writers which provide sharded, locked access to storage. - // - // Storage lives outside processor lifecycle and will not be closed when processor - // is closed - Storage rw - - // StorageDir holds the directory in which event storage will be maintained. - StorageDir string - - // StorageGCInterval holds the amount of time between storage garbage collections. - StorageGCInterval time.Duration + // Storage overrides the default DB storage RW. + // For testing only. + Storage eventstorage.RW - // StorageLimit for the badger database, in bytes. + // StorageLimit for the TBS database, in bytes. StorageLimit uint64 // TTL holds the amount of time before events and sampling decisions @@ -242,15 +234,6 @@ func (config StorageConfig) validate() error { if config.DB == nil { return errors.New("DB unspecified") } - if config.Storage == nil { - return errors.New("Storage unspecified") - } - if config.StorageDir == "" { - return errors.New("StorageDir unspecified") - } - if config.StorageGCInterval <= 0 { - return errors.New("StorageGCInterval unspecified or negative") - } if config.TTL <= 0 { return errors.New("TTL unspecified or negative") } diff --git a/x-pack/apm-server/sampling/config_test.go b/x-pack/apm-server/sampling/config_test.go index 697cafec588..b5575a939b3 100644 --- a/x-pack/apm-server/sampling/config_test.go +++ b/x-pack/apm-server/sampling/config_test.go @@ -73,15 +73,6 @@ func TestNewProcessorConfigInvalid(t *testing.T) { assertInvalidConfigError("invalid storage config: DB unspecified") config.DB = &eventstorage.StorageManager{} - assertInvalidConfigError("invalid storage config: Storage unspecified") - config.Storage = &eventstorage.ManagedReadWriter{} - - assertInvalidConfigError("invalid storage config: StorageDir unspecified") - config.StorageDir = "tbs" - - assertInvalidConfigError("invalid storage config: StorageGCInterval unspecified or negative") - config.StorageGCInterval = 1 - assertInvalidConfigError("invalid storage config: TTL unspecified or negative") config.TTL = 1 } diff --git a/x-pack/apm-server/sampling/eventstorage/badger.go b/x-pack/apm-server/sampling/eventstorage/badger.go deleted file mode 100644 index 99219262d4f..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/badger.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -package eventstorage - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/elastic/apm-server/internal/logs" - "github.com/elastic/elastic-agent-libs/logp" -) - -const ( - defaultValueLogFileSize = 64 * 1024 * 1024 -) - -// OpenBadger creates or opens a Badger database with the specified location -// and value log file size. If the value log file size is <= 0, the default -// of 64MB will be used. -// -// NOTE(axw) only one badger.DB for a given storage directory may be open at any given time. 
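For orientation, a minimal sketch of how the slimmed-down StorageConfig above might now be populated, given that StorageDir and StorageGCInterval are removed and Storage survives only as a test-only override. Only fields visible in this diff are set, and newStorageConfig is a hypothetical helper, not part of the change.

    package main

    import (
        "time"

        "github.com/elastic/apm-server/x-pack/apm-server/sampling"
        "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage"
    )

    // newStorageConfig is a hypothetical helper: the StorageManager is the only
    // storage handle the processor needs now; the default RW is derived from it,
    // and the old Storage/StorageDir/StorageGCInterval fields are gone.
    func newStorageConfig(storageDir string) (sampling.StorageConfig, error) {
        sm, err := eventstorage.NewStorageManager(storageDir)
        if err != nil {
            return sampling.StorageConfig{}, err
        }
        return sampling.StorageConfig{
            DB:           sm,               // single handle replacing the old DB + Storage pair
            StorageLimit: 3_000_000_000,    // bytes; 0 disables the limit check
            TTL:          30 * time.Minute, // events and decisions expire after TTL
        }, nil
    }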
-func OpenBadger(storageDir string, valueLogFileSize int64) (*badger.DB, error) { - logger := logp.NewLogger(logs.Sampling) - // Tunable memory options: - // - NumMemtables - default 5 in-mem tables (MaxTableSize default) - // - NumLevelZeroTables - default 5 - number of L0 tables before compaction starts. - // - NumLevelZeroTablesStall - number of L0 tables before writing stalls (waiting for compaction). - // - IndexCacheSize - default all in mem, Each table has its own bloom filter and each bloom filter is approximately of 5 MB. - // - MaxTableSize - Default 64MB - if valueLogFileSize <= 0 { - valueLogFileSize = defaultValueLogFileSize - } - const tableLimit = 4 - badgerOpts := badger.DefaultOptions(storageDir). - WithLogger(&LogpAdaptor{Logger: logger}). - WithTruncate(true). // Truncate unreadable files which cannot be read. - WithNumMemtables(tableLimit). // in-memory tables. - WithNumLevelZeroTables(tableLimit). // L0 tables. - WithNumLevelZeroTablesStall(tableLimit * 3). // Maintain the default 1-to-3 ratio before stalling. - WithMaxTableSize(int64(16 << 20)). // Max LSM table or file size. - WithValueLogFileSize(valueLogFileSize) // vlog file size. - - return badger.Open(badgerOpts) -} diff --git a/x-pack/apm-server/sampling/eventstorage/doc.go b/x-pack/apm-server/sampling/eventstorage/doc.go new file mode 100644 index 00000000000..4bd93791b94 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/doc.go @@ -0,0 +1,14 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Package eventstorage implements the storage layer for tail-based sampling event +// and sampling decision read-writes. +// +// The database of choice is Pebble, which does not have TTL handling built-in, +// and we implement our own TTL handling on top of the database: +// - TTL is divided up into N parts, where N is partitionsPerTTL. +// - A database holds N + 1 + 1 partitions. +// - Every TTL/N we will discard the oldest partition, so we keep a rolling window of N+1 partitions. +// - Writes will go to the most recent partition, and we'll read across N+1 partitions +package eventstorage diff --git a/x-pack/apm-server/sampling/eventstorage/logger.go b/x-pack/apm-server/sampling/eventstorage/logger.go deleted file mode 100644 index 695a68e8a78..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/logger.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -package eventstorage - -import ( - "fmt" - "sync" - - "github.com/elastic/elastic-agent-libs/logp" -) - -// LogpAdaptor adapts logp.Logger to the badger.Logger interface. -type LogpAdaptor struct { - *logp.Logger - - mu sync.RWMutex - last string -} - -// Errorf prints the log message when the current message isn't the same as the -// previously logged message. -func (a *LogpAdaptor) Errorf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if a.setLast(msg) { - a.Logger.Errorf(format, args...) 
- } -} - -func (a *LogpAdaptor) setLast(msg string) bool { - a.mu.RLock() - if msg != a.last { - a.mu.RUnlock() - return false - } - a.mu.RUnlock() - a.mu.Lock() - defer a.mu.Unlock() - shouldSet := msg != a.last - if shouldSet { - a.last = msg - } - return shouldSet -} - -// Warningf adapts badger.Logger.Warningf to logp.Logger.Warngf. -func (a *LogpAdaptor) Warningf(format string, args ...interface{}) { - a.Warnf(format, args...) -} diff --git a/x-pack/apm-server/sampling/eventstorage/partition_rw.go b/x-pack/apm-server/sampling/eventstorage/partition_rw.go new file mode 100644 index 00000000000..67686725146 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/partition_rw.go @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "errors" + + "github.com/elastic/apm-data/model/modelpb" +) + +// PartitionReadWriter reads from and writes to storage across partitions. +type PartitionReadWriter struct { + s *Storage +} + +// WriteTraceSampled records the tail-sampling decision for the given trace ID. +func (rw *PartitionReadWriter) WriteTraceSampled(traceID string, sampled bool) error { + rw.s.partitioner.mu.RLock() + defer rw.s.partitioner.mu.RUnlock() + pid := rw.s.partitioner.Current() + return NewPrefixReadWriter(rw.s.db, byte(pid), rw.s.codec).WriteTraceSampled(traceID, sampled) +} + +// IsTraceSampled reports whether traceID belongs to a trace that is sampled +// or unsampled. If no sampling decision has been recorded, IsTraceSampled +// returns ErrNotFound. +// +// The performance of IsTraceSampled is crucial since it is in the hot path. +// It is called +// 1. when a remote sampling decision is received from pubsub +// 2. (hot path) when a transaction / span comes in, check if a sampling decision has already been made +func (rw *PartitionReadWriter) IsTraceSampled(traceID string) (bool, error) { + rw.s.partitioner.mu.RLock() + defer rw.s.partitioner.mu.RUnlock() + var errs []error + for pid := range rw.s.partitioner.Actives() { + sampled, err := NewPrefixReadWriter(rw.s.db, byte(pid), rw.s.codec).IsTraceSampled(traceID) + if err == nil { + return sampled, nil + } else if err != ErrNotFound { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return false, errors.Join(errs...) + } + return false, ErrNotFound +} + +// WriteTraceEvent writes a trace event to storage. +func (rw *PartitionReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { + rw.s.partitioner.mu.RLock() + defer rw.s.partitioner.mu.RUnlock() + pid := rw.s.partitioner.Current() + return NewPrefixReadWriter(rw.s.db, byte(pid), rw.s.codec).WriteTraceEvent(traceID, id, event) +} + +// DeleteTraceEvent deletes the trace event from storage. +func (rw *PartitionReadWriter) DeleteTraceEvent(traceID, id string) error { + rw.s.partitioner.mu.RLock() + defer rw.s.partitioner.mu.RUnlock() + var errs []error + for pid := range rw.s.partitioner.Actives() { + err := NewPrefixReadWriter(rw.s.db, byte(pid), rw.s.codec).DeleteTraceEvent(traceID, id) + if err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) +} + +// ReadTraceEvents reads trace events with the given trace ID from storage into out. 
+func (rw *PartitionReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { + rw.s.partitioner.mu.RLock() + defer rw.s.partitioner.mu.RUnlock() + var errs []error + for pid := range rw.s.partitioner.Actives() { + err := NewPrefixReadWriter(rw.s.db, byte(pid), rw.s.codec).ReadTraceEvents(traceID, out) + if err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) +} diff --git a/x-pack/apm-server/sampling/eventstorage/partitioner.go b/x-pack/apm-server/sampling/eventstorage/partitioner.go new file mode 100644 index 00000000000..1e9e63bd149 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/partitioner.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "iter" + "sync" +) + +const ( + // maxTotalPartitions is the maximum number of total partitions. + // It is used for a sanity check specific to how we use it as a byte prefix in database keys. + // It MUST be less than 256 to be contained in a byte. + // It has additional (arbitrary) limitations: + // - MUST be less than reservedKeyPrefix to avoid accidentally overwriting reserved keys down the line. + // - MUST be less than traceIDSeparator to avoid being misinterpreted as the separator during pebble internal key comparisons + maxTotalPartitions = int(min(reservedKeyPrefix, traceIDSeparator)) - 1 +) + +// Partitioner is a partitioned ring with `total` number of partitions. +// 1 of them is inactive while all the others are active. +// `current` points at the rightmost active partition. +// +// Example for total=4: +// (A: active, I: inactive, ^ points at the current active entry) +// A-I-A-A +// ^...... +// current +type Partitioner struct { + total int // length of the ring + current int + mu sync.RWMutex +} + +// NewPartitioner returns a partitioner with `actives` number of active partitions. +func NewPartitioner(actives, currentID int) *Partitioner { + total := actives + 1 // actives + 1 inactive + if total >= maxTotalPartitions { + panic("too many partitions") + } + return &Partitioner{total: total, current: currentID} +} + +// Rotate rotates partitions to the right by 1 position and +// returns the ID of the new current active entry. +// +// Example for total=4: +// (A: active, I: inactive, ^ points at the current active entry) +// A-I-A-A +// ^...... +// +// After Rotate: +// A-A-I-A +// ..^.... +func (p *Partitioner) Rotate() int { + p.mu.Lock() + defer p.mu.Unlock() + p.current = (p.current + 1) % p.total + return p.current +} + +// Actives returns an iterator containing all active partitions. +// It contains total - 1 partitions. +// Callers should obtain p.mu.RLock when using the returned PIDs. +func (p *Partitioner) Actives() iter.Seq[int] { + cur := p.current + return func(yield func(int) bool) { + for i := 0; i < p.total-1; i++ { + if !yield((cur + p.total - i) % p.total) { + return + } + } + } +} + +// Inactive returns the ID of the inactive partition. +// Callers should obtain p.mu.RLock when using the returned PID. +func (p *Partitioner) Inactive() int { + return (p.current + 1) % p.total +} + +// Current returns the ID of the current partition (rightmost active). +// Callers should obtain p.mu.RLock when using the returned PID. 
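The PartitionReadWriter above always writes to the current partition and reads across all active ones; the rotation that turns this into a TTL mechanism is driven elsewhere, presumably by the StorageManager, which is outside this hunk. A rough, self-contained sketch of that relationship, with partitionsPerTTL assumed to be 1 purely for illustration:

    package main

    import (
        "fmt"
        "time"

        "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage"
    )

    func main() {
        // Assumed value; the real constant lives inside the eventstorage package.
        const partitionsPerTTL = 1
        ttl := 30 * time.Minute

        // N+1 active partitions plus 1 inactive, as described in doc.go.
        p := eventstorage.NewPartitioner(partitionsPerTTL+1, 0)

        // The owner of the partitioner would rotate every TTL/N: the oldest
        // active partition becomes inactive (and its key range can be dropped),
        // while new writes land in the new current partition.
        rotateEvery := ttl / partitionsPerTTL
        fmt.Println("rotate every", rotateEvery) // 30m0s under these assumptions

        for i := 0; i < 3; i++ {
            cur := p.Rotate()
            fmt.Printf("writes -> partition %d, droppable -> partition %d\n", cur, p.Inactive())
        }
    }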
+func (p *Partitioner) Current() int { + return p.current +} diff --git a/x-pack/apm-server/sampling/eventstorage/partitioner_test.go b/x-pack/apm-server/sampling/eventstorage/partitioner_test.go new file mode 100644 index 00000000000..4f302d47085 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/partitioner_test.go @@ -0,0 +1,58 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage_test + +import ( + "iter" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" +) + +func iterToSlice[T any](it iter.Seq[T]) (s []T) { + for i := range it { + s = append(s, i) + } + return +} + +func TestPartitioner(t *testing.T) { + p := eventstorage.NewPartitioner(2, 0) // partition id 0, 1, 2 + + assert.Equal(t, 0, p.Current()) + assert.Equal(t, 1, p.Inactive()) + assert.Equal(t, []int{0, 2}, iterToSlice(p.Actives())) + + // 0 -> 1 + p.Rotate() + + assert.Equal(t, 1, p.Current()) + assert.Equal(t, 2, p.Inactive()) + assert.Equal(t, []int{1, 0}, iterToSlice(p.Actives())) + + // 1 -> 2 + p.Rotate() + + assert.Equal(t, 2, p.Current()) + assert.Equal(t, 0, p.Inactive()) + assert.Equal(t, []int{2, 1}, iterToSlice(p.Actives())) + + // 2 -> 0 + p.Rotate() + + assert.Equal(t, 0, p.Current()) + assert.Equal(t, 1, p.Inactive()) + assert.Equal(t, []int{0, 2}, iterToSlice(p.Actives())) +} + +func TestPartitionerCurrentID(t *testing.T) { + p := eventstorage.NewPartitioner(2, 1) + + assert.Equal(t, 1, p.Current()) + assert.Equal(t, 2, p.Inactive()) + assert.Equal(t, []int{1, 0}, iterToSlice(p.Actives())) +} diff --git a/x-pack/apm-server/sampling/eventstorage/pebble.go b/x-pack/apm-server/sampling/eventstorage/pebble.go new file mode 100644 index 00000000000..319836c622d --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/pebble.go @@ -0,0 +1,74 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "bytes" + "path/filepath" + + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/bloom" + + "github.com/elastic/apm-server/internal/logs" + "github.com/elastic/elastic-agent-libs/logp" +) + +func eventComparer() *pebble.Comparer { + comparer := *pebble.DefaultComparer + // Required for prefix bloom filter + comparer.Split = func(k []byte) int { + if idx := bytes.IndexByte(k, traceIDSeparator); idx != -1 { + return idx + 1 + } + // If traceID separator does not exist, consider the entire key as prefix. + // This is required for deletes like DeleteRange([]byte{0}, []byte{1}) to work without specifying the separator. 
+ return len(k) + } + comparer.Compare = func(a, b []byte) int { + ap := comparer.Split(a) // a prefix length + bp := comparer.Split(b) // b prefix length + if prefixCmp := bytes.Compare(a[:ap], b[:bp]); prefixCmp != 0 { + return prefixCmp + } + return comparer.ComparePointSuffixes(a[ap:], b[bp:]) + } + comparer.Name = "apmserver.EventComparer" + return &comparer +} + +func OpenEventPebble(storageDir string) (*pebble.DB, error) { + opts := &pebble.Options{ + FormatMajorVersion: pebble.FormatColumnarBlocks, + Logger: logp.NewLogger(logs.Sampling), + MemTableSize: 16 << 20, + Levels: []pebble.LevelOptions{ + { + BlockSize: 16 << 10, + Compression: func() pebble.Compression { return pebble.SnappyCompression }, + FilterPolicy: bloom.FilterPolicy(10), + FilterType: pebble.TableFilter, + }, + }, + Comparer: eventComparer(), + } + opts.Experimental.MaxWriterConcurrency = 1 // >0 enables parallel writers, the actual value doesn't matter + return pebble.Open(filepath.Join(storageDir, "event"), opts) +} + +func OpenDecisionPebble(storageDir string) (*pebble.DB, error) { + return pebble.Open(filepath.Join(storageDir, "decision"), &pebble.Options{ + FormatMajorVersion: pebble.FormatColumnarBlocks, + Logger: logp.NewLogger(logs.Sampling), + MemTableSize: 2 << 20, + Levels: []pebble.LevelOptions{ + { + BlockSize: 2 << 10, + Compression: func() pebble.Compression { return pebble.NoCompression }, + FilterPolicy: bloom.FilterPolicy(10), + FilterType: pebble.TableFilter, + }, + }, + }) +} diff --git a/x-pack/apm-server/sampling/eventstorage/pebble_test.go b/x-pack/apm-server/sampling/eventstorage/pebble_test.go new file mode 100644 index 00000000000..e2ec93a914b --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/pebble_test.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/assert" +) + +func TestEventComparer(t *testing.T) { + err := pebble.CheckComparer(eventComparer(), [][]byte{ + []byte("12:"), + []byte("123:"), + []byte("foo1:"), + []byte("foo12:"), + []byte("foo2:"), + }, [][]byte{ + []byte("12"), + []byte("123"), + []byte("bar1"), + []byte("bar12"), + []byte("bar2"), + }) + assert.NoError(t, err) +} diff --git a/x-pack/apm-server/sampling/eventstorage/prefix.go b/x-pack/apm-server/sampling/eventstorage/prefix.go new file mode 100644 index 00000000000..35df65e6159 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/prefix.go @@ -0,0 +1,132 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "bytes" + "errors" + "fmt" + + "github.com/cockroachdb/pebble/v2" + + "github.com/elastic/apm-data/model/modelpb" +) + +const ( + // NOTE(axw) these values (and their meanings) must remain stable + // over time, to avoid misinterpreting historical data. + entryMetaTraceSampled byte = 's' + entryMetaTraceUnsampled byte = 'u' + + // traceIDSeparator is the separator between trace ID and transaction / span ID + traceIDSeparator byte = ':' +) + +var ( + // ErrNotFound is returned by the RW.IsTraceSampled method, + // for non-existing trace IDs. 
+ ErrNotFound = errors.New("key not found") +) + +func NewPrefixReadWriter(db db, prefix byte, codec Codec) PrefixReadWriter { + return PrefixReadWriter{db: db, prefix: prefix, codec: codec} +} + +type PrefixReadWriter struct { + db db + prefix byte + codec Codec +} + +func (rw PrefixReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { + var b bytes.Buffer + b.Grow(1 + len(traceID) + 1) + b.WriteByte(rw.prefix) + b.WriteString(traceID) + b.WriteByte(traceIDSeparator) + + iter, err := rw.db.NewIter(&pebble.IterOptions{}) + if err != nil { + return err + } + defer iter.Close() + + // SeekPrefixGE uses prefix bloom filter for on disk tables. + // These bloom filters are cached in memory, and a "miss" on bloom filter avoids disk IO to check the actual table. + // Memtables still need to be scanned as pebble has no bloom filter on memtables. + // + // SeekPrefixGE ensures the prefix is present and does not require lower bound and upper bound to be set on iterator. + if valid := iter.SeekPrefixGE(b.Bytes()); !valid { + return nil + } + for ; iter.Valid(); iter.Next() { + event := &modelpb.APMEvent{} + data, err := iter.ValueAndErr() + if err != nil { + return err + } + if err := rw.codec.DecodeEvent(data, event); err != nil { + return fmt.Errorf("codec failed to decode event: %w", err) + } + *out = append(*out, event) + } + return nil +} + +func (rw PrefixReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { + data, err := rw.codec.EncodeEvent(event) + if err != nil { + return err + } + var b bytes.Buffer + b.Grow(1 + len(traceID) + 1 + len(id)) + b.WriteByte(rw.prefix) + b.WriteString(traceID) + b.WriteByte(traceIDSeparator) + b.WriteString(id) + key := b.Bytes() + return rw.db.Set(key, data, pebble.NoSync) +} + +func (rw PrefixReadWriter) WriteTraceSampled(traceID string, sampled bool) error { + var b bytes.Buffer + b.Grow(1 + len(traceID)) + b.WriteByte(rw.prefix) + b.WriteString(traceID) + + meta := entryMetaTraceUnsampled + if sampled { + meta = entryMetaTraceSampled + } + return rw.db.Set(b.Bytes(), []byte{meta}, pebble.NoSync) +} + +func (rw PrefixReadWriter) IsTraceSampled(traceID string) (bool, error) { + var b bytes.Buffer + b.Grow(1 + len(traceID)) + b.WriteByte(rw.prefix) + b.WriteString(traceID) + + item, closer, err := rw.db.Get(b.Bytes()) + if err == pebble.ErrNotFound { + return false, ErrNotFound + } else if err != nil { + return false, err + } + defer closer.Close() + return item[0] == entryMetaTraceSampled, nil +} + +func (rw PrefixReadWriter) DeleteTraceEvent(traceID, id string) error { + var b bytes.Buffer + b.Grow(1 + len(traceID) + 1 + len(id)) + b.WriteByte(rw.prefix) + b.WriteString(traceID) + b.WriteByte(traceIDSeparator) + b.WriteString(id) + key := b.Bytes() + + return rw.db.Delete(key, pebble.NoSync) +} diff --git a/x-pack/apm-server/sampling/eventstorage/prefix_test.go b/x-pack/apm-server/sampling/eventstorage/prefix_test.go new file mode 100644 index 00000000000..b54696e6a3e --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/prefix_test.go @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package eventstorage_test + +import ( + "fmt" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/gofrs/uuid/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-data/model/modelpb" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" +) + +func newEventPebble(t *testing.T) *pebble.DB { + db, err := eventstorage.OpenEventPebble(t.TempDir()) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + return db +} + +func newDecisionPebble(t *testing.T) *pebble.DB { + db, err := eventstorage.OpenDecisionPebble(t.TempDir()) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + return db +} + +func TestPrefixReadWriter_WriteTraceEvent(t *testing.T) { + codec := eventstorage.ProtobufCodec{} + db := newEventPebble(t) + traceID := "foo" + txnID := "bar" + txn := makeTransaction(txnID, traceID) + rw := eventstorage.NewPrefixReadWriter(db, 1, codec) + + check := func() { + err := rw.WriteTraceEvent(traceID, txnID, txn) + assert.NoError(t, err) + item, closer, err := db.Get(append([]byte{1}, []byte("foo:bar")...)) + assert.NoError(t, err) + defer closer.Close() + var actual modelpb.APMEvent + err = codec.DecodeEvent(item, &actual) + assert.NoError(t, err) + assert.Equal(t, *txn, actual) + } + + check() + + // Try writing to the same key again to simulate misbehaving agent / race condition + check() +} + +func TestPrefixReadWriter_ReadTraceEvents(t *testing.T) { + codec := eventstorage.ProtobufCodec{} + db := newEventPebble(t) + rw := eventstorage.NewPrefixReadWriter(db, 1, codec) + + traceID := "foo1" + for _, txnID := range []string{"bar", "baz"} { + txn := makeTransaction(txnID, traceID) + err := rw.WriteTraceEvent(traceID, txnID, txn) + require.NoError(t, err) + } + + // Create transactions with similar trace IDs to ensure that iterator upper bound is enforced + txn := makeTransaction("bar", "foo2") + err := rw.WriteTraceEvent("foo2", "bar", txn) + require.NoError(t, err) + + txn = makeTransaction("bar", "foo12") + err = rw.WriteTraceEvent("foo12", "bar", txn) + require.NoError(t, err) + + var out modelpb.Batch + err = rw.ReadTraceEvents(traceID, &out) + assert.NoError(t, err) + assert.Equal(t, modelpb.Batch{ + makeTransaction("bar", traceID), + makeTransaction("baz", traceID), + }, out) +} + +func TestPrefixReadWriter_DeleteTraceEvent(t *testing.T) { + codec := eventstorage.ProtobufCodec{} + db := newEventPebble(t) + traceID := "foo" + txnID := "bar" + txn := makeTransaction(txnID, traceID) + rw := eventstorage.NewPrefixReadWriter(db, 1, codec) + err := rw.WriteTraceEvent(traceID, txnID, txn) + require.NoError(t, err) + + key := append([]byte{1}, []byte("foo:bar")...) 
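The literal key built in the test above mirrors how PrefixReadWriter lays keys out: a one-byte partition prefix, the trace ID, the ':' separator, then the event ID. A standalone sketch of that layout and of where the custom comparer's Split cuts it; the split logic is re-implemented locally here for illustration and is not the package's code.

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        const (
            partition byte = 1   // partition prefix byte
            separator byte = ':' // traceIDSeparator
        )
        traceID, eventID := "foo", "bar"

        // Same layout as PrefixReadWriter.WriteTraceEvent:
        // <partition><traceID>:<eventID>
        var b bytes.Buffer
        b.WriteByte(partition)
        b.WriteString(traceID)
        b.WriteByte(separator)
        b.WriteString(eventID)
        key := b.Bytes()

        // Mirrors eventComparer's Split: everything up to and including the
        // separator is the "prefix", which feeds the prefix bloom filter and
        // lets SeekPrefixGE skip sstables that hold no keys for this trace.
        split := len(key) // decision keys have no separator: whole key is prefix
        if i := bytes.IndexByte(key, separator); i != -1 {
            split = i + 1
        }
        fmt.Printf("key=%q prefix=%q suffix=%q\n", key, key[:split], key[split:])
        // prints: key="\x01foo:bar" prefix="\x01foo:" suffix="bar"
    }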
+ + _, closer, err := db.Get(key) + assert.NoError(t, err) + err = closer.Close() + assert.NoError(t, err) + + err = rw.DeleteTraceEvent(traceID, txnID) + assert.NoError(t, err) + + _, _, err = db.Get(key) + assert.ErrorIs(t, err, pebble.ErrNotFound) +} + +func TestPrefixReadWriter_WriteTraceSampled(t *testing.T) { + for _, sampled := range []bool{true, false} { + t.Run(fmt.Sprintf("sampled=%v", sampled), func(t *testing.T) { + codec := eventstorage.ProtobufCodec{} + db := newDecisionPebble(t) + traceID := "foo" + rw := eventstorage.NewPrefixReadWriter(db, 1, codec) + + check := func() { + err := rw.WriteTraceSampled(traceID, sampled) + assert.NoError(t, err) + item, closer, err := db.Get(append([]byte{1}, []byte("foo")...)) + assert.NoError(t, err) + defer closer.Close() + assert.NoError(t, err) + if sampled { + assert.Equal(t, []byte{'s'}, item) + } else { + assert.Equal(t, []byte{'u'}, item) + } + } + + check() + + // Try writing to the same key again to simulate misbehaving agent / race condition + check() + }) + } +} + +func TestPrefixReadWriter_IsTraceSampled(t *testing.T) { + for _, tc := range []struct { + sampled bool + missing bool + }{ + { + sampled: true, + }, + { + sampled: false, + }, + { + missing: true, + }, + } { + t.Run(fmt.Sprintf("sampled=%v,missing=%v", tc.sampled, tc.missing), func(t *testing.T) { + db := newDecisionPebble(t) + rw := eventstorage.NewPrefixReadWriter(db, 1, nopCodec{}) + traceID := uuid.Must(uuid.NewV4()).String() + if !tc.missing { + err := rw.WriteTraceSampled(traceID, tc.sampled) + require.NoError(t, err) + } + sampled, err := rw.IsTraceSampled(traceID) + if tc.missing { + assert.ErrorIs(t, err, eventstorage.ErrNotFound) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.sampled, sampled) + } + }) + } +} diff --git a/x-pack/apm-server/sampling/eventstorage/rw.go b/x-pack/apm-server/sampling/eventstorage/rw.go new file mode 100644 index 00000000000..e42ef8ce345 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/rw.go @@ -0,0 +1,109 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package eventstorage + +import ( + "errors" + "fmt" + + "github.com/elastic/apm-data/model/modelpb" +) + +var ( + // ErrLimitReached is returned by RW methods when storage usage + // is greater than configured limit. 
+ ErrLimitReached = errors.New("configured storage limit reached") +) + +type RW interface { + ReadTraceEvents(traceID string, out *modelpb.Batch) error + WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error + WriteTraceSampled(traceID string, sampled bool) error + IsTraceSampled(traceID string) (bool, error) + DeleteTraceEvent(traceID, id string) error +} + +type SplitReadWriter struct { + eventRW, decisionRW RW +} + +func (s SplitReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { + return s.eventRW.ReadTraceEvents(traceID, out) +} + +func (s SplitReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { + return s.eventRW.WriteTraceEvent(traceID, id, event) +} + +func (s SplitReadWriter) WriteTraceSampled(traceID string, sampled bool) error { + return s.decisionRW.WriteTraceSampled(traceID, sampled) +} + +func (s SplitReadWriter) IsTraceSampled(traceID string) (bool, error) { + return s.decisionRW.IsTraceSampled(traceID) +} + +func (s SplitReadWriter) DeleteTraceEvent(traceID, id string) error { + return s.eventRW.DeleteTraceEvent(traceID, id) +} + +func (s SplitReadWriter) Close() error { + return nil +} + +type storageLimitChecker interface { + DiskUsage() uint64 + StorageLimit() uint64 +} + +type StorageLimitReadWriter struct { + checker storageLimitChecker + nextRW RW +} + +func NewStorageLimitReadWriter(checker storageLimitChecker, nextRW RW) StorageLimitReadWriter { + return StorageLimitReadWriter{ + checker: checker, + nextRW: nextRW, + } +} + +func (s StorageLimitReadWriter) checkStorageLimit() error { + limit := s.checker.StorageLimit() + if limit != 0 { + usage := s.checker.DiskUsage() + if usage >= limit { + return fmt.Errorf("%w (current: %d, limit %d)", ErrLimitReached, usage, limit) + } + } + return nil +} + +func (s StorageLimitReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { + return s.nextRW.ReadTraceEvents(traceID, out) +} + +func (s StorageLimitReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { + if err := s.checkStorageLimit(); err != nil { + return err + } + return s.nextRW.WriteTraceEvent(traceID, id, event) +} + +func (s StorageLimitReadWriter) WriteTraceSampled(traceID string, sampled bool) error { + if err := s.checkStorageLimit(); err != nil { + return err + } + return s.nextRW.WriteTraceSampled(traceID, sampled) +} + +func (s StorageLimitReadWriter) IsTraceSampled(traceID string) (bool, error) { + return s.nextRW.IsTraceSampled(traceID) +} + +func (s StorageLimitReadWriter) DeleteTraceEvent(traceID, id string) error { + // Technically DeleteTraceEvent writes, but it should have a net effect of reducing disk usage + return s.nextRW.DeleteTraceEvent(traceID, id) +} diff --git a/x-pack/apm-server/sampling/eventstorage/rw_test.go b/x-pack/apm-server/sampling/eventstorage/rw_test.go new file mode 100644 index 00000000000..bcf6490c16d --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/rw_test.go @@ -0,0 +1,109 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package eventstorage_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-data/model/modelpb" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" +) + +type mockChecker struct { + usage, limit uint64 +} + +func (m mockChecker) DiskUsage() uint64 { + return m.usage +} + +func (m mockChecker) StorageLimit() uint64 { + return m.limit +} + +type mockRW struct { + callback func() +} + +func (m mockRW) ReadTraceEvents(traceID string, out *modelpb.Batch) error { + m.callback() + return nil +} + +func (m mockRW) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { + m.callback() + return nil +} + +func (m mockRW) WriteTraceSampled(traceID string, sampled bool) error { + m.callback() + return nil +} + +func (m mockRW) IsTraceSampled(traceID string) (bool, error) { + m.callback() + return false, nil +} + +func (m mockRW) DeleteTraceEvent(traceID, id string) error { + m.callback() + return nil +} + +func (m mockRW) Flush() error { + m.callback() + return nil +} + +func TestStorageLimitReadWriter(t *testing.T) { + for _, tt := range []struct { + limit, usage uint64 + wantCalled bool + }{ + { + limit: 0, // unlimited + usage: 1, + wantCalled: true, + }, + { + limit: 2, + usage: 3, + wantCalled: false, + }, + } { + t.Run(fmt.Sprintf("limit=%d,usage=%d", tt.limit, tt.usage), func(t *testing.T) { + checker := mockChecker{limit: tt.limit, usage: tt.usage} + var callCount int + rw := eventstorage.NewStorageLimitReadWriter(checker, mockRW{ + callback: func() { + callCount++ + }, + }) + assert.NoError(t, rw.ReadTraceEvents("foo", nil)) + _, err := rw.IsTraceSampled("foo") + assert.NoError(t, err) + assert.NoError(t, rw.DeleteTraceEvent("foo", "bar")) + + err = rw.WriteTraceEvent("foo", "bar", nil) + if tt.wantCalled { + assert.NoError(t, err) + assert.Equal(t, 4, callCount) + } else { + assert.Error(t, err) + } + err = rw.WriteTraceSampled("foo", true) + if tt.wantCalled { + assert.NoError(t, err) + assert.Equal(t, 5, callCount) + } else { + assert.Error(t, err) + } + }) + } + +} diff --git a/x-pack/apm-server/sampling/eventstorage/sharded.go b/x-pack/apm-server/sampling/eventstorage/sharded.go deleted file mode 100644 index 032f3fddad7..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/sharded.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -package eventstorage - -import ( - "errors" - "runtime" - "sync" - - "github.com/cespare/xxhash/v2" - - "github.com/elastic/apm-data/model/modelpb" -) - -// ShardedReadWriter provides sharded, locked, access to a Storage. -// -// ShardedReadWriter shards on trace ID. -type ShardedReadWriter struct { - readWriters []lockedReadWriter -} - -func newShardedReadWriter(storage *Storage) *ShardedReadWriter { - s := &ShardedReadWriter{ - // Create as many ReadWriters as there are GOMAXPROCS, which considers - // cgroup quotas, so we can ideally minimise lock contention, and scale - // up accordingly with more CPU. - readWriters: make([]lockedReadWriter, runtime.GOMAXPROCS(0)), - } - for i := range s.readWriters { - s.readWriters[i].rw = storage.NewReadWriter() - } - return s -} - -// Close closes all sharded storage readWriters. 
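With Pebble handling concurrent access internally, the locked sharding in the deleted sharded.go is no longer needed; the replacement is plain composition of the RW implementations from rw.go. A hypothetical, in-package sketch of that wiring follows; the real assembly presumably lives in StorageManager, which is not part of this hunk.

    package eventstorage

    // newLimitedRW is illustrative only: a storage-limit guard wrapped around a
    // SplitReadWriter that routes events and sampling decisions to their own
    // underlying RWs (e.g. two PartitionReadWriters over separate pebble DBs).
    func newLimitedRW(checker storageLimitChecker, eventRW, decisionRW RW) RW {
        return NewStorageLimitReadWriter(checker, SplitReadWriter{
            eventRW:    eventRW,
            decisionRW: decisionRW,
        })
    }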
-func (s *ShardedReadWriter) Close() { - for i := range s.readWriters { - s.readWriters[i].Close() - } -} - -// Flush flushes all sharded storage readWriters. -func (s *ShardedReadWriter) Flush() error { - var errs []error - for i := range s.readWriters { - if err := s.readWriters[i].Flush(); err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - -// ReadTraceEvents calls Writer.ReadTraceEvents, using a sharded, locked, Writer. -func (s *ShardedReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { - return s.getWriter(traceID).ReadTraceEvents(traceID, out) -} - -// WriteTraceEvent calls Writer.WriteTraceEvent, using a sharded, locked, Writer. -func (s *ShardedReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent, opts WriterOpts) error { - return s.getWriter(traceID).WriteTraceEvent(traceID, id, event, opts) -} - -// WriteTraceSampled calls Writer.WriteTraceSampled, using a sharded, locked, Writer. -func (s *ShardedReadWriter) WriteTraceSampled(traceID string, sampled bool, opts WriterOpts) error { - return s.getWriter(traceID).WriteTraceSampled(traceID, sampled, opts) -} - -// IsTraceSampled calls Writer.IsTraceSampled, using a sharded, locked, Writer. -func (s *ShardedReadWriter) IsTraceSampled(traceID string) (bool, error) { - return s.getWriter(traceID).IsTraceSampled(traceID) -} - -// DeleteTraceEvent calls Writer.DeleteTraceEvent, using a sharded, locked, Writer. -func (s *ShardedReadWriter) DeleteTraceEvent(traceID, id string) error { - return s.getWriter(traceID).DeleteTraceEvent(traceID, id) -} - -// getWriter returns an event storage writer for the given trace ID. -// -// This method is idempotent, which is necessary to avoid transaction -// conflicts and ensure all events are reported once a sampling decision -// has been recorded. -func (s *ShardedReadWriter) getWriter(traceID string) *lockedReadWriter { - var h xxhash.Digest - h.WriteString(traceID) - return &s.readWriters[h.Sum64()%uint64(len(s.readWriters))] -} - -type lockedReadWriter struct { - mu sync.Mutex - rw *ReadWriter -} - -func (rw *lockedReadWriter) Close() { - rw.mu.Lock() - defer rw.mu.Unlock() - rw.rw.Close() -} - -func (rw *lockedReadWriter) Flush() error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.Flush() -} - -func (rw *lockedReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.ReadTraceEvents(traceID, out) -} - -func (rw *lockedReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent, opts WriterOpts) error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.WriteTraceEvent(traceID, id, event, opts) -} - -func (rw *lockedReadWriter) WriteTraceSampled(traceID string, sampled bool, opts WriterOpts) error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.WriteTraceSampled(traceID, sampled, opts) -} - -func (rw *lockedReadWriter) IsTraceSampled(traceID string) (bool, error) { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.IsTraceSampled(traceID) -} - -func (rw *lockedReadWriter) DeleteTraceEvent(traceID, id string) error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.DeleteTraceEvent(traceID, id) -} diff --git a/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go b/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go deleted file mode 100644 index 7ef76309093..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -package eventstorage_test - -import ( - "testing" - "time" - - "github.com/gofrs/uuid/v5" - - "github.com/elastic/apm-data/model/modelpb" - "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" -) - -func BenchmarkShardedWriteTransactionUncontended(b *testing.B) { - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - sharded := store.NewShardedReadWriter() - defer sharded.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } - - b.RunParallel(func(pb *testing.PB) { - traceID := uuid.Must(uuid.NewV4()).String() - transaction := &modelpb.APMEvent{ - Transaction: &modelpb.Transaction{Id: traceID}, - } - for pb.Next() { - if err := sharded.WriteTraceEvent(traceID, traceID, transaction, wOpts); err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkShardedWriteTransactionContended(b *testing.B) { - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - sharded := store.NewShardedReadWriter() - defer sharded.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } - - // Use a single trace ID, causing all events to go through - // the same sharded writer, contending for a single lock. - traceID := uuid.Must(uuid.NewV4()).String() - - b.RunParallel(func(pb *testing.PB) { - transactionID := uuid.Must(uuid.NewV4()).String() - transaction := &modelpb.APMEvent{ - Transaction: &modelpb.Transaction{Id: transactionID}, - } - for pb.Next() { - if err := sharded.WriteTraceEvent(traceID, transactionID, transaction, wOpts); err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/x-pack/apm-server/sampling/eventstorage/storage.go b/x-pack/apm-server/sampling/eventstorage/storage.go index c11c89d647e..47f0b6fb59e 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage.go +++ b/x-pack/apm-server/sampling/eventstorage/storage.go @@ -5,51 +5,25 @@ package eventstorage import ( - "bytes" - "errors" - "fmt" - "sync/atomic" - "time" + "io" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/elastic/apm-data/model/modelpb" ) -const ( - // NOTE(axw) these values (and their meanings) must remain stable - // over time, to avoid misinterpreting historical data. - entryMetaTraceSampled = 's' - entryMetaTraceUnsampled = 'u' - entryMetaTraceEvent = 'e' - - // Initial transaction size - // len(txnKey) + 10 - baseTransactionSize = 10 + 11 -) - -var ( - // ErrNotFound is returned by by the Storage.IsTraceSampled method, - // for non-existing trace IDs. - ErrNotFound = errors.New("key not found") - - // ErrLimitReached is returned by the ReadWriter.Flush method when - // the configured StorageLimiter.Limit is true. - ErrLimitReached = errors.New("configured storage limit reached") -) - type db interface { - NewTransaction(update bool) *badger.Txn - Size() (lsm, vlog int64) - Close() error + Get(key []byte) ([]byte, io.Closer, error) + Set(key, value []byte, opts *pebble.WriteOptions) error + Delete(key []byte, opts *pebble.WriteOptions) error + NewIter(o *pebble.IterOptions) (*pebble.Iterator, error) } // Storage provides storage for sampled transactions and spans, // and for recording trace sampling decisions. 
type Storage struct { - db db - // pendingSize tracks the total size of pending writes across ReadWriters - pendingSize *atomic.Int64 + db db + partitioner *Partitioner codec Codec } @@ -60,286 +34,18 @@ type Codec interface { } // New returns a new Storage using db and codec. -func New(db db, codec Codec) *Storage { - return &Storage{db: db, pendingSize: &atomic.Int64{}, codec: codec} -} - -// NewShardedReadWriter returns a new ShardedReadWriter, for sharded -// reading and writing. -// -// The returned ShardedReadWriter must be closed when it is no longer -// needed. -func (s *Storage) NewShardedReadWriter() *ShardedReadWriter { - return newShardedReadWriter(s) -} - -// NewReadWriter returns a new ReadWriter for reading events from and -// writing events to storage. -// -// The returned ReadWriter must be closed when it is no longer needed. -func (s *Storage) NewReadWriter() *ReadWriter { - s.pendingSize.Add(baseTransactionSize) - return &ReadWriter{ - s: s, - txn: nil, // lazy init to avoid deadlock in storage manager - pendingSize: baseTransactionSize, - } -} - -// WriterOpts provides configuration options for writes to storage -type WriterOpts struct { - TTL time.Duration - StorageLimitInBytes int64 -} - -// ReadWriter provides a means of reading events from storage, and batched -// writing of events to storage. -// -// ReadWriter is not safe for concurrent access. All operations that involve -// a given trace ID should be performed with the same ReadWriter in order to -// avoid conflicts, e.g. by using consistent hashing to distribute to one of -// a set of ReadWriters, such as implemented by ShardedReadWriter. -type ReadWriter struct { - s *Storage - txn *badger.Txn - - // readKeyBuf is a reusable buffer for keys used in read operations. - // This must not be used in write operations, as keys are expected to - // be unmodified until the end of a transaction. - readKeyBuf []byte - pendingWrites int - // pendingSize tracks the size of pending writes in the current ReadWriter - pendingSize int64 -} - -func (rw *ReadWriter) lazyInit() { - if rw.txn == nil { - rw.txn = rw.s.db.NewTransaction(true) - } -} - -// Close closes the writer. Any writes that have not been flushed may be lost. -// -// This must be called when the writer is no longer needed, in order to reclaim -// resources. -func (rw *ReadWriter) Close() { - if rw.txn != nil { - rw.txn.Discard() - } -} - -// Flush waits for preceding writes to be committed to storage. -// -// Flush must be called to ensure writes are committed to storage. -// If Flush is not called before the writer is closed, then writes -// may be lost. -func (rw *ReadWriter) Flush() error { - rw.lazyInit() - - const flushErrFmt = "failed to flush pending writes: %w" - err := rw.txn.Commit() - rw.txn = rw.s.db.NewTransaction(true) - rw.s.pendingSize.Add(-rw.pendingSize) - rw.pendingWrites = 0 - rw.pendingSize = baseTransactionSize - rw.s.pendingSize.Add(baseTransactionSize) - if err != nil { - return fmt.Errorf(flushErrFmt, err) - } - return nil -} - -// WriteTraceSampled records the tail-sampling decision for the given trace ID. -func (rw *ReadWriter) WriteTraceSampled(traceID string, sampled bool, opts WriterOpts) error { - rw.lazyInit() - - key := []byte(traceID) - var meta uint8 = entryMetaTraceUnsampled - if sampled { - meta = entryMetaTraceSampled - } - return rw.writeEntry(badger.NewEntry(key[:], nil).WithMeta(meta), opts) -} - -// IsTraceSampled reports whether traceID belongs to a trace that is sampled -// or unsampled. 
If no sampling decision has been recorded, IsTraceSampled -// returns ErrNotFound. -func (rw *ReadWriter) IsTraceSampled(traceID string) (bool, error) { - rw.lazyInit() - - rw.readKeyBuf = append(rw.readKeyBuf[:0], traceID...) - item, err := rw.txn.Get(rw.readKeyBuf) - if err != nil { - if err == badger.ErrKeyNotFound { - return false, ErrNotFound - } - return false, err - } - return item.UserMeta() == entryMetaTraceSampled, nil -} - -// WriteTraceEvent writes a trace event to storage. -// -// WriteTraceEvent may return before the write is committed to storage. -// Call Flush to ensure the write is committed. -func (rw *ReadWriter) WriteTraceEvent(traceID string, id string, event *modelpb.APMEvent, opts WriterOpts) error { - rw.lazyInit() - - data, err := rw.s.codec.EncodeEvent(event) - if err != nil { - return err - } - var buf bytes.Buffer - buf.Grow(len(traceID) + 1 + len(id)) - buf.WriteString(traceID) - buf.WriteByte(':') - buf.WriteString(id) - key := buf.Bytes() - return rw.writeEntry(badger.NewEntry(key, data).WithMeta(entryMetaTraceEvent), opts) -} - -func (rw *ReadWriter) writeEntry(e *badger.Entry, opts WriterOpts) error { - rw.pendingWrites++ - entrySize := estimateSize(e) - // The badger database has an async size reconciliation, with a 1 minute - // ticker that keeps the lsm and vlog sizes updated in an in-memory map. - // It's OK to call call s.db.Size() on the hot path, since the memory - // lookup is cheap. - lsm, vlog := rw.s.db.Size() - - // there are multiple ReadWriters writing to the same storage so add - // the entry size and consider the new value to avoid TOCTOU issues. - pendingSize := rw.s.pendingSize.Add(entrySize) - rw.pendingSize += entrySize - - if current := pendingSize + lsm + vlog; opts.StorageLimitInBytes != 0 && current >= opts.StorageLimitInBytes { - // flush what we currently have and discard the current entry - if err := rw.Flush(); err != nil { - return err - } - return fmt.Errorf("%w (current: %d, limit: %d)", ErrLimitReached, current, opts.StorageLimitInBytes) - } - - if rw.pendingWrites >= 200 { - // Attempt to flush if there are 200 or more uncommitted writes. - // This ensures calls to ReadTraceEvents are not slowed down; - // ReadTraceEvents uses an iterator, which must sort all keys - // of uncommitted writes. - // The 200 value yielded a good balance between read and write speed: - // https://github.com/elastic/apm-server/pull/8407#issuecomment-1162994643 - if err := rw.Flush(); err != nil { - return err - } - - // the current ReadWriter flushed the transaction and reset the pendingSize so add - // the entrySize again. - rw.pendingSize += entrySize - rw.s.pendingSize.Add(entrySize) - } - - err := rw.txn.SetEntry(e.WithTTL(opts.TTL)) - - // If the transaction is already too big to accommodate the new entry, flush - // the existing transaction and set the entry on a new one, otherwise, - // returns early. - if err != badger.ErrTxnTooBig { - return err - } - if err := rw.Flush(); err != nil { - return err - } - rw.pendingSize += entrySize - rw.s.pendingSize.Add(entrySize) - return rw.txn.SetEntry(e.WithTTL(opts.TTL)) -} - -func estimateSize(e *badger.Entry) int64 { - // See badger WithValueThreshold option - // An storage usage of an entry depends on its size - // - // if len(e.Value) < threshold { - // return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta - // } - // return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas. 
- // - // Make a good estimate by reserving more space - estimate := len(e.Key) + len(e.Value) + 12 + 2 - // Extra bytes for the version in key. - return int64(estimate) + 10 -} - -// DeleteTraceEvent deletes the trace event from storage. -func (rw *ReadWriter) DeleteTraceEvent(traceID, id string) error { - rw.lazyInit() - - var buf bytes.Buffer - buf.Grow(len(traceID) + 1 + len(id)) - buf.WriteString(traceID) - buf.WriteByte(':') - buf.WriteString(id) - key := buf.Bytes() - - err := rw.txn.Delete(key) - // If the transaction is already too big to accommodate the new entry, flush - // the existing transaction and set the entry on a new one, otherwise, - // returns early. - if err != badger.ErrTxnTooBig { - return err - } - if err := rw.Flush(); err != nil { - return err +func New(db db, partitioner *Partitioner, codec Codec) *Storage { + return &Storage{ + db: db, + partitioner: partitioner, + codec: codec, } - - return rw.txn.Delete(key) } -// ReadTraceEvents reads trace events with the given trace ID from storage into out. -func (rw *ReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { - rw.lazyInit() - - opts := badger.DefaultIteratorOptions - rw.readKeyBuf = append(append(rw.readKeyBuf[:0], traceID...), ':') - opts.Prefix = rw.readKeyBuf - - // 1st pass: check whether there exist keys matching the prefix. - // Do not prefetch values so that the check is done in-memory. - // This is to optimize for cases when it is a miss. - opts.PrefetchValues = false - iter := rw.txn.NewIterator(opts) - iter.Rewind() - if !iter.Valid() { - iter.Close() - return nil - } - iter.Close() - - // 2nd pass: this is only done when there exist keys matching the prefix. - // Fetch the events with PrefetchValues for performance. - // This is to optimize for cases when it is a hit. - opts.PrefetchValues = true - iter = rw.txn.NewIterator(opts) - defer iter.Close() - for iter.Rewind(); iter.Valid(); iter.Next() { - item := iter.Item() - if item.IsDeletedOrExpired() { - continue - } - switch item.UserMeta() { - case entryMetaTraceEvent: - event := &modelpb.APMEvent{} - if err := item.Value(func(data []byte) error { - if err := rw.s.codec.DecodeEvent(data, event); err != nil { - return fmt.Errorf("codec failed to decode event: %w", err) - } - return nil - }); err != nil { - return err - } - *out = append(*out, event) - default: - // Unknown entry meta: ignore. - continue - } +// NewReadWriter returns a new PartitionReadWriter for reading events from and +// writing events to storage. 
+func (s *Storage) NewReadWriter() *PartitionReadWriter { + return &PartitionReadWriter{ + s: s, } - return nil } diff --git a/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go b/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go index 90f58fbd268..fa464df323b 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go +++ b/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go @@ -8,10 +8,8 @@ import ( "encoding/hex" "fmt" "testing" - "time" "github.com/gofrs/uuid/v5" - "github.com/stretchr/testify/assert" "github.com/elastic/apm-data/model/modelpb" "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" @@ -19,10 +17,8 @@ import ( func BenchmarkWriteTransaction(b *testing.B) { test := func(b *testing.B, codec eventstorage.Codec, bigTX bool) { - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, codec) - readWriter := store.NewReadWriter() - defer readWriter.Close() + sm := newStorageManager(b, eventstorage.WithCodec(codec)) + readWriter := sm.NewReadWriter() traceID := hex.EncodeToString([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) transactionID := hex.EncodeToString([]byte{1, 2, 3, 4, 5, 6, 7, 8}) @@ -39,16 +35,11 @@ func BenchmarkWriteTransaction(b *testing.B) { b.ResetTimer() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } for i := 0; i < b.N; i++ { - if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction, wOpts); err != nil { + if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } - assert.NoError(b, readWriter.Flush()) } type testCase struct { @@ -87,14 +78,8 @@ func BenchmarkReadEvents(b *testing.B) { counts := []int{0, 1, 10, 100, 199, 399, 1000} for _, count := range counts { b.Run(fmt.Sprintf("%d events", count), func(b *testing.B) { - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, codec) - readWriter := store.NewReadWriter() - defer readWriter.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } + sm := newStorageManager(b, eventstorage.WithCodec(codec)) + readWriter := sm.NewReadWriter() for i := 0; i < count; i++ { transactionID := uuid.Must(uuid.NewV4()).String() @@ -108,13 +93,11 @@ func BenchmarkReadEvents(b *testing.B) { }, } } - if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction, wOpts); err != nil { + if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } - // NOTE(marclop) We want to check how badly the read performance is affected with - // by having uncommitted events in the badger TX. b.ResetTimer() var batch modelpb.Batch for i := 0; i < b.N; i++ { @@ -167,17 +150,11 @@ func BenchmarkReadEventsHit(b *testing.B) { // And causes next iteration setup to take a very long time. 
const txnCountInTrace = 5 - test := func(b *testing.B, codec eventstorage.Codec, bigTX bool) { + test := func(b *testing.B, bigTX bool, reloadDB bool) { for _, hit := range []bool{false, true} { b.Run(fmt.Sprintf("hit=%v", hit), func(b *testing.B) { - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, codec) - readWriter := store.NewReadWriter() - defer readWriter.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Hour, - StorageLimitInBytes: 0, - } + sm := newStorageManager(b) + readWriter := sm.NewReadWriter() traceIDs := make([]string, b.N) @@ -196,15 +173,20 @@ func BenchmarkReadEventsHit(b *testing.B) { }, } } - if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction, wOpts); err != nil { + if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } } - if err := readWriter.Flush(); err != nil { - b.Fatal(err) + + if reloadDB { + if err := sm.Reload(); err != nil { + b.Fatal(err) + } } + readWriter = sm.NewReadWriter() + b.ResetTimer() var batch modelpb.Batch for i := 0; i < b.N; i++ { @@ -224,9 +206,13 @@ func BenchmarkReadEventsHit(b *testing.B) { } } - for _, bigTX := range []bool{true, false} { - b.Run(fmt.Sprintf("bigTX=%v", bigTX), func(b *testing.B) { - test(b, eventstorage.ProtobufCodec{}, bigTX) + for _, reloadDB := range []bool{false, true} { + b.Run(fmt.Sprintf("reloadDB=%v", reloadDB), func(b *testing.B) { + for _, bigTX := range []bool{true, false} { + b.Run(fmt.Sprintf("bigTX=%v", bigTX), func(b *testing.B) { + test(b, bigTX, reloadDB) + }) + } }) } } @@ -237,19 +223,13 @@ func BenchmarkIsTraceSampled(b *testing.B) { unknownTraceUUID := uuid.Must(uuid.NewV4()) // Test with varying numbers of events in the trace. - db := newBadgerDB(b, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - readWriter := store.NewReadWriter() - defer readWriter.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } + sm := newStorageManager(b) + readWriter := sm.NewReadWriter() - if err := readWriter.WriteTraceSampled(sampledTraceUUID.String(), true, wOpts); err != nil { + if err := readWriter.WriteTraceSampled(sampledTraceUUID.String(), true); err != nil { b.Fatal(err) } - if err := readWriter.WriteTraceSampled(unsampledTraceUUID.String(), false, wOpts); err != nil { + if err := readWriter.WriteTraceSampled(unsampledTraceUUID.String(), false); err != nil { b.Fatal(err) } diff --git a/x-pack/apm-server/sampling/eventstorage/storage_manager.go b/x-pack/apm-server/sampling/eventstorage/storage_manager.go index 8446f8fbf89..2d806a0bc84 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage_manager.go +++ b/x-pack/apm-server/sampling/eventstorage/storage_manager.go @@ -5,19 +5,18 @@ package eventstorage import ( + "encoding/json" "errors" "fmt" - "io/fs" "os" "path/filepath" - "strings" "sync" + "sync/atomic" "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "golang.org/x/sync/errgroup" - "github.com/elastic/apm-data/model/modelpb" "github.com/elastic/apm-server/internal/logs" "github.com/elastic/elastic-agent-libs/logp" ) @@ -26,315 +25,301 @@ const ( // subscriberPositionFile holds the file name used for persisting // the subscriber position across server restarts. subscriberPositionFile = "subscriber_position.json" -) -var ( - errDropAndRecreateInProgress = errors.New("db drop and recreate in progress") + // partitionsPerTTL holds the number of partitions that events in 1 TTL should be stored over. 
+ // Increasing partitionsPerTTL increases read amplification, but decreases storage overhead, + // as TTL GC can be performed sooner. + // + // For example, partitionPerTTL=1 means we need to keep 2 partitions active, + // such that the last entry in the previous partition is also kept for a full TTL. + // This means storage requirement is 2 * TTL, and it needs to read 2 keys per trace ID read. + // If partitionPerTTL=2, storage requirement is 1.5 * TTL at the expense of 3 reads per trace ID read. + partitionsPerTTL = 1 + + // reservedKeyPrefix is the prefix of internal keys used by StorageManager + reservedKeyPrefix byte = '~' + + // partitionerMetaKey is the key used to store partitioner metadata, e.g. last partition ID, in decision DB. + partitionerMetaKey = string(reservedKeyPrefix) + "partitioner" + + // diskUsageFetchInterval is how often disk usage is fetched which is equivalent to how long disk usage is cached. + diskUsageFetchInterval = 1 * time.Second ) -// StorageManager encapsulates badger.DB. -// It is to provide file system access, simplify synchronization and enable underlying db swaps. -// It assumes exclusive access to badger DB at storageDir. +type StorageManagerOptions func(*StorageManager) + +func WithCodec(codec Codec) StorageManagerOptions { + return func(sm *StorageManager) { + sm.codec = codec + } +} + +// StorageManager encapsulates pebble.DB. +// It assumes exclusive access to pebble DB at storageDir. type StorageManager struct { storageDir string logger *logp.Logger - db *badger.DB - storage *Storage - rw *ShardedReadWriter + eventDB *pebble.DB + decisionDB *pebble.DB + eventStorage *Storage + decisionStorage *Storage + + partitioner *Partitioner + + storageLimit atomic.Uint64 + + codec Codec - // mu guards db, storage, and rw swaps. - mu sync.RWMutex // subscriberPosMu protects the subscriber file from concurrent RW. subscriberPosMu sync.Mutex + // cachedDiskUsage is a cached result of DiskUsage + cachedDiskUsage atomic.Uint64 + // runCh acts as a mutex to ensure only 1 Run is actively running per StorageManager. // as it is possible that 2 separate Run are created by 2 TBS processors during a hot reload. runCh chan struct{} } -// NewStorageManager returns a new StorageManager with badger DB at storageDir. -func NewStorageManager(storageDir string) (*StorageManager, error) { +// NewStorageManager returns a new StorageManager with pebble DB at storageDir. +func NewStorageManager(storageDir string, opts ...StorageManagerOptions) (*StorageManager, error) { sm := &StorageManager{ storageDir: storageDir, runCh: make(chan struct{}, 1), logger: logp.NewLogger(logs.Sampling), + codec: ProtobufCodec{}, } - err := sm.reset() - if err != nil { - return nil, err + for _, opt := range opts { + opt(sm) } + + if err := sm.reset(); err != nil { + return nil, fmt.Errorf("storage manager reset error: %w", err) + } + return sm, nil } -// reset initializes db, storage, and rw. -func (s *StorageManager) reset() error { - db, err := OpenBadger(s.storageDir, -1) +// reset initializes db and storage. 
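// ---------------------------------------------------------------------------
// Editorial sketch (not part of this diff): the trade-off documented for
// partitionsPerTTL above, written out as a tiny standalone program. The
// function and variable names are illustrative only.
// ---------------------------------------------------------------------------
package main

import "fmt"

// partitionCosts mirrors the comment above: one extra partition is kept so the
// moving window of active partitions always covers at least a full TTL.
func partitionCosts(partitionsPerTTL int) (activePartitions int, storageMultiple float64) {
	activePartitions = partitionsPerTTL + 1
	// Worst case, the oldest active partition still holds data written almost
	// a full rotation interval (TTL/partitionsPerTTL) before it was needed.
	storageMultiple = float64(activePartitions) / float64(partitionsPerTTL)
	return activePartitions, storageMultiple
}

func main() {
	for _, p := range []int{1, 2, 4} {
		active, mult := partitionCosts(p)
		// Every trace-ID lookup has to consult each active partition.
		fmt.Printf("partitionsPerTTL=%d: %d active partitions, ~%.2f*TTL stored, %d reads per lookup\n",
			p, active, mult, active)
	}
}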
+func (sm *StorageManager) reset() error { + eventDB, err := OpenEventPebble(sm.storageDir) if err != nil { - return err + return fmt.Errorf("open event db error: %w", err) } - s.db = db - s.storage = New(s, ProtobufCodec{}) - s.rw = s.storage.NewShardedReadWriter() + sm.eventDB = eventDB + + decisionDB, err := OpenDecisionPebble(sm.storageDir) + if err != nil { + return fmt.Errorf("open decision db error: %w", err) + } + sm.decisionDB = decisionDB + + // Only recreate partitioner on initial create + if sm.partitioner == nil { + var currentPID int + if currentPID, err = sm.loadPartitionID(); err != nil { + sm.logger.With(logp.Error(err)).Warn("failed to load partition ID, using 0 instead") + } + // We need to keep an extra partition as buffer to respect the TTL, + // as the moving window needs to cover at least TTL at all times, + // where the moving window is defined as: + // all active partitions excluding current partition + duration since the start of current partition + activePartitions := partitionsPerTTL + 1 + sm.partitioner = NewPartitioner(activePartitions, currentPID) + } + + sm.eventStorage = New(sm.eventDB, sm.partitioner, sm.codec) + sm.decisionStorage = New(sm.decisionDB, sm.partitioner, sm.codec) + + sm.updateDiskUsage() + return nil } -// Close closes StorageManager's underlying ShardedReadWriter and badger DB -func (s *StorageManager) Close() error { - s.mu.RLock() - defer s.mu.RUnlock() - s.rw.Close() - return s.db.Close() +// loadPartitionID loads the last saved partition ID from database, +// such that partitioner resumes from where it left off before an apm-server restart. +func (sm *StorageManager) loadPartitionID() (int, error) { + item, closer, err := sm.decisionDB.Get([]byte(partitionerMetaKey)) + if errors.Is(err, pebble.ErrNotFound) { + return 0, nil + } else if err != nil { + return 0, err + } + defer closer.Close() + var pid struct { + ID int `json:"id"` + } + err = json.Unmarshal(item, &pid) + return pid.ID, err } -// Size returns the db size -func (s *StorageManager) Size() (lsm, vlog int64) { - s.mu.RLock() - defer s.mu.RUnlock() - return s.db.Size() +// savePartitionID saves the partition ID to database to be loaded by loadPartitionID later. +func (sm *StorageManager) savePartitionID(pid int) error { + return sm.decisionDB.Set([]byte(partitionerMetaKey), []byte(fmt.Sprintf(`{"id":%d}`, pid)), pebble.NoSync) } -func (s *StorageManager) NewTransaction(update bool) *badger.Txn { - s.mu.RLock() - defer s.mu.RUnlock() - return s.db.NewTransaction(update) +func (sm *StorageManager) Size() (lsm, vlog int64) { + // This is reporting lsm and vlog for legacy reasons. + // vlog is always 0 because pebble does not have a vlog. + // Keeping this legacy structure such that the metrics are comparable across versions, + // and we don't need to update the tooling, e.g. kibana dashboards. + // + // TODO(carsonip): Update this to report a more helpful size to monitoring, + // maybe broken down into event DB vs decision DB, and LSM tree vs WAL vs misc. + // Also remember to update + // - x-pack/apm-server/sampling/processor.go:CollectMonitoring + // - systemtest/benchtest/expvar/metrics.go + return int64(sm.DiskUsage()), 0 } -// Run has the same lifecycle as the TBS processor as opposed to StorageManager to facilitate EA hot reload. 
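// ---------------------------------------------------------------------------
// Editorial sketch (not part of this diff): loadPartitionID and
// savePartitionID above persist the current partition ID as a tiny JSON
// document under the reserved "~partitioner" key in the decision DB, so the
// partitioner can resume where it left off after an apm-server restart. The
// round trip looks roughly like this standalone program; names are
// illustrative only.
// ---------------------------------------------------------------------------
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// What savePartitionID writes for partition 2.
	saved := fmt.Sprintf(`{"id":%d}`, 2)

	// What loadPartitionID does with the bytes it reads back.
	var pid struct {
		ID int `json:"id"`
	}
	if err := json.Unmarshal([]byte(saved), &pid); err != nil {
		panic(err)
	}
	fmt.Println("resume from partition", pid.ID) // resume from partition 2
}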
-func (s *StorageManager) Run(stopping <-chan struct{}, gcInterval time.Duration, ttl time.Duration, storageLimit uint64, storageLimitThreshold float64) error { - select { - case <-stopping: - return nil - case s.runCh <- struct{}{}: - } - defer func() { - <-s.runCh - }() +// DiskUsage returns the disk usage of databases in bytes. +func (sm *StorageManager) DiskUsage() uint64 { + // pebble DiskSpaceUsage overhead is not high, but it adds up when performed per-event. + return sm.cachedDiskUsage.Load() +} - g := errgroup.Group{} - g.Go(func() error { - return s.runGCLoop(stopping, gcInterval) - }) - g.Go(func() error { - return s.runDropLoop(stopping, ttl, storageLimit, storageLimitThreshold) - }) - return g.Wait() +func (sm *StorageManager) updateDiskUsage() { + sm.cachedDiskUsage.Store(sm.eventDB.Metrics().DiskSpaceUsage() + sm.decisionDB.Metrics().DiskSpaceUsage()) } -// runGCLoop runs a loop that calls badger DB RunValueLogGC every gcInterval. -func (s *StorageManager) runGCLoop(stopping <-chan struct{}, gcInterval time.Duration) error { - // This goroutine is responsible for periodically garbage - // collecting the Badger value log, using the recommended - // discard ratio of 0.5. - ticker := time.NewTicker(gcInterval) +// runDiskUsageLoop runs a loop that updates cached disk usage regularly. +func (sm *StorageManager) runDiskUsageLoop(stopping <-chan struct{}) error { + ticker := time.NewTicker(diskUsageFetchInterval) defer ticker.Stop() for { select { case <-stopping: return nil case <-ticker.C: - const discardRatio = 0.5 - var err error - for err == nil { - // Keep garbage collecting until there are no more rewrites, - // or garbage collection fails. - err = s.runValueLogGC(discardRatio) - } - if err != nil && err != badger.ErrNoRewrite { - return err - } + sm.updateDiskUsage() } } } -func (s *StorageManager) runValueLogGC(discardRatio float64) error { - s.mu.RLock() - defer s.mu.RUnlock() - return s.db.RunValueLogGC(discardRatio) +func (sm *StorageManager) StorageLimit() uint64 { + return sm.storageLimit.Load() } -// runDropLoop runs a loop that detects if storage limit has been exceeded for at least ttl. -// If so, it drops and recreates the underlying badger DB. 
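// ---------------------------------------------------------------------------
// Editorial sketch (not part of this diff): the disk-usage caching pattern
// used by DiskUsage/updateDiskUsage/runDiskUsageLoop above, reduced to its
// essentials. An atomic holds the last sampled value, a background ticker
// refreshes it, and hot-path readers only pay for an atomic load. Names below
// are illustrative only.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type usageCache struct {
	cached atomic.Uint64
	sample func() uint64 // e.g. sums pebble Metrics().DiskSpaceUsage() of both DBs
}

func (c *usageCache) refresh() { c.cached.Store(c.sample()) }

// load is what per-event callers would use; it never touches the underlying DBs.
func (c *usageCache) load() uint64 { return c.cached.Load() }

func (c *usageCache) run(stop <-chan struct{}, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			c.refresh()
		}
	}
}

func main() {
	var n uint64
	c := &usageCache{sample: func() uint64 { n += 1024; return n }}
	c.refresh()
	stop := make(chan struct{})
	go c.run(stop, 10*time.Millisecond)
	time.Sleep(50 * time.Millisecond)
	fmt.Println("cached disk usage:", c.load())
	close(stop)
}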
-// This is a mitigation for issue https://github.com/elastic/apm-server/issues/14923 -func (s *StorageManager) runDropLoop(stopping <-chan struct{}, ttl time.Duration, storageLimitInBytes uint64, storageLimitThreshold float64) error { - if storageLimitInBytes == 0 { - return nil - } +func (sm *StorageManager) Flush() error { + return errors.Join( + wrapNonNilErr("event db flush error: %w", sm.eventDB.Flush()), + wrapNonNilErr("decision db flush error: %w", sm.decisionDB.Flush()), + ) +} - var firstExceeded time.Time - checkAndFix := func() error { - lsm, vlog := s.Size() - // add buffer to avoid edge case storageLimitInBytes-lsm-vlog < buffer, when writes are still always rejected - buffer := int64(baseTransactionSize * len(s.rw.readWriters)) - if uint64(lsm+vlog+buffer) >= storageLimitInBytes { - now := time.Now() - if firstExceeded.IsZero() { - firstExceeded = now - s.logger.Warnf( - "badger db size (%d+%d=%d) has exceeded storage limit (%d*%.1f=%d); db will be dropped and recreated if problem persists for `sampling.tail.ttl` (%s)", - lsm, vlog, lsm+vlog, storageLimitInBytes, storageLimitThreshold, int64(float64(storageLimitInBytes)*storageLimitThreshold), ttl.String()) - } - if now.Sub(firstExceeded) >= ttl { - s.logger.Warnf("badger db size has exceeded storage limit for over `sampling.tail.ttl` (%s), please consider increasing `sampling.tail.storage_limit`; dropping and recreating badger db to recover", ttl.String()) - err := s.dropAndRecreate() - if err != nil { - s.logger.With(logp.Error(err)).Error("error dropping and recreating badger db to recover storage space") - } else { - s.logger.Info("badger db dropped and recreated") - } - firstExceeded = time.Time{} - } - } else { - firstExceeded = time.Time{} - } - return nil - } +func (sm *StorageManager) Close() error { + return sm.close() +} - timer := time.NewTicker(time.Minute) // Eval db size every minute as badger reports them with 1m lag - defer timer.Stop() - for { - if err := checkAndFix(); err != nil { - return err - } +func (sm *StorageManager) close() error { + return errors.Join( + wrapNonNilErr("event db flush error: %w", sm.eventDB.Flush()), + wrapNonNilErr("decision db flush error: %w", sm.decisionDB.Flush()), + wrapNonNilErr("event db close error: %w", sm.eventDB.Close()), + wrapNonNilErr("decision db close error: %w", sm.decisionDB.Close()), + ) +} - select { - case <-stopping: - return nil - case <-timer.C: - continue - } +// Reload flushes out pending disk writes to disk by reloading the database. +// For testing only. +func (sm *StorageManager) Reload() error { + if err := sm.close(); err != nil { + return err } + return sm.reset() } -// dropAndRecreate deletes the underlying badger DB files at the file system level, and replaces it with a new badger DB. -func (s *StorageManager) dropAndRecreate() (retErr error) { - s.mu.Lock() - defer s.mu.Unlock() - +// Run has the same lifecycle as the TBS processor as opposed to StorageManager to facilitate EA hot reload. +func (sm *StorageManager) Run(stopping <-chan struct{}, ttl time.Duration, storageLimit uint64) error { + select { + case <-stopping: + return nil + case sm.runCh <- struct{}{}: + } defer func() { - // In any case (errors or not), reset StorageManager while lock is held - err := s.reset() - if err != nil { - retErr = errors.Join(retErr, fmt.Errorf("error reopening badger db: %w", err)) - } + <-sm.runCh }() - // Intentionally not flush rw, as storage is full. 
- s.rw.Close() - err := s.db.Close() - if err != nil { - return fmt.Errorf("error closing badger db: %w", err) - } + sm.storageLimit.Store(storageLimit) - err = s.deleteBadgerFiles() - if err != nil { - return fmt.Errorf("error deleting badger db files: %w", err) - } + g := errgroup.Group{} + g.Go(func() error { + return sm.runTTLGCLoop(stopping, ttl) + }) + g.Go(func() error { + return sm.runDiskUsageLoop(stopping) + }) - return nil + return g.Wait() } -func (s *StorageManager) deleteBadgerFiles() error { - // Although removing the files in place can be slower, it is less error-prone than rename-and-delete. - // Delete every file except subscriber position file - var ( - rootVisited bool - sstFiles, vlogFiles int - otherFilenames []string - ) - err := filepath.WalkDir(s.storageDir, func(path string, d fs.DirEntry, _ error) error { - if !rootVisited { - rootVisited = true - return nil - } - filename := filepath.Base(path) - if filename == subscriberPositionFile { +// runTTLGCLoop runs the TTL GC loop. +// The loop triggers a rotation on partitions at an interval based on ttl and partitionsPerTTL. +func (sm *StorageManager) runTTLGCLoop(stopping <-chan struct{}, ttl time.Duration) error { + ttlGCInterval := ttl / partitionsPerTTL + ticker := time.NewTicker(ttlGCInterval) + defer ticker.Stop() + for { + select { + case <-stopping: return nil + case <-ticker.C: + sm.logger.Info("running TTL GC to clear expired entries and reclaim disk space") + if err := sm.RotatePartitions(); err != nil { + sm.logger.With(logp.Error(err)).Error("failed to rotate partition") + } + sm.logger.Info("finished running TTL GC") } - switch ext := filepath.Ext(filename); ext { - case ".sst": - sstFiles++ - case ".vlog": - vlogFiles++ - default: - otherFilenames = append(otherFilenames, filename) - } - return os.RemoveAll(path) - }) - s.logger.Infof("deleted badger files: %d SST files, %d VLOG files, %d other files: [%s]", - sstFiles, vlogFiles, len(otherFilenames), strings.Join(otherFilenames, ", ")) - return err + } } -func (s *StorageManager) ReadSubscriberPosition() ([]byte, error) { - s.subscriberPosMu.Lock() - defer s.subscriberPosMu.Unlock() - return os.ReadFile(filepath.Join(s.storageDir, subscriberPositionFile)) -} +// RotatePartitions rotates the partitions to clean up TTL-expired entries. +func (sm *StorageManager) RotatePartitions() error { + newCurrentPID := sm.partitioner.Rotate() -func (s *StorageManager) WriteSubscriberPosition(data []byte) error { - s.subscriberPosMu.Lock() - defer s.subscriberPosMu.Unlock() - return os.WriteFile(filepath.Join(s.storageDir, subscriberPositionFile), data, 0644) -} - -func (s *StorageManager) NewReadWriter() *ManagedReadWriter { - return &ManagedReadWriter{ - sm: s, + if err := sm.savePartitionID(newCurrentPID); err != nil { + return err } -} -// ManagedReadWriter is a read writer that is transparent to badger DB changes done by StorageManager. -// It is a wrapper of the ShardedReadWriter under StorageManager. -type ManagedReadWriter struct { - sm *StorageManager -} + // No lock is needed here as the only writer to sm.partitioner is exactly this function. 
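// ---------------------------------------------------------------------------
// Editorial sketch (not part of this diff): the rotation code just below
// relies on event and decision keys carrying a one-byte partition-ID prefix
// (internal keys use the reserved '~' prefix instead), so dropping an expired
// partition reduces to a half-open range delete [pid, pid+1) followed by a
// compaction. A standalone illustration of the bound computation; the helper
// name partitionBounds is an assumption.
// ---------------------------------------------------------------------------
package main

import "fmt"

// partitionBounds returns the half-open key range covering every key whose
// first byte is pid. The upper bound is simply pid+1 and must compare greater
// than the lower bound, which is why the real code does not wrap it around
// with a modulo over the partition count.
func partitionBounds(pid byte) (lb, ub []byte) {
	return []byte{pid}, []byte{pid + 1}
}

func main() {
	lb, ub := partitionBounds(1)
	// These bounds would be handed to pebble's DeleteRange and Compact.
	fmt.Printf("delete keys in [%v, %v)\n", lb, ub)
}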
+ pidToDelete := sm.partitioner.Inactive() + lbPrefix := byte(pidToDelete) -func (s *ManagedReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error { - s.sm.mu.RLock() - defer s.sm.mu.RUnlock() - return s.sm.rw.ReadTraceEvents(traceID, out) -} + lb := []byte{lbPrefix} + ub := []byte{lbPrefix + 1} // Do not use % here as ub MUST BE greater than lb -func (s *ManagedReadWriter) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent, opts WriterOpts) error { - ok := s.sm.mu.TryRLock() - if !ok { - return errDropAndRecreateInProgress - } - defer s.sm.mu.RUnlock() - return s.sm.rw.WriteTraceEvent(traceID, id, event, opts) -} - -func (s *ManagedReadWriter) WriteTraceSampled(traceID string, sampled bool, opts WriterOpts) error { - ok := s.sm.mu.TryRLock() - if !ok { - return errDropAndRecreateInProgress - } - defer s.sm.mu.RUnlock() - return s.sm.rw.WriteTraceSampled(traceID, sampled, opts) + return errors.Join( + wrapNonNilErr("event db delete range error: %w", sm.eventDB.DeleteRange(lb, ub, pebble.NoSync)), + wrapNonNilErr("decision db delete range error: %w", sm.decisionDB.DeleteRange(lb, ub, pebble.NoSync)), + wrapNonNilErr("event db compact error: %w", sm.eventDB.Compact(lb, ub, false)), + wrapNonNilErr("decision db compact error: %w", sm.decisionDB.Compact(lb, ub, false)), + ) } -func (s *ManagedReadWriter) IsTraceSampled(traceID string) (bool, error) { - s.sm.mu.RLock() - defer s.sm.mu.RUnlock() - return s.sm.rw.IsTraceSampled(traceID) +func (sm *StorageManager) ReadSubscriberPosition() ([]byte, error) { + sm.subscriberPosMu.Lock() + defer sm.subscriberPosMu.Unlock() + return os.ReadFile(filepath.Join(sm.storageDir, subscriberPositionFile)) } -func (s *ManagedReadWriter) DeleteTraceEvent(traceID, id string) error { - s.sm.mu.RLock() - defer s.sm.mu.RUnlock() - return s.sm.rw.DeleteTraceEvent(traceID, id) +func (sm *StorageManager) WriteSubscriberPosition(data []byte) error { + sm.subscriberPosMu.Lock() + defer sm.subscriberPosMu.Unlock() + return os.WriteFile(filepath.Join(sm.storageDir, subscriberPositionFile), data, 0644) } -func (s *ManagedReadWriter) Flush() error { - s.sm.mu.RLock() - defer s.sm.mu.RUnlock() - return s.sm.rw.Flush() +func (sm *StorageManager) NewReadWriter() StorageLimitReadWriter { + return NewStorageLimitReadWriter(sm, SplitReadWriter{ + eventRW: sm.eventStorage.NewReadWriter(), + decisionRW: sm.decisionStorage.NewReadWriter(), + }) } -// NewBypassReadWriter returns a ReadWriter directly reading and writing to the database, -// bypassing any wrapper e.g. ShardedReadWriter. -// This should be used for testing only, useful to check if data is actually persisted to the DB. -func (s *StorageManager) NewBypassReadWriter() *ReadWriter { - return s.storage.NewReadWriter() +// wrapNonNilErr only wraps an error with format if the error is not nil. +func wrapNonNilErr(format string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf(format, err) } diff --git a/x-pack/apm-server/sampling/eventstorage/storage_manager_bench_test.go b/x-pack/apm-server/sampling/eventstorage/storage_manager_bench_test.go new file mode 100644 index 00000000000..86ebc1d5430 --- /dev/null +++ b/x-pack/apm-server/sampling/eventstorage/storage_manager_bench_test.go @@ -0,0 +1,35 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package eventstorage_test + +import ( + "testing" + "time" + + "github.com/gofrs/uuid/v5" + "github.com/stretchr/testify/require" +) + +func BenchmarkStorageManager_DiskUsage(b *testing.B) { + stopping := make(chan struct{}) + defer close(stopping) + sm := newStorageManager(b) + go sm.Run(stopping, time.Second, 0) + rw := sm.NewReadWriter() + for i := 0; i < 1000; i++ { + traceID := uuid.Must(uuid.NewV4()).String() + txnID := uuid.Must(uuid.NewV4()).String() + txn := makeTransaction(txnID, traceID) + err := rw.WriteTraceEvent(traceID, txnID, txn) + require.NoError(b, err) + err = rw.WriteTraceSampled(traceID, true) + require.NoError(b, err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = sm.DiskUsage() + } + b.StopTimer() +} diff --git a/x-pack/apm-server/sampling/eventstorage/storage_manager_test.go b/x-pack/apm-server/sampling/eventstorage/storage_manager_test.go index 3dccac1ed38..1145a9d8fe4 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage_manager_test.go +++ b/x-pack/apm-server/sampling/eventstorage/storage_manager_test.go @@ -2,69 +2,174 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -package eventstorage +package eventstorage_test import ( - "fmt" - "os" - "path/filepath" "testing" "time" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/elastic/apm-data/model/modelpb" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" ) -func badgerModTime(dir string) time.Time { - oldest := time.Now() - filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - ext := filepath.Ext(path) - if (ext == ".vlog" || ext == ".sst") && info.ModTime().Before(oldest) { - oldest = info.ModTime() - } - return nil - }) - return oldest +func newStorageManager(tb testing.TB, opts ...eventstorage.StorageManagerOptions) *eventstorage.StorageManager { + sm := newStorageManagerNoCleanup(tb, tb.TempDir(), opts...) + tb.Cleanup(func() { sm.Close() }) + return sm } -func TestDropAndRecreate_filesRecreated(t *testing.T) { - tempDir := t.TempDir() - sm, err := NewStorageManager(tempDir) - require.NoError(t, err) - defer sm.Close() +func newStorageManagerNoCleanup(tb testing.TB, path string, opts ...eventstorage.StorageManagerOptions) *eventstorage.StorageManager { + sm, err := eventstorage.NewStorageManager(path, opts...) 
+ if err != nil { + tb.Fatal(err) + } + return sm +} + +func TestStorageManager_samplingDecisionTTL(t *testing.T) { + sm := newStorageManager(t) + rw := sm.NewReadWriter() + traceID := uuid.Must(uuid.NewV4()).String() + err := rw.WriteTraceSampled(traceID, true) + assert.NoError(t, err) + sampled, err := rw.IsTraceSampled(traceID) + assert.NoError(t, err) + assert.True(t, sampled) + + // after 1 TTL + err = sm.RotatePartitions() + assert.NoError(t, err) - oldModTime := badgerModTime(tempDir) + sampled, err = rw.IsTraceSampled(traceID) + assert.NoError(t, err) + assert.True(t, sampled) - err = sm.dropAndRecreate() + // after 2 TTL + err = sm.RotatePartitions() assert.NoError(t, err) - newModTime := badgerModTime(tempDir) + _, err = rw.IsTraceSampled(traceID) + assert.ErrorIs(t, err, eventstorage.ErrNotFound) - assert.Greater(t, newModTime, oldModTime) + // after 3 TTL + err = sm.RotatePartitions() + assert.NoError(t, err) + + _, err = rw.IsTraceSampled(traceID) + assert.ErrorIs(t, err, eventstorage.ErrNotFound) } -func TestDropAndRecreate_subscriberPositionFile(t *testing.T) { - for _, exists := range []bool{true, false} { - t.Run(fmt.Sprintf("exists=%t", exists), func(t *testing.T) { - tempDir := t.TempDir() - sm, err := NewStorageManager(tempDir) - require.NoError(t, err) - defer sm.Close() - - if exists { - err := sm.WriteSubscriberPosition([]byte("{}")) - require.NoError(t, err) - } - - err = sm.dropAndRecreate() - assert.NoError(t, err) - - data, err := sm.ReadSubscriberPosition() - if exists { - assert.Equal(t, "{}", string(data)) - } else { - assert.ErrorIs(t, err, os.ErrNotExist) - } - }) - } +func TestStorageManager_eventTTL(t *testing.T) { + sm := newStorageManager(t) + rw := sm.NewReadWriter() + traceID := uuid.Must(uuid.NewV4()).String() + txnID1 := uuid.Must(uuid.NewV4()).String() + txn1 := makeTransaction(txnID1, traceID) + err := rw.WriteTraceEvent(traceID, txnID1, txn1) + assert.NoError(t, err) + + var out modelpb.Batch + err = rw.ReadTraceEvents(traceID, &out) + assert.NoError(t, err) + assert.Len(t, out, 1) + + // after 1 TTL + err = sm.RotatePartitions() + assert.NoError(t, err) + + txnID2 := uuid.Must(uuid.NewV4()).String() + txn2 := makeTransaction(txnID2, traceID) + err = rw.WriteTraceEvent(traceID, txnID2, txn2) + assert.NoError(t, err) + + out = nil + err = rw.ReadTraceEvents(traceID, &out) + assert.NoError(t, err) + assert.Equal(t, modelpb.Batch{txn2, txn1}, out) + + // after 2 TTL + err = sm.RotatePartitions() + assert.NoError(t, err) + + out = nil + err = rw.ReadTraceEvents(traceID, &out) + assert.NoError(t, err) + assert.Equal(t, modelpb.Batch{txn2}, out) + + // after 3 TTL + err = sm.RotatePartitions() + assert.NoError(t, err) + + out = nil + err = rw.ReadTraceEvents(traceID, &out) + assert.NoError(t, err) + assert.Len(t, out, 0) +} + +func TestStorageManager_partitionID(t *testing.T) { + const traceID = "foo" + tmpDir := t.TempDir() + sm := newStorageManagerNoCleanup(t, tmpDir) + + // 0 -> 1 + assert.NoError(t, sm.RotatePartitions()) + + // write to partition 1 + err := sm.NewReadWriter().WriteTraceSampled(traceID, true) + assert.NoError(t, err) + + assert.NoError(t, sm.Close()) + + // it should read directly from partition 1 on startup instead of 0 + sm = newStorageManagerNoCleanup(t, tmpDir) + defer sm.Close() + sampled, err := sm.NewReadWriter().IsTraceSampled(traceID) + assert.NoError(t, err) + assert.True(t, sampled) +} + +func TestStorageManager_DiskUsage(t *testing.T) { + stopping := make(chan struct{}) + defer close(stopping) + sm := 
newStorageManager(t) + go sm.Run(stopping, time.Second, 0) + old := sm.DiskUsage() + + err := sm.NewReadWriter().WriteTraceSampled("foo", true) + require.NoError(t, err) + + err = sm.Flush() + require.NoError(t, err) + + assert.Eventually(t, func() bool { + return sm.DiskUsage() > old + }, 10*time.Second, 100*time.Millisecond) + + old = sm.DiskUsage() + + err = sm.NewReadWriter().WriteTraceEvent("foo", "bar", makeTransaction("bar", "foo")) + require.NoError(t, err) + + err = sm.Flush() + require.NoError(t, err) + + assert.Eventually(t, func() bool { + return sm.DiskUsage() > old + }, 10*time.Second, 100*time.Millisecond) +} + +func TestStorageManager_Run(t *testing.T) { + done := make(chan struct{}) + stopping := make(chan struct{}) + sm := newStorageManager(t) + go func() { + assert.NoError(t, sm.Run(stopping, time.Second, 0)) + close(done) + }() + close(stopping) + <-done } diff --git a/x-pack/apm-server/sampling/eventstorage/storage_test.go b/x-pack/apm-server/sampling/eventstorage/storage_test.go deleted file mode 100644 index 3f35924965e..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/storage_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -package eventstorage_test - -import ( - "testing" - "time" - - "github.com/dgraph-io/badger/v2" - "github.com/gofrs/uuid/v5" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/testing/protocmp" - - "github.com/elastic/apm-data/model/modelpb" - "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" -) - -func TestWriteEvents(t *testing.T) { - // Run two tests: - // - 1 transaction and 1 span - // - 1 transaction and 100 spans - // - // The latter test will cause ReadTraceEvents to implicitly call flush. - t.Run("no_flush", func(t *testing.T) { - testWriteEvents(t, 1) - }) - t.Run("implicit_flush", func(t *testing.T) { - testWriteEvents(t, 100) - }) -} - -func testWriteEvents(t *testing.T, numSpans int) { - db := newBadgerDB(t, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - readWriter := store.NewShardedReadWriter() - defer readWriter.Close() - - beforeWrite := time.Now() - traceID := uuid.Must(uuid.NewV4()).String() - transactionID := uuid.Must(uuid.NewV4()).String() - transaction := modelpb.APMEvent{ - Transaction: &modelpb.Transaction{Id: transactionID}, - } - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } - assert.NoError(t, readWriter.WriteTraceEvent(traceID, transactionID, &transaction, wOpts)) - - var spanEvents []*modelpb.APMEvent - for i := 0; i < numSpans; i++ { - spanID := uuid.Must(uuid.NewV4()).String() - span := modelpb.APMEvent{ - Span: &modelpb.Span{Id: spanID}, - } - assert.NoError(t, readWriter.WriteTraceEvent(traceID, spanID, &span, wOpts)) - spanEvents = append(spanEvents, &span) - } - afterWrite := time.Now() - - // We can read our writes without flushing. 
- var batch modelpb.Batch - assert.NoError(t, readWriter.ReadTraceEvents(traceID, &batch)) - spanEvents = append(spanEvents, &transaction) - assert.Empty(t, cmp.Diff(modelpb.Batch(spanEvents), batch, - cmpopts.SortSlices(func(e1 *modelpb.APMEvent, e2 *modelpb.APMEvent) bool { - return e1.GetSpan().GetId() < e2.GetSpan().GetId() - }), - protocmp.Transform()), - ) - - // Flush in order for the writes to be visible to other readers. - assert.NoError(t, readWriter.Flush()) - - var recorded modelpb.Batch - assert.NoError(t, db.View(func(txn *badger.Txn) error { - iter := txn.NewIterator(badger.IteratorOptions{ - Prefix: []byte(traceID), - }) - defer iter.Close() - for iter.Rewind(); iter.Valid(); iter.Next() { - item := iter.Item() - expiresAt := item.ExpiresAt() - expiryTime := time.Unix(int64(expiresAt), 0) - - // The expiry time should be somewhere between when we - // started and finished writing + the TTL. The expiry time - // is recorded as seconds since the Unix epoch, hence the - // truncation. - lowerBound := beforeWrite.Add(wOpts.TTL).Truncate(time.Second) - upperBound := afterWrite.Add(wOpts.TTL).Truncate(time.Second) - assert.Condition(t, func() bool { - return !lowerBound.After(expiryTime) - }, "expiry time %s is before %s", expiryTime, lowerBound) - assert.Condition(t, func() bool { - return !expiryTime.After(upperBound) - }, "expiry time %s is after %s", expiryTime, upperBound) - - var event modelpb.APMEvent - require.Equal(t, "e", string(item.UserMeta())) - assert.NoError(t, item.Value(func(data []byte) error { - return proto.Unmarshal(data, &event) - })) - recorded = append(recorded, &event) - } - return nil - })) - assert.Empty(t, cmp.Diff(batch, recorded, protocmp.Transform())) -} - -func TestWriteTraceSampled(t *testing.T) { - db := newBadgerDB(t, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - readWriter := store.NewShardedReadWriter() - defer readWriter.Close() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } - - before := time.Now() - assert.NoError(t, readWriter.WriteTraceSampled("sampled_trace_id", true, wOpts)) - assert.NoError(t, readWriter.WriteTraceSampled("unsampled_trace_id", false, wOpts)) - - // We can read our writes without flushing. - isSampled, err := readWriter.IsTraceSampled("sampled_trace_id") - assert.NoError(t, err) - assert.True(t, isSampled) - - // Flush in order for the writes to be visible to other readers. 
- assert.NoError(t, readWriter.Flush()) - - sampled := make(map[string]bool) - assert.NoError(t, db.View(func(txn *badger.Txn) error { - iter := txn.NewIterator(badger.IteratorOptions{}) - defer iter.Close() - for iter.Rewind(); iter.Valid(); iter.Next() { - item := iter.Item() - expiresAt := item.ExpiresAt() - expiryTime := time.Unix(int64(expiresAt), 0) - assert.Condition(t, func() bool { - return !before.After(expiryTime) && !expiryTime.After(before.Add(wOpts.TTL)) - }) - - key := string(item.Key()) - switch meta := item.UserMeta(); meta { - case 's': - sampled[key] = true - case 'u': - sampled[key] = false - default: - t.Fatalf("invalid meta %q", meta) - } - assert.Zero(t, item.ValueSize()) - } - return nil - })) - assert.Equal(t, map[string]bool{ - "sampled_trace_id": true, - "unsampled_trace_id": false, - }, sampled) -} - -func TestReadTraceEvents(t *testing.T) { - db := newBadgerDB(t, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - - traceID := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - require.NoError(t, db.Update(func(txn *badger.Txn) error { - key := append(traceID[:], ":12345678"...) - value, err := proto.Marshal(&modelpb.APMEvent{Transaction: &modelpb.Transaction{Name: "transaction"}}) - if err != nil { - return err - } - if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil { - return err - } - - key = append(traceID[:], ":87654321"...) - value, err = proto.Marshal(&modelpb.APMEvent{Span: &modelpb.Span{Name: "span"}}) - if err != nil { - return err - } - if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil { - return err - } - - // Write an entry with the trace ID as a prefix, but with no - // proceeding colon, causing it to be ignored. - key = append(traceID[:], "nocolon"...) - value = []byte(`not-protobuf`) - if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil { - return err - } - - // Write an entry with an unknown meta value. It will be ignored. - key = append(traceID[:], ":11111111"...) - value = []byte(`not-protobuf`) - if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('?')); err != nil { - return err - } - return nil - })) - - reader := store.NewShardedReadWriter() - defer reader.Close() - - var events modelpb.Batch - assert.NoError(t, reader.ReadTraceEvents(string(traceID[:]), &events)) - assert.Empty(t, cmp.Diff(modelpb.Batch{ - {Transaction: &modelpb.Transaction{Name: "transaction"}}, - {Span: &modelpb.Span{Name: "span"}}, - }, events, protocmp.Transform())) -} - -func TestReadTraceEventsDecodeError(t *testing.T) { - db := newBadgerDB(t, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - - traceID := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - require.NoError(t, db.Update(func(txn *badger.Txn) error { - key := append(traceID[:], ":12345678"...) 
- value := []byte(`wat`) - if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil { - return err - } - return nil - })) - - reader := store.NewShardedReadWriter() - defer reader.Close() - - var events modelpb.Batch - err := reader.ReadTraceEvents(string(traceID[:]), &events) - assert.Error(t, err) -} - -func TestIsTraceSampled(t *testing.T) { - db := newBadgerDB(t, badgerOptions) - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - - require.NoError(t, db.Update(func(txn *badger.Txn) error { - if err := txn.SetEntry(badger.NewEntry([]byte("sampled_trace_id"), nil).WithMeta('s')); err != nil { - return err - } - if err := txn.SetEntry(badger.NewEntry([]byte("unsampled_trace_id"), nil).WithMeta('u')); err != nil { - return err - } - return nil - })) - - reader := store.NewShardedReadWriter() - defer reader.Close() - - sampled, err := reader.IsTraceSampled("sampled_trace_id") - assert.NoError(t, err) - assert.True(t, sampled) - - sampled, err = reader.IsTraceSampled("unsampled_trace_id") - assert.NoError(t, err) - assert.False(t, sampled) - - _, err = reader.IsTraceSampled("unknown_trace_id") - assert.Equal(t, err, eventstorage.ErrNotFound) -} - -func TestStorageLimit(t *testing.T) { - tempdir := t.TempDir() - opts := func() badger.Options { - opts := badgerOptions() - opts = opts.WithInMemory(false) - opts = opts.WithDir(tempdir).WithValueDir(tempdir) - return opts - } - - // Open and close the database to create a non-empty value log file, - // which will cause writes below to fail due to the storage limit being - // exceeded. We would otherwise have to rely on Badger's one minute - // timer to refresh the size. - db := newBadgerDB(t, opts) - db.Close() - db = newBadgerDB(t, opts) - - store := eventstorage.New(db, eventstorage.ProtobufCodec{}) - readWriter := store.NewReadWriter() - defer readWriter.Close() - - traceID := uuid.Must(uuid.NewV4()).String() - transactionID := uuid.Must(uuid.NewV4()).String() - transaction := modelpb.APMEvent{Transaction: &modelpb.Transaction{Id: transactionID}} - err := readWriter.WriteTraceEvent(traceID, transactionID, &transaction, eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 1, - }) - assert.ErrorIs(t, err, eventstorage.ErrLimitReached) - - // Assert the stored write has been discarded. - var batch modelpb.Batch - readWriter.ReadTraceEvents(traceID, &batch) - assert.Equal(t, 0, len(batch)) -} - -func badgerOptions() badger.Options { - return badger.DefaultOptions("").WithInMemory(true).WithLogger(nil) -} - -type badgerOptionsFunc func() badger.Options - -func newBadgerDB(tb testing.TB, badgerOptions badgerOptionsFunc) *badger.DB { - db, err := badger.Open(badgerOptions()) - if err != nil { - panic(err) - } - tb.Cleanup(func() { db.Close() }) - return db -} diff --git a/x-pack/apm-server/sampling/eventstorage/storage_whitebox_test.go b/x-pack/apm-server/sampling/eventstorage/storage_whitebox_test.go deleted file mode 100644 index 8e746aba823..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/storage_whitebox_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. 
- -package eventstorage - -import ( - "testing" - "time" - - "github.com/dgraph-io/badger/v2" - "github.com/gofrs/uuid/v5" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-data/model/modelpb" -) - -func newReadWriter(tb testing.TB) *ReadWriter { - tempdir := tb.TempDir() - opts := badger.DefaultOptions("").WithLogger(nil) - opts = opts.WithInMemory(false) - opts = opts.WithDir(tempdir).WithValueDir(tempdir) - - db, err := badger.Open(opts) - if err != nil { - panic(err) - } - tb.Cleanup(func() { db.Close() }) - - store := New(db, ProtobufCodec{}) - readWriter := store.NewReadWriter() - tb.Cleanup(func() { readWriter.Close() }) - - readWriter.lazyInit() - return readWriter -} - -func TestDeleteTraceEvent_ErrTxnTooBig(t *testing.T) { - readWriter := newReadWriter(t) - - traceID, transactionID := writeEvent(t, readWriter) - assert.True(t, eventExists(t, readWriter, traceID, transactionID)) - - fillTxnUntilTxnTooBig(readWriter.txn) - - err := readWriter.DeleteTraceEvent(traceID, transactionID) - assert.NoError(t, err) - - assert.False(t, eventExists(t, readWriter, traceID, transactionID)) -} - -func TestWriteTraceEvent_ErrTxnTooBig(t *testing.T) { - readWriter := newReadWriter(t) - - fillTxnUntilTxnTooBig(readWriter.txn) - - traceID, transactionID := writeEvent(t, readWriter) - assert.True(t, eventExists(t, readWriter, traceID, transactionID)) -} - -func writeEvent(t *testing.T, readWriter *ReadWriter) (traceID, transactionID string) { - traceID = uuid.Must(uuid.NewV4()).String() - transactionID = uuid.Must(uuid.NewV4()).String() - transaction := modelpb.APMEvent{Transaction: &modelpb.Transaction{Id: transactionID}} - err := readWriter.WriteTraceEvent(traceID, transactionID, &transaction, WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - }) - assert.NoError(t, err) - return -} - -func eventExists(t *testing.T, readWriter *ReadWriter, traceID, transactionID string) (ok bool) { - var batch modelpb.Batch - err := readWriter.ReadTraceEvents(traceID, &batch) - require.NoError(t, err) - for _, e := range batch { - if e.GetTransaction().GetId() == transactionID { - ok = true - } - } - return -} - -func fillTxnUntilTxnTooBig(txn *badger.Txn) { - var err error - for { - if err == badger.ErrTxnTooBig { - break - } - entry := badger.NewEntry([]byte{0}, []byte{}) - err = txn.SetEntry(entry) - } -} diff --git a/x-pack/apm-server/sampling/processor.go b/x-pack/apm-server/sampling/processor.go index 4289a991597..a565851a856 100644 --- a/x-pack/apm-server/sampling/processor.go +++ b/x-pack/apm-server/sampling/processor.go @@ -41,7 +41,7 @@ type Processor struct { rateLimitedLogger *logp.Logger groups *traceGroups - eventStore *wrappedRW + eventStore eventstorage.RW eventMetrics *eventMetrics // heap-allocated for 64-bit alignment stopMu sync.Mutex @@ -64,13 +64,18 @@ func NewProcessor(config Config) (*Processor, error) { return nil, errors.Wrap(err, "invalid tail-sampling config") } + rw := config.Storage + if rw == nil { + rw = config.DB.NewReadWriter() + } + logger := logp.NewLogger(logs.Sampling) p := &Processor{ config: config, logger: logger, rateLimitedLogger: logger.WithOptions(logs.WithRateLimit(loggerRateLimit)), groups: newTraceGroups(config.Policies, config.MaxDynamicServices, config.IngestRateDecayFactor), - eventStore: newWrappedRW(config.Storage, config.TTL, int64(config.StorageLimit)), + eventStore: rw, eventMetrics: &eventMetrics{}, stopping: make(chan struct{}), stopped: make(chan struct{}), @@ -273,8 +278,9 @@ func (p 
*Processor) processSpan(event *modelpb.APMEvent) (report, stored bool, _ return traceSampled, false, nil } -// Stop stops the processor, flushing event storage. Note that the underlying -// badger.DB must be closed independently to ensure writes are synced to disk. +// Stop stops the processor. +// Note that the underlying StorageManager must be closed independently +// to ensure writes are synced to disk. func (p *Processor) Stop(ctx context.Context) error { p.stopMu.Lock() select { @@ -293,8 +299,7 @@ func (p *Processor) Stop(ctx context.Context) error { case <-p.stopped: } - // Flush event store and the underlying read writers - return p.eventStore.Flush() + return nil } // Run runs the tail-sampling processor. This method is responsible for: @@ -373,7 +378,7 @@ func (p *Processor) Run() error { } }) g.Go(func() error { - return p.config.DB.Run(p.stopping, p.config.StorageGCInterval, p.config.TTL, p.config.StorageLimit, storageLimitThreshold) + return p.config.DB.Run(p.stopping, p.config.TTL, p.config.StorageLimit) }) g.Go(func() error { // Subscribe to remotely sampled trace IDs. This is cancelled immediately when @@ -496,6 +501,10 @@ func (p *Processor) Run() error { // deleted. We delete events from local storage so // we don't publish duplicates; delivery is therefore // at-most-once, not guaranteed. + // + // TODO(carsonip): pebble supports range deletes and may be better than + // deleting events separately, but as we do not use transactions, it is + // possible to race and delete something that is not read. for _, event := range events { switch event.Type() { case modelpb.TransactionEventType: @@ -561,70 +570,3 @@ func sendTraceIDs(ctx context.Context, out chan<- string, traceIDs []string) err } return nil } - -const ( - storageLimitThreshold = 0.90 // Allow 90% of the quota to be used. -) - -type rw interface { - ReadTraceEvents(traceID string, out *modelpb.Batch) error - WriteTraceEvent(traceID, id string, event *modelpb.APMEvent, opts eventstorage.WriterOpts) error - WriteTraceSampled(traceID string, sampled bool, opts eventstorage.WriterOpts) error - IsTraceSampled(traceID string) (bool, error) - DeleteTraceEvent(traceID, id string) error - Flush() error -} - -// wrappedRW wraps configurable write options for global rw -type wrappedRW struct { - rw rw - writerOpts eventstorage.WriterOpts -} - -// Stored entries expire after ttl. -// The amount of storage that can be consumed can be limited by passing in a -// limit value greater than zero. The hard limit on storage is set to 90% of -// the limit to account for delay in the size reporting by badger. -// https://github.com/dgraph-io/badger/blob/82b00f27e3827022082225221ae05c03f0d37620/db.go#L1302-L1319. 
-func newWrappedRW(rw rw, ttl time.Duration, limit int64) *wrappedRW { - if limit > 1 { - limit = int64(float64(limit) * storageLimitThreshold) - } - return &wrappedRW{ - rw: rw, - writerOpts: eventstorage.WriterOpts{ - TTL: ttl, - StorageLimitInBytes: limit, - }, - } -} - -// ReadTraceEvents calls rw.ReadTraceEvents -func (s *wrappedRW) ReadTraceEvents(traceID string, out *modelpb.Batch) error { - return s.rw.ReadTraceEvents(traceID, out) -} - -// WriteTraceEvent calls rw.WriteTraceEvent using the configured WriterOpts -func (s *wrappedRW) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { - return s.rw.WriteTraceEvent(traceID, id, event, s.writerOpts) -} - -// WriteTraceSampled calls rw.WriteTraceSampled using the configured WriterOpts -func (s *wrappedRW) WriteTraceSampled(traceID string, sampled bool) error { - return s.rw.WriteTraceSampled(traceID, sampled, s.writerOpts) -} - -// IsTraceSampled calls rw.IsTraceSampled -func (s *wrappedRW) IsTraceSampled(traceID string) (bool, error) { - return s.rw.IsTraceSampled(traceID) -} - -// DeleteTraceEvent calls rw.DeleteTraceEvent -func (s *wrappedRW) DeleteTraceEvent(traceID, id string) error { - return s.rw.DeleteTraceEvent(traceID, id) -} - -// Flush calls rw.Flush -func (s *wrappedRW) Flush() error { - return s.rw.Flush() -} diff --git a/x-pack/apm-server/sampling/processor_bench_test.go b/x-pack/apm-server/sampling/processor_bench_test.go index 196a62f0cd4..4ae725bcd2a 100644 --- a/x-pack/apm-server/sampling/processor_bench_test.go +++ b/x-pack/apm-server/sampling/processor_bench_test.go @@ -20,7 +20,7 @@ import ( ) func BenchmarkProcess(b *testing.B) { - processor, err := sampling.NewProcessor(newTempdirConfig(b)) + processor, err := sampling.NewProcessor(newTempdirConfig(b).Config) require.NoError(b, err) go processor.Run() b.Cleanup(func() { processor.Stop(context.Background()) }) diff --git a/x-pack/apm-server/sampling/processor_test.go b/x-pack/apm-server/sampling/processor_test.go index 5a301e208cf..942a55fdd92 100644 --- a/x-pack/apm-server/sampling/processor_test.go +++ b/x-pack/apm-server/sampling/processor_test.go @@ -11,9 +11,6 @@ import ( "os" "path" "path/filepath" - "runtime" - "sort" - "strings" "testing" "time" @@ -22,7 +19,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" "google.golang.org/protobuf/testing/protocmp" "github.com/elastic/apm-data/model/modelpb" @@ -33,7 +29,7 @@ import ( ) func TestProcessUnsampled(t *testing.T) { - processor, err := sampling.NewProcessor(newTempdirConfig(t)) + processor, err := sampling.NewProcessor(newTempdirConfig(t).Config) require.NoError(t, err) go processor.Run() defer processor.Stop(context.Background()) @@ -57,32 +53,23 @@ func TestProcessUnsampled(t *testing.T) { } func TestProcessAlreadyTailSampled(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config // Seed event storage with a tail-sampling decisions, to show that // subsequent events in the trace will be reported immediately. 
trace1 := modelpb.Trace{Id: "0102030405060708090a0b0c0d0e0f10"} trace2 := modelpb.Trace{Id: "0102030405060708090a0b0c0d0e0f11"} - writer := config.DB.NewBypassReadWriter() - wOpts := eventstorage.WriterOpts{ - TTL: time.Minute, - StorageLimitInBytes: 0, - } - assert.NoError(t, writer.WriteTraceSampled(trace1.Id, true, wOpts)) - assert.NoError(t, writer.Flush()) - writer.Close() - - wOpts.TTL = -1 // expire immediately - writer = config.DB.NewBypassReadWriter() - assert.NoError(t, writer.WriteTraceSampled(trace2.Id, true, wOpts)) - assert.NoError(t, writer.Flush()) - writer.Close() - - // Badger transactions created globally before committing the above writes - // will not see them due to SSI (Serializable Snapshot Isolation). Flush - // the storage so that new transactions are created for the underlying - // writer shards that can list all the events committed so far. - require.NoError(t, config.Storage.Flush()) + writer := config.DB.NewReadWriter() + assert.NoError(t, writer.WriteTraceSampled(trace2.Id, true)) + + // simulate 2 TTL + assert.NoError(t, config.DB.RotatePartitions()) + assert.NoError(t, config.DB.RotatePartitions()) + + writer = config.DB.NewReadWriter() + assert.NoError(t, writer.WriteTraceSampled(trace1.Id, true)) + + require.NoError(t, config.DB.Flush()) processor, err := sampling.NewProcessor(config) require.NoError(t, err) @@ -140,9 +127,8 @@ func TestProcessAlreadyTailSampled(t *testing.T) { // Stop the processor and flush global storage so we can access the database. assert.NoError(t, processor.Stop(context.Background())) - assert.NoError(t, config.Storage.Flush()) - reader := config.DB.NewBypassReadWriter() - defer reader.Close() + assert.NoError(t, config.DB.Flush()) + reader := config.DB.NewReadWriter() batch = nil err = reader.ReadTraceEvents(trace1.Id, &batch) @@ -167,7 +153,7 @@ func TestProcessLocalTailSampling(t *testing.T) { }, } { t.Run(fmt.Sprintf("%f", tc.sampleRate), func(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.Policies = []sampling.Policy{{SampleRate: tc.sampleRate}} config.FlushInterval = 10 * time.Millisecond published := make(chan string) @@ -259,9 +245,8 @@ func TestProcessLocalTailSampling(t *testing.T) { // Stop the processor and flush global storage so we can access the database. assert.NoError(t, processor.Stop(context.Background())) - assert.NoError(t, config.Storage.Flush()) - reader := config.DB.NewBypassReadWriter() - defer reader.Close() + assert.NoError(t, config.DB.Flush()) + reader := config.DB.NewReadWriter() sampled, err := reader.IsTraceSampled(sampledTraceID) assert.NoError(t, err) @@ -289,7 +274,7 @@ func TestProcessLocalTailSampling(t *testing.T) { } func TestProcessLocalTailSamplingUnsampled(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.FlushInterval = time.Minute processor, err := sampling.NewProcessor(config) require.NoError(t, err) @@ -323,9 +308,8 @@ func TestProcessLocalTailSamplingUnsampled(t *testing.T) { // Stop the processor so we can access the database. 
assert.NoError(t, processor.Stop(context.Background())) - assert.NoError(t, config.Storage.Flush()) - reader := config.DB.NewBypassReadWriter() - defer reader.Close() + assert.NoError(t, config.DB.Flush()) + reader := config.DB.NewReadWriter() var anyUnsampled bool for _, traceID := range traceIDs { @@ -344,7 +328,7 @@ func TestProcessLocalTailSamplingUnsampled(t *testing.T) { } func TestProcessLocalTailSamplingPolicyOrder(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.Policies = []sampling.Policy{{ PolicyCriteria: sampling.PolicyCriteria{TraceName: "trace_name"}, SampleRate: 0.5, @@ -411,7 +395,7 @@ func TestProcessLocalTailSamplingPolicyOrder(t *testing.T) { } func TestProcessRemoteTailSampling(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.Policies = []sampling.Policy{{SampleRate: 0.5}} config.FlushInterval = 10 * time.Millisecond @@ -476,7 +460,7 @@ func TestProcessRemoteTailSampling(t *testing.T) { // Stop the processor and flush global storage so we can access the database. assert.NoError(t, processor.Stop(context.Background())) - assert.NoError(t, config.Storage.Flush()) + assert.NoError(t, config.DB.Flush()) assert.Empty(t, published) // remote decisions don't get republished expectedMonitoring := monitoring.MakeFlatSnapshot() @@ -490,8 +474,7 @@ func TestProcessRemoteTailSampling(t *testing.T) { assert.Empty(t, cmp.Diff(trace1Events, events, protocmp.Transform())) - reader := config.DB.NewBypassReadWriter() - defer reader.Close() + reader := config.DB.NewReadWriter() sampled, err := reader.IsTraceSampled(traceID1) assert.NoError(t, err) @@ -520,11 +503,11 @@ func (m errorRW) ReadTraceEvents(traceID string, out *modelpb.Batch) error { return m.err } -func (m errorRW) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent, opts eventstorage.WriterOpts) error { +func (m errorRW) WriteTraceEvent(traceID, id string, event *modelpb.APMEvent) error { return m.err } -func (m errorRW) WriteTraceSampled(traceID string, sampled bool, opts eventstorage.WriterOpts) error { +func (m errorRW) WriteTraceSampled(traceID string, sampled bool) error { return m.err } @@ -543,7 +526,7 @@ func (m errorRW) Flush() error { func TestProcessDiscardOnWriteFailure(t *testing.T) { for _, discard := range []bool{true, false} { t.Run(fmt.Sprintf("discard=%v", discard), func(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.DiscardOnWriteFailure = discard config.Storage = errorRW{err: errors.New("boom")} processor, err := sampling.NewProcessor(config) @@ -576,7 +559,7 @@ func TestProcessDiscardOnWriteFailure(t *testing.T) { } func TestGroupsMonitoring(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config config.MaxDynamicServices = 5 config.FlushInterval = time.Minute config.Policies[0].SampleRate = 0.99 @@ -612,7 +595,7 @@ func TestGroupsMonitoring(t *testing.T) { } func TestStorageMonitoring(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config processor, err := sampling.NewProcessor(config) require.NoError(t, err) @@ -634,107 +617,19 @@ func TestStorageMonitoring(t *testing.T) { assert.Empty(t, batch) } - // Stop the processor and create a new one, which will reopen storage - // and calculate the storage size. Otherwise we must wait for a minute - // (hard-coded in badger) for storage metrics to be updated. 
processor.Stop(context.Background()) - processor, err = sampling.NewProcessor(config) - require.NoError(t, err) metrics := collectProcessorMetrics(processor) assert.NotZero(t, metrics.Ints, "sampling.storage.lsm_size") assert.NotZero(t, metrics.Ints, "sampling.storage.value_log_size") } -func TestStorageGC(t *testing.T) { - if testing.Short() { - t.Skip("skipping slow test") - } - - config := newTempdirConfig(t) - config.TTL = 10 * time.Millisecond - config.FlushInterval = 10 * time.Millisecond - - writeBatch := func(n int) { - config.StorageGCInterval = time.Hour // effectively disable - processor, err := sampling.NewProcessor(config) - require.NoError(t, err) - go processor.Run() - defer processor.Stop(context.Background()) - for i := 0; i < n; i++ { - traceID := uuid.Must(uuid.NewV4()).String() - // Create a larger event to fill up the vlog faster, especially when it is above ValueThreshold - batch := modelpb.Batch{{ - Trace: &modelpb.Trace{Id: traceID}, - Event: &modelpb.Event{Duration: uint64(123 * time.Millisecond)}, - Span: &modelpb.Span{ - Type: strings.Repeat("a", 1000), - Subtype: strings.Repeat("b", 1000), - Id: traceID, - Name: strings.Repeat("c", 1000), - }, - }} - err := processor.ProcessBatch(context.Background(), &batch) - require.NoError(t, err) - assert.Empty(t, batch) - } - } - - // Process spans until value log files have been created. - // Garbage collection is disabled at this time. - for len(vlogFilenames(config.StorageDir)) < 3 { - writeBatch(2000) - } - - config.StorageGCInterval = 10 * time.Millisecond - processor, err := sampling.NewProcessor(config) - require.NoError(t, err) - go processor.Run() - defer processor.Stop(context.Background()) - - // Wait for the first value log file to be garbage collected. - var vlogs []string - assert.Eventually(t, func() bool { - vlogs = vlogFilenames(config.StorageDir) - return len(vlogs) == 0 || vlogs[0] != "000000.vlog" - }, 10*time.Second, 100*time.Millisecond, vlogs) -} - -func TestStorageGCConcurrency(t *testing.T) { - // This test ensures that TBS processor does not return an error - // even when run concurrently e.g. in hot reload - if testing.Short() { - t.Skip("skipping slow test") - } - - config := newTempdirConfig(t) - config.TTL = 10 * time.Millisecond - config.FlushInterval = 10 * time.Millisecond - config.StorageGCInterval = 10 * time.Millisecond - - g := errgroup.Group{} - for i := 0; i < 2; i++ { - processor, err := sampling.NewProcessor(config) - require.NoError(t, err) - g.Go(processor.Run) - go func() { - time.Sleep(time.Second) - assert.NoError(t, processor.Stop(context.Background())) - }() - } - assert.NoError(t, g.Wait()) -} - func TestStorageLimit(t *testing.T) { // This test ensures that when tail sampling is configured with a hard // storage limit, the limit is respected once the size is available. // To update the database size during our test without waiting a full // minute, we store some span events, close and re-open the database, so // the size is updated. - if testing.Short() { - t.Skip("skipping slow test") - } - writeBatch := func(n int, c sampling.Config, assertBatch func(b modelpb.Batch)) *sampling.Processor { processor, err := sampling.NewProcessor(c) require.NoError(t, err) @@ -758,52 +653,39 @@ func TestStorageLimit(t *testing.T) { return processor } - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config + config.TTL = time.Hour // Write 5K span events and close the DB to persist to disk the storage // size and assert that none are reported immediately. 
writeBatch(5000, config, func(b modelpb.Batch) { assert.Empty(t, b, fmt.Sprintf("expected empty but size is %d", len(b))) }) - assert.NoError(t, config.Storage.Flush()) - assert.NoError(t, config.DB.Close()) - // Open a new instance of the badgerDB and check the size. - var err error - config.DB, err = eventstorage.NewStorageManager(config.StorageDir) - require.NoError(t, err) - t.Cleanup(func() { config.DB.Close() }) - config.Storage = config.DB.NewReadWriter() + err := config.DB.Reload() + assert.NoError(t, err) lsm, vlog := config.DB.Size() - assert.GreaterOrEqual(t, lsm+vlog, int64(1024)) - - config.StorageLimit = 1024 // Set the storage limit to 1024 bytes. - // Create a massive 150K span batch (per CPU) to trigger the badger error - // Transaction too big, causing the ProcessBatch to report the some traces - // immediately. - // Rather than setting a static threshold, use the runtime.NumCPU as a - // multiplier since the sharded writers use that variable and the more CPUs - // we have, the more sharded writes we'll have, resulting in a greater buffer. - // To avoid huge test time on large systems do this incrementally - for i := 1; i < runtime.NumCPU(); i++ { - processor := writeBatch(150_000*i, config, func(b modelpb.Batch) { - assert.NotEmpty(t, b) - }) + assert.Greater(t, lsm+vlog, int64(10<<10)) - failedWrites := collectProcessorMetrics(processor).Ints["sampling.events.failed_writes"] - t.Log(failedWrites) - // Ensure that there are some failed writes. + config.StorageLimit = 10 << 10 // Set the storage limit to smaller than existing storage - if failedWrites >= 1 { - return - } + processor := writeBatch(1000, config, func(b modelpb.Batch) { + assert.Len(t, b, 1000) + }) + + failedWrites := collectProcessorMetrics(processor).Ints["sampling.events.failed_writes"] + t.Log(failedWrites) + // Ensure that there are some failed writes. + if failedWrites >= 1 { + return } - t.Fatal("badger error never thrown") + t.Fatal("storage limit error never thrown") } func TestProcessRemoteTailSamplingPersistence(t *testing.T) { - config := newTempdirConfig(t) + tempdirConfig := newTempdirConfig(t) + config := tempdirConfig.Config config.Policies = []sampling.Policy{{SampleRate: 0.5}} config.FlushInterval = 10 * time.Millisecond @@ -817,7 +699,7 @@ func TestProcessRemoteTailSamplingPersistence(t *testing.T) { defer processor.Stop(context.Background()) // Wait for subscriber_position.json to be written to the storage directory. - subscriberPositionFile := filepath.Join(config.StorageDir, "subscriber_position.json") + subscriberPositionFile := filepath.Join(tempdirConfig.tempDir, "subscriber_position.json") data, info := waitFileModified(t, subscriberPositionFile, time.Time{}) assert.Equal(t, "{}", string(data)) @@ -826,133 +708,8 @@ func TestProcessRemoteTailSamplingPersistence(t *testing.T) { assert.Equal(t, `{"index_name":1}`, string(data)) } -func TestDropLoop(t *testing.T) { - // This test ensures that if badger is stuck at storage limit for TTL, - // DB is dropped and recreated. 
- if testing.Short() { - t.Skip("skipping slow test") - } - - makeBatch := func(n int) modelpb.Batch { - batch := make(modelpb.Batch, 0, n) - for i := 0; i < n; i++ { - traceID := uuid.Must(uuid.NewV4()).String() - batch = append(batch, &modelpb.APMEvent{ - Trace: &modelpb.Trace{Id: traceID}, - Event: &modelpb.Event{Duration: uint64(123 * time.Millisecond)}, - Span: &modelpb.Span{ - Type: "type", - Id: traceID, - }, - }) - } - return batch - } - - writeBatch := func(t *testing.T, n int, c sampling.Config, assertBatch func(b modelpb.Batch)) *sampling.Processor { - processor, err := sampling.NewProcessor(c) - require.NoError(t, err) - go processor.Run() - defer processor.Stop(context.Background()) - batch := makeBatch(n) - err = processor.ProcessBatch(context.Background(), &batch) - require.NoError(t, err) - assertBatch(batch) - return processor - } - - for _, tc := range []struct { - name string - subscriberPosExists bool - }{ - { - name: "subscriber_position_not_exist", - subscriberPosExists: false, - }, - { - name: "subscriber_position_exists", - subscriberPosExists: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - config := newTempdirConfig(t) - config.StorageGCInterval = time.Hour // effectively disable GC - - config.FlushInterval = 10 * time.Millisecond - subscriberChan := make(chan string) - subscriber := pubsubtest.SubscriberChan(subscriberChan) - config.Elasticsearch = pubsubtest.Client(nil, subscriber) - subscriberPositionFile := filepath.Join(config.StorageDir, "subscriber_position.json") - - // Write 5K span events and close the DB to persist to disk the storage - // size and assert that none are reported immediately. - writeBatch(t, 5000, config, func(b modelpb.Batch) { - assert.Empty(t, b, fmt.Sprintf("expected empty but size is %d", len(b))) - - subscriberChan <- "0102030405060708090a0b0c0d0e0f10" - assert.Eventually(t, func() bool { - data, err := config.DB.ReadSubscriberPosition() - return err == nil && string(data) == `{"index_name":1}` - }, time.Second, 100*time.Millisecond) - }) - assert.NoError(t, config.Storage.Flush()) - assert.NoError(t, config.DB.Close()) - - if !tc.subscriberPosExists { - err := os.Remove(subscriberPositionFile) - assert.NoError(t, err) - } - - func() { - // Open a new instance of the badgerDB and check the size. 
- var err error - config.DB, err = eventstorage.NewStorageManager(config.StorageDir) - require.NoError(t, err) - t.Cleanup(func() { config.DB.Close() }) - config.Storage = config.DB.NewReadWriter() - - lsm, vlog := config.DB.Size() - assert.Greater(t, lsm+vlog, int64(1024*1024)) - - config.Elasticsearch = pubsubtest.Client(nil, nil) // disable pubsub - - config.StorageLimit = 100 * 1024 // lower limit to trigger storage limit error - config.TTL = time.Second - processor, err := sampling.NewProcessor(config) - require.NoError(t, err) - go processor.Run() - defer processor.Stop(context.Background()) - - // wait for up to 1 minute for dropAndRecreate to kick in - // no SST files after dropping DB and before first write - var filenames []string - assert.Eventually(t, func() bool { - filenames = sstFilenames(config.StorageDir) - return len(filenames) == 0 - }, 90*time.Second, 200*time.Millisecond, filenames) - - data, err := config.DB.ReadSubscriberPosition() - assert.NoError(t, err) - if tc.subscriberPosExists { - assert.Equal(t, `{"index_name":1}`, string(data)) - } else { - assert.Equal(t, "{}", string(data)) - } - - // try to write to new DB - batch := makeBatch(10) - err = processor.ProcessBatch(context.Background(), &batch) - require.NoError(t, err) - }() - assert.NoError(t, config.DB.Close()) - assert.Greater(t, len(sstFilenames(config.StorageDir)), 0) - }) - } -} - func TestGracefulShutdown(t *testing.T) { - config := newTempdirConfig(t) + config := newTempdirConfig(t).Config sampleRate := 0.5 config.Policies = []sampling.Policy{{SampleRate: sampleRate}} config.FlushInterval = time.Minute // disable finalize @@ -979,10 +736,8 @@ func TestGracefulShutdown(t *testing.T) { assert.NoError(t, processor.ProcessBatch(context.Background(), &batch)) assert.Empty(t, batch) assert.NoError(t, processor.Stop(context.Background())) - assert.NoError(t, config.Storage.Flush()) - reader := config.DB.NewBypassReadWriter() - defer reader.Close() + reader := config.DB.NewReadWriter() var count int for i := 0; i < totalTraces; i++ { @@ -993,43 +748,46 @@ func TestGracefulShutdown(t *testing.T) { assert.Equal(t, int(sampleRate*float64(totalTraces)), count) } -func newTempdirConfig(tb testing.TB) sampling.Config { +type testConfig struct { + sampling.Config + tempDir string +} + +func newTempdirConfig(tb testing.TB) testConfig { tempdir, err := os.MkdirTemp("", "samplingtest") require.NoError(tb, err) tb.Cleanup(func() { os.RemoveAll(tempdir) }) - badgerDB, err := eventstorage.NewStorageManager(tempdir) + db, err := eventstorage.NewStorageManager(tempdir) require.NoError(tb, err) - tb.Cleanup(func() { badgerDB.Close() }) - - storage := badgerDB.NewReadWriter() - - return sampling.Config{ - BatchProcessor: modelpb.ProcessBatchFunc(func(context.Context, *modelpb.Batch) error { return nil }), - LocalSamplingConfig: sampling.LocalSamplingConfig{ - FlushInterval: time.Second, - MaxDynamicServices: 1000, - IngestRateDecayFactor: 0.9, - Policies: []sampling.Policy{ - {SampleRate: 0.1}, + tb.Cleanup(func() { db.Close() }) + + return testConfig{ + tempDir: tempdir, + Config: sampling.Config{ + BatchProcessor: modelpb.ProcessBatchFunc(func(context.Context, *modelpb.Batch) error { return nil }), + LocalSamplingConfig: sampling.LocalSamplingConfig{ + FlushInterval: time.Second, + MaxDynamicServices: 1000, + IngestRateDecayFactor: 0.9, + Policies: []sampling.Policy{ + {SampleRate: 0.1}, + }, }, - }, - RemoteSamplingConfig: sampling.RemoteSamplingConfig{ - Elasticsearch: pubsubtest.Client(nil, nil), - 
SampledTracesDataStream: sampling.DataStreamConfig{ - Type: "traces", - Dataset: "sampled", - Namespace: "testing", + RemoteSamplingConfig: sampling.RemoteSamplingConfig{ + Elasticsearch: pubsubtest.Client(nil, nil), + SampledTracesDataStream: sampling.DataStreamConfig{ + Type: "traces", + Dataset: "sampled", + Namespace: "testing", + }, + UUID: "local-apm-server", + }, + StorageConfig: sampling.StorageConfig{ + DB: db, + TTL: 30 * time.Minute, + StorageLimit: 0, // No storage limit. }, - UUID: "local-apm-server", - }, - StorageConfig: sampling.StorageConfig{ - DB: badgerDB, - Storage: storage, - StorageDir: tempdir, - StorageGCInterval: time.Second, - TTL: 30 * time.Minute, - StorageLimit: 0, // No storage limit. }, } } @@ -1121,31 +879,3 @@ func waitFileModified(tb testing.TB, filename string, after time.Time) ([]byte, } } } - -func vlogFilenames(storageDir string) []string { - entries, _ := os.ReadDir(storageDir) - - var vlogs []string - for _, entry := range entries { - name := entry.Name() - if strings.HasSuffix(name, ".vlog") { - vlogs = append(vlogs, name) - } - } - sort.Strings(vlogs) - return vlogs -} - -func sstFilenames(storageDir string) []string { - entries, _ := os.ReadDir(storageDir) - - var ssts []string - for _, entry := range entries { - name := entry.Name() - if strings.HasSuffix(name, ".sst") { - ssts = append(ssts, name) - } - } - sort.Strings(ssts) - return ssts -}
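
The updated TestProcessAlreadyTailSampled no longer passes a per-write TTL; it calls config.DB.RotatePartitions() twice to simulate two TTL periods elapsing, so the decision written for trace2 before the rotations is expected to have expired while the decision written for trace1 afterwards is still readable. Below is a minimal sketch of that style of time-partitioned expiry, assuming a plain in-memory store; the partitionedKV type and its methods are illustrative only and are not the pebble-backed eventstorage.StorageManager API.

package main

import "fmt"

// partitionedKV approximates TTL by keeping a fixed number of partitions.
// Writes always go to the newest partition, reads consult every live
// partition, and Rotate drops the oldest partition, so an entry survives
// at most len(parts) rotations (roughly TTL = rotation interval * count).
type partitionedKV struct {
	parts []map[string]bool // parts[len(parts)-1] is the active partition
}

func newPartitionedKV(n int) *partitionedKV {
	p := &partitionedKV{parts: make([]map[string]bool, n)}
	for i := range p.parts {
		p.parts[i] = map[string]bool{}
	}
	return p
}

func (p *partitionedKV) WriteTraceSampled(traceID string, sampled bool) {
	p.parts[len(p.parts)-1][traceID] = sampled
}

func (p *partitionedKV) IsTraceSampled(traceID string) (sampled, found bool) {
	for _, part := range p.parts {
		if v, ok := part[traceID]; ok {
			return v, true
		}
	}
	return false, false
}

// Rotate discards the oldest partition and starts a fresh active one.
func (p *partitionedKV) Rotate() {
	p.parts = append(p.parts[1:], map[string]bool{})
}

func main() {
	db := newPartitionedKV(2)
	db.WriteTraceSampled("trace2", true)
	db.Rotate()
	db.Rotate() // after two rotations trace2 has aged out
	db.WriteTraceSampled("trace1", true)

	_, found := db.IsTraceSampled("trace2")
	fmt.Println("trace2 found:", found) // false: expired
	_, found = db.IsTraceSampled("trace1")
	fmt.Println("trace1 found:", found) // true: still within the window
}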
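
The TODO added in the publish-and-delete path notes that pebble supports range deletes, which could replace deleting a trace's events one by one, at the cost of racing with concurrent writes since no transactions are used. For reference, a range delete over a traceID-prefixed keyspace looks like the sketch below with github.com/cockroachdb/pebble/v2; the key layout (traceID + ":" + eventID) is an assumption for illustration, not the layout used by eventstorage.

package main

import (
	"fmt"
	"log"

	"github.com/cockroachdb/pebble/v2"
)

func main() {
	db, err := pebble.Open("/tmp/pebble-range-delete-demo", &pebble.Options{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hypothetical key layout: "<traceID>:<eventID>" -> encoded event.
	traceID := "0102030405060708090a0b0c0d0e0f10"
	for _, eventID := range []string{"span-1", "span-2", "tx-1"} {
		if err := db.Set([]byte(traceID+":"+eventID), []byte("payload"), pebble.NoSync); err != nil {
			log.Fatal(err)
		}
	}

	// Delete every key carrying the trace's prefix in one operation.
	// ':'+1 == ';', so [traceID+":", traceID+";") covers exactly this trace.
	// Any event written but not yet read before this call is deleted too,
	// which is the race the TODO refers to.
	if err := db.DeleteRange([]byte(traceID+":"), []byte(traceID+";"), pebble.Sync); err != nil {
		log.Fatal(err)
	}

	if _, closer, err := db.Get([]byte(traceID + ":span-1")); err == pebble.ErrNotFound {
		fmt.Println("trace events deleted")
	} else if err == nil {
		closer.Close()
	}
}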
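
TestStorageLimit now refreshes the reported size with config.DB.Reload() and sets StorageLimit to 10 KiB, smaller than what is already stored, expecting subsequent writes to fail, the events to remain in the batch for immediate reporting, and the failures to surface in the sampling.events.failed_writes metric. The following is a rough sketch of such a size-capped write guard under those assumptions; sizeLimitedWriter and errStorageLimitReached are hypothetical names, not part of eventstorage.

package main

import (
	"errors"
	"fmt"
)

var errStorageLimitReached = errors.New("configured storage limit reached")

// sizeLimitedWriter rejects writes once an approximate size estimate
// exceeds the configured limit; callers count the failures (compare the
// sampling.events.failed_writes metric) and report those events
// immediately instead of holding them for a tail-sampling decision.
type sizeLimitedWriter struct {
	limit int64 // 0 means unlimited
	size  int64 // approximate bytes written
}

func (w *sizeLimitedWriter) WriteTraceEvent(traceID, id string, payload []byte) error {
	if w.limit > 0 && w.size >= w.limit {
		return fmt.Errorf("%w (%d >= %d bytes)", errStorageLimitReached, w.size, w.limit)
	}
	w.size += int64(len(traceID) + len(id) + len(payload))
	return nil
}

func main() {
	w := &sizeLimitedWriter{limit: 10 << 10} // 10 KiB, as in the test
	var failedWrites int
	for i := 0; i < 100; i++ {
		err := w.WriteTraceEvent("trace", fmt.Sprintf("span-%d", i), make([]byte, 256))
		if err != nil {
			failedWrites++ // such events would be reported immediately instead of stored
		}
	}
	fmt.Println("failed writes:", failedWrites) // non-zero once the estimate exceeds 10 KiB
}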