diff --git a/.github/workflows/build.yml b/.github/workflows/build.yaml
similarity index 88%
rename from .github/workflows/build.yml
rename to .github/workflows/build.yaml
index 12f5395..0bba6f0 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yaml
@@ -1,4 +1,4 @@
-name: Build
+name: compile-and-test
on:
pull_request:
@@ -13,7 +13,7 @@ jobs:
uses: actions/setup-java@v4
with:
distribution: 'temurin'
- java-version: 17
+ java-version: 21
- name: Setup Gradle
uses: gradle/actions/setup-gradle@v4
- name: Build with Gradle
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..5ad6f80
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,59 @@
+name: publish
+
+on:
+ push:
+ tags: ['v*']
+ release:
+ types: [published]
+
+jobs:
+ build-artifact:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Extract version from tag
+ id: version
+ shell: bash
+ run: |
+ if [[ -n "${{ github.event.release.tag_name }}" ]]; then
+ # GitHub release
+ TAG_NAME="${{ github.event.release.tag_name }}"
+ echo "📋 Triggered by GitHub release: $TAG_NAME"
+ else
+ # Just a new tag
+ TAG_NAME=${GITHUB_REF#refs/tags/}
+ echo "🏷️ Triggered by Git tag: $TAG_NAME"
+ fi
+
+ # Strip the leading 'v', e.g. v8.18.1.0 -> 8.18.1.0
+ VERSION=${TAG_NAME#v}
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+ - name: Setup Java
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: 21
+
+ - name: Setup Gradle
+ uses: gradle/actions/setup-gradle@v4
+
+ - name: Build with Gradle
+ run: ./gradlew -Pplugin_version=${{ steps.version.outputs.version }} clean assemble --no-daemon
+
+ - name: Upload Plugin Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: pathhierarchy-aggregation-${{ steps.version.outputs.version }}
+ path: build/distributions/*.zip
+
+ - name: Attach ZIP to GitHub Release
+ uses: softprops/action-gh-release@v2
+ if: github.event.release.tag_name != ''
+ with:
+ files: build/distributions/pathhierarchy-aggregation-${{ steps.version.outputs.version }}.zip
+ tag_name: ${{ github.event.release.tag_name }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/LICENSE b/LICENSE
index e92b4d2..be3f7b2 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,21 +1,661 @@
-The MIT License (MIT)
-
-Copyright (c) 2018
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
index 0f2c7a4..8647e63 100644
--- a/README.md
+++ b/README.md
@@ -327,4 +327,4 @@ GET /calendar/_search?size=0
License
-------
-This software is under The MIT License (MIT).
+This software is under the GNU Affero General Public License v3.0 (AGPL-3.0).
diff --git a/build.gradle b/build.gradle
index 19a82d5..4e4bb13 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1,6 +1,9 @@
+/*
+Gradle build file for building, testing, and packaging the plugin.
+*/
+
buildscript {
repositories {
- mavenLocal()
mavenCentral()
}
@@ -9,6 +12,11 @@ buildscript {
}
}
+// The Spotless plugin handles code formatting
+plugins {
+ id("com.diffplug.spotless") version "7.0.4"
+}
+
repositories {
mavenLocal()
mavenCentral()
@@ -17,14 +25,11 @@ repositories {
group = 'org.elasticsearch.plugin'
version = "${plugin_version}"
-def versions = org.elasticsearch.gradle.VersionProperties.versions
-
apply plugin: 'java'
apply plugin: 'idea'
apply plugin: 'elasticsearch.esplugin'
apply plugin: 'elasticsearch.yaml-rest-test'
-
esplugin {
name 'pathhierarchy-aggregation'
description 'Return a path hierarchy aggregation'
@@ -33,17 +38,46 @@ esplugin {
noticeFile = rootProject.file('README.md')
}
-
+def versions = org.elasticsearch.gradle.VersionProperties.versions
dependencies {
implementation "org.elasticsearch:elasticsearch:${es_version}"
yamlRestTestImplementation "org.elasticsearch.test:framework:${es_version}"
+ yamlRestTestImplementation "org.elasticsearch.test:yaml-rest-runner:${es_version}"
yamlRestTestImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}"
}
-tasks.named("yamlRestTest").configure {
- systemProperty 'tests.security.manager', 'false'
+// Add -Xlint flags so Elasticsearch deprecation and unchecked warnings are reported at compile time
+allprojects {
+ gradle.projectsEvaluated {
+ tasks.withType(JavaCompile) {
+ options.compilerArgs << "-Xlint:deprecation"
+ options.compilerArgs << "-Xlint:unchecked"
+ }
+ }
+}
+
+// Automatic formatting configuration (the same configuration as Elasticsearch)
+spotless {
+ java {
+ importOrderFile('config/elastic.importorder') // import order file as exported from Elasticsearch
+ eclipse().configFile('config/formatterConfig.xml')
+ trimTrailingWhitespace()
+ target 'src/**/*.java'
+ }
}
-tasks.named("test").configure {
- systemProperty 'tests.security.manager', 'false'
+check.dependsOn spotlessCheck
+
+testClusters.configureEach {
+ // Make sure the ES distribution used for rest tests is the "complete" variant
+ testDistribution = 'DEFAULT'
+
+ // Disable security (avoids warnings/failures with X-Pack features)
+ setting 'xpack.security.enabled', 'false'
+
+ // Logging levels for debugging: logs are located in build/testclusters/runTask-0/logs/
+ setting 'logger._root', 'DEBUG'
+ setting 'logger.org.elasticsearch.plugins', 'DEBUG'
+ setting 'logger.org.elasticsearch.cluster', 'DEBUG'
+ setting 'logger.org.elasticsearch.cluster.metadata', 'TRACE'
}
diff --git a/config/elastic.importorder b/config/elastic.importorder
new file mode 100644
index 0000000..78e051e
--- /dev/null
+++ b/config/elastic.importorder
@@ -0,0 +1,7 @@
+#Eclipse configuration for import order for Elasticsearch
+0=
+1=com
+2=org
+3=java
+4=javax
+5=\#
diff --git a/config/formatterConfig.xml b/config/formatterConfig.xml
new file mode 100644
index 0000000..6291531
--- /dev/null
+++ b/config/formatterConfig.xml
@@ -0,0 +1,390 @@
+<!-- Eclipse code formatter profile used by Spotless (same configuration as Elasticsearch); the XML setting entries are not reproduced here -->
diff --git a/gradle.properties b/gradle.properties
index ead8d50..3ed2ee9 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,2 +1,2 @@
-es_version = 7.17.28
-plugin_version = 7.17.28.0
+es_version = 8.19.4
+plugin_version = 8.19.4.0
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index a4b76b9..1b33c55 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index df97d72..ca025c8 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.14-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/gradlew b/gradlew
index f5feea6..23d15a9 100755
--- a/gradlew
+++ b/gradlew
@@ -86,8 +86,7 @@ done
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
-APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
-' "$PWD" ) || exit
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
@@ -115,7 +114,7 @@ case "$( uname )" in #(
NONSTOP* ) nonstop=true ;;
esac
-CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+CLASSPATH="\\\"\\\""
# Determine the Java command to use to start the JVM.
@@ -206,7 +205,7 @@ fi
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
-# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
@@ -214,7 +213,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
- org.gradle.wrapper.GradleWrapperMain \
+ -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \
"$@"
# Stop when "xargs" is not available.
diff --git a/gradlew.bat b/gradlew.bat
index 9b42019..5eed7ee 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -70,11 +70,11 @@ goto fail
:execute
@rem Setup the command line
-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+set CLASSPATH=
@rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %*
:end
@rem End local scope for the variables with windows NT shell
diff --git a/src/main/java/org/opendatasoft/elasticsearch/plugin/PathHierarchyAggregation.java b/src/main/java/org/opendatasoft/elasticsearch/plugin/PathHierarchyAggregation.java
index 63861bf..8e2b6f5 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/plugin/PathHierarchyAggregation.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/plugin/PathHierarchyAggregation.java
@@ -14,20 +14,18 @@ public class PathHierarchyAggregation extends Plugin implements SearchPlugin {
public ArrayList getAggregations() {
ArrayList r = new ArrayList<>();
r.add(
- new AggregationSpec(
- PathHierarchyAggregationBuilder.NAME,
- PathHierarchyAggregationBuilder::new,
- PathHierarchyAggregationBuilder.PARSER)
- .addResultReader(InternalPathHierarchy::new)
- .setAggregatorRegistrar(PathHierarchyAggregationBuilder::registerAggregators)
+ new AggregationSpec(
+ PathHierarchyAggregationBuilder.NAME,
+ PathHierarchyAggregationBuilder::new,
+ PathHierarchyAggregationBuilder.PARSER
+ ).addResultReader(InternalPathHierarchy::new).setAggregatorRegistrar(PathHierarchyAggregationBuilder::registerAggregators)
);
r.add(
- new AggregationSpec(
- DateHierarchyAggregationBuilder.NAME,
- DateHierarchyAggregationBuilder::new,
- DateHierarchyAggregationBuilder.PARSER)
- .addResultReader(InternalDateHierarchy::new)
- .setAggregatorRegistrar(DateHierarchyAggregationBuilder::registerAggregators)
+ new AggregationSpec(
+ DateHierarchyAggregationBuilder.NAME,
+ DateHierarchyAggregationBuilder::new,
+ DateHierarchyAggregationBuilder.PARSER
+ ).addResultReader(InternalDateHierarchy::new).setAggregatorRegistrar(DateHierarchyAggregationBuilder::registerAggregators)
);
return r;
}
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationBuilder.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationBuilder.java
index 4642461..ded77c4 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationBuilder.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationBuilder.java
@@ -1,29 +1,30 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
-import org.elasticsearch.Version;
-import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.Rounding;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.time.DateFormatter;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.time.ZoneId;
@@ -36,15 +37,18 @@
import static java.util.Collections.unmodifiableMap;
-
/**
* The builder of the aggregatorFactory. Also implements the parsing of the request.
*/
public class DateHierarchyAggregationBuilder extends ValuesSourceAggregationBuilder {
+ @Override
+ public TransportVersion getMinimalSupportedVersion() {
+ return TransportVersions.V_8_0_0;
+ }
+
public static final String NAME = "date_hierarchy";
public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY =
- new ValuesSourceRegistry.RegistryKey<>(NAME, DateHierarchyAggregationSupplier.class);
-
+ new ValuesSourceRegistry.RegistryKey<>(NAME, DateHierarchyAggregationSupplier.class);
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static final ParseField ORDER_FIELD = new ParseField("order");
@@ -52,7 +56,6 @@ public class DateHierarchyAggregationBuilder extends ValuesSourceAggregationBuil
public static final ParseField SHARD_SIZE_FIELD = new ParseField("shard_size");
public static final ParseField MIN_DOC_COUNT_FIELD = new ParseField("min_doc_count");
-
public static final Map INTERVAL_CONFIG;
static {
Map dateFieldUnits = new LinkedHashMap<>();
@@ -60,7 +63,7 @@ public class DateHierarchyAggregationBuilder extends ValuesSourceAggregationBuil
dateFieldUnits.put("months", new IntervalConfig(Rounding.DateTimeUnit.MONTH_OF_YEAR, "MM"));
dateFieldUnits.put("days", new IntervalConfig(Rounding.DateTimeUnit.DAY_OF_MONTH, "dd"));
dateFieldUnits.put("hours", new IntervalConfig(Rounding.DateTimeUnit.HOUR_OF_DAY, "hh"));
- dateFieldUnits.put("minutes", new IntervalConfig(Rounding.DateTimeUnit.MINUTES_OF_HOUR, "mm"));
+ dateFieldUnits.put("minutes", new IntervalConfig(Rounding.DateTimeUnit.MINUTE_OF_HOUR, "mm"));
dateFieldUnits.put("seconds", new IntervalConfig(Rounding.DateTimeUnit.SECOND_OF_MINUTE, "ss"));
INTERVAL_CONFIG = unmodifiableMap(dateFieldUnits);
}
@@ -88,13 +91,19 @@ public PreparedRounding(RoundingInfo roundingInfo, Rounding.Prepared prepared) {
public List buildRoundings() {
List roundings = new ArrayList<>();
- ZoneId timeZone = timeZone() == null ? ZoneOffset.UTC: timeZone();
+ ZoneId timeZone = timeZone() == null ? ZoneOffset.UTC : timeZone();
long now = System.currentTimeMillis();
for (String interval : INTERVAL_CONFIG.keySet()) {
- RoundingInfo ri = new RoundingInfo(interval, createRounding(INTERVAL_CONFIG.get(interval).dateTimeUnit),
- new DocValueFormat.DateTime(DateFormatter.forPattern(INTERVAL_CONFIG.get(interval).format), timeZone,
- DateFieldMapper.Resolution.MILLISECONDS));
+ RoundingInfo ri = new RoundingInfo(
+ interval,
+ createRounding(INTERVAL_CONFIG.get(interval).dateTimeUnit),
+ new DocValueFormat.DateTime(
+ DateFormatter.forPattern(INTERVAL_CONFIG.get(interval).format),
+ timeZone,
+ DateFieldMapper.Resolution.MILLISECONDS
+ )
+ );
roundings.add(new PreparedRounding(ri, ri.rounding.prepareForUnknown()));
if (interval.equals(interval())) {
@@ -112,7 +121,7 @@ public static class RoundingInfo implements Writeable {
public RoundingInfo(String interval, Rounding rounding, DocValueFormat docValueFormat) {
this.interval = interval;
- this.rounding = rounding;
+ this.rounding = rounding;
this.format = docValueFormat;
}
@@ -130,13 +139,15 @@ public void writeTo(StreamOutput out) throws IOException {
}
}
- public static final DateHierarchyAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new
- DateHierarchyAggregator.BucketCountThresholds(10, -1);
- public static final ObjectParser PARSER =
- ObjectParser.fromBuilder(NAME, DateHierarchyAggregationBuilder::new);
+ public static final DateHierarchyAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS =
+ new DateHierarchyAggregator.BucketCountThresholds(10, -1);
+ public static final ObjectParser PARSER = ObjectParser.fromBuilder(
+ NAME,
+ DateHierarchyAggregationBuilder::new
+ );
static {
-
- ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, true);
+ // ES 8.x introduces field validation; set timezoneAware to false to avoid declaring the timezone field twice
+ ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
PARSER.declareString(DateHierarchyAggregationBuilder::interval, INTERVAL_FIELD);
@@ -151,8 +162,7 @@ public void writeTo(StreamOutput out) throws IOException {
PARSER.declareInt(DateHierarchyAggregationBuilder::size, SIZE_FIELD);
PARSER.declareLong(DateHierarchyAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD);
PARSER.declareInt(DateHierarchyAggregationBuilder::shardSize, SHARD_SIZE_FIELD);
- PARSER.declareObjectArray(DateHierarchyAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p),
- ORDER_FIELD);
+ PARSER.declareObjectArray(DateHierarchyAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), ORDER_FIELD);
}
public static AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
@@ -168,15 +178,15 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
private String interval = "years";
private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order
private DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new DateHierarchyAggregator.BucketCountThresholds(
- DEFAULT_BUCKET_COUNT_THRESHOLDS);
-
+ DEFAULT_BUCKET_COUNT_THRESHOLDS
+ );
private DateHierarchyAggregationBuilder(String name) {
super(name);
}
@Override
- protected boolean serializeTargetValueType(Version version) {
+ protected boolean serializeTargetValueType(TransportVersion version) {
return true;
}
@@ -193,8 +203,7 @@ public DateHierarchyAggregationBuilder(StreamInput in) throws IOException {
timeZone = in.readOptionalZoneId();
}
- private DateHierarchyAggregationBuilder(DateHierarchyAggregationBuilder clone, Builder factoriesBuilder,
- Map metaData) {
+ private DateHierarchyAggregationBuilder(DateHierarchyAggregationBuilder clone, Builder factoriesBuilder, Map metaData) {
super(clone, factoriesBuilder, metaData);
order = clone.order;
minDocCount = clone.minDocCount;
@@ -275,7 +284,7 @@ private DateHierarchyAggregationBuilder order(BucketOrder order) {
if (order == null) {
throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
}
- if(order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
+ if (order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
this.order = order; // if order already contains a tie-breaker we are good to go
} else { // otherwise add a tie-breaker by using a compound order
this.order = BucketOrder.compound(order);
@@ -292,7 +301,6 @@ private DateHierarchyAggregationBuilder order(List orders) {
return this;
}
-
/**
* Sets the size - indicating how many term buckets should be returned
* (defaults to 10)
@@ -310,7 +318,8 @@ public DateHierarchyAggregationBuilder size(int size) {
public DateHierarchyAggregationBuilder minDocCount(long minDocCount) {
if (minDocCount < 0) {
throw new IllegalArgumentException(
- "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
+ "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+ );
}
this.minDocCount = minDocCount;
return this;
@@ -336,8 +345,7 @@ public BucketCardinality bucketCardinality() {
*/
public DateHierarchyAggregationBuilder shardSize(int shardSize) {
if (shardSize <= 0) {
- throw new IllegalArgumentException(
- "[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+ throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
}
bucketCountThresholds.setShardSize(shardSize);
return this;
@@ -351,25 +359,27 @@ public int shardSize() {
}
@Override
- protected ValuesSourceAggregatorFactory innerBuild(AggregationContext context,
- ValuesSourceConfig config,
- AggregatorFactory parent,
- Builder subFactoriesBuilder) throws IOException {
-
+ protected ValuesSourceAggregatorFactory innerBuild(
+ AggregationContext context,
+ ValuesSourceConfig config,
+ AggregatorFactory parent,
+ Builder subFactoriesBuilder
+ ) throws IOException {
final List preparedRoundings = buildRoundings();
return new DateHierarchyAggregatorFactory(
- name,
- config,
- order,
- preparedRoundings,
- minDocCount,
- bucketCountThresholds,
- context,
- parent,
- subFactoriesBuilder,
- metadata);
+ name,
+ config,
+ order,
+ preparedRoundings,
+ minDocCount,
+ bucketCountThresholds,
+ context,
+ parent,
+ subFactoriesBuilder,
+ metadata
+ );
}
@Override
@@ -398,18 +408,14 @@ public boolean equals(Object obj) {
if (!super.equals(obj)) return false;
DateHierarchyAggregationBuilder other = (DateHierarchyAggregationBuilder) obj;
return Objects.equals(interval, other.interval)
- && Objects.equals(order, other.order)
- && Objects.equals(minDocCount, other.minDocCount)
- && Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
- && Objects.equals(timeZone, other.timeZone);
+ && Objects.equals(order, other.order)
+ && Objects.equals(minDocCount, other.minDocCount)
+ && Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
+ && Objects.equals(timeZone, other.timeZone);
}
@Override
public String getType() {
return NAME;
}
-
- @Override
- protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() { return REGISTRY_KEY; }
}
-
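
Note on the order handling kept above: a non-compound, non-key order is wrapped in a compound order, which appends the key-ascending tie-breaker mentioned in the field comment, so equal counts still sort deterministically. A minimal sketch of that guarantee (illustrative only, not part of the patch; assumes the BucketOrder/InternalOrder classes already imported by this file):

    // Illustrative helper: mirror of the order(...) setter's tie-breaker rule.
    static BucketOrder withTieBreaker(BucketOrder requested) {
        return (requested instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(requested))
            ? requested                            // already deterministic
            : BucketOrder.compound(requested);     // compound order adds the key asc tie-breaker
    }
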
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationSupplier.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationSupplier.java
index fba09df..4bc4a2a 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationSupplier.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregationSupplier.java
@@ -13,15 +13,17 @@
@FunctionalInterface
public interface DateHierarchyAggregationSupplier {
- Aggregator build(String name,
- AggregatorFactories factories,
- BucketOrder order,
- List roundingsInfo,
- long minDocCount,
- DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
- ValuesSourceConfig valuesSourceConfig,
- SearchContext aggregationContext,
- Aggregator parent,
- CardinalityUpperBound cardinality,
- Map metadata) throws IOException;
+ Aggregator build(
+ String name,
+ AggregatorFactories factories,
+ BucketOrder order,
+ List roundingsInfo,
+ long minDocCount,
+ DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
+ ValuesSourceConfig valuesSourceConfig,
+ SearchContext aggregationContext,
+ Aggregator parent,
+ CardinalityUpperBound cardinality,
+ Map<String, Object> metadata
+ ) throws IOException;
}
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregator.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregator.java
index 0dc4db1..60e77d4 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregator.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregator.java
@@ -1,17 +1,16 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.Rounding;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.core.Releasables;
import org.elasticsearch.common.util.BytesRefHash;
-import org.elasticsearch.xcontent.ToXContentFragment;
-import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.core.Releasables;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketOrder;
@@ -20,41 +19,41 @@
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
-import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.xcontent.ToXContentFragment;
+import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Arrays;
-import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-
public class DateHierarchyAggregator extends BucketsAggregator {
- public DateHierarchyAggregator(String name,
- AggregatorFactories factories,
- AggregationContext context,
- ValuesSource.Numeric valuesSource,
- BucketOrder order,
- long minDocCount,
- BucketCountThresholds bucketCountThresholds,
- List preparedRoundings,
- Aggregator parent,
- CardinalityUpperBound cardinalityUpperBound,
- Map metadata
+ public DateHierarchyAggregator(
+ String name,
+ AggregatorFactories factories,
+ AggregationContext context,
+ ValuesSource.Numeric valuesSource,
+ BucketOrder order,
+ long minDocCount,
+ BucketCountThresholds bucketCountThresholds,
+ List<DateHierarchyAggregationBuilder.PreparedRounding> preparedRoundings,
+ Aggregator parent,
+ CardinalityUpperBound cardinalityUpperBound,
+ Map<String, Object> metadata
) throws IOException {
super(name, factories, context, parent, cardinalityUpperBound, metadata);
this.valuesSource = valuesSource;
this.preparedRoundings = preparedRoundings;
this.minDocCount = minDocCount;
- bucketOrds = new BytesRefHash(1, context.bigArrays());
+ bucketOrds = new BytesRefHash(1, context.bigArrays());
this.bucketCountThresholds = bucketCountThresholds;
order.validate(this);
this.order = order;
- this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
}
public static class BucketCountThresholds implements Writeable, ToXContentFragment {
@@ -134,8 +133,7 @@ public boolean equals(Object obj) {
return false;
}
DateHierarchyAggregator.BucketCountThresholds other = (DateHierarchyAggregator.BucketCountThresholds) obj;
- return Objects.equals(requiredSize, other.requiredSize)
- && Objects.equals(shardSize, other.shardSize);
+ return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(shardSize, other.shardSize);
}
}
@@ -145,7 +143,6 @@ public boolean equals(Object obj) {
private final long minDocCount;
private final BucketCountThresholds bucketCountThresholds;
private final List preparedRoundings;
- protected final Comparator partiallyBuiltBucketComparator;
/**
* The collector collects the docs, including or not some score (depending of the including of a Scorer) in the
@@ -154,11 +151,11 @@ public boolean equals(Object obj) {
* The LeafBucketCollector is a "Per-leaf bucket collector". It collects docs for the account of buckets.
*/
@Override
- public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+ public LeafBucketCollector getLeafCollector(AggregationExecutionContext ctx, LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
- final SortedNumericDocValues values = valuesSource.longValues(ctx);
+ final SortedNumericDocValues values = valuesSource.longValues(ctx.getLeafReaderContext());
return new LeafBucketCollectorBase(sub, values) {
@@ -171,7 +168,7 @@ public void collect(int doc, long bucket) throws IOException {
for (int i = 0; i < valuesCount; ++i) {
long value = values.nextValue();
String path = "";
- for (DateHierarchyAggregationBuilder.PreparedRounding preparedRounding: preparedRoundings) {
+ for (DateHierarchyAggregationBuilder.PreparedRounding preparedRounding : preparedRoundings) {
long roundedValue = preparedRounding.prepared.round(value);
path += preparedRounding.roundingInfo.format.format(roundedValue).toString();
long bucketOrd = bucketOrds.add(new BytesRef(path));
@@ -190,66 +187,84 @@ public void collect(int doc, long bucket) throws IOException {
}
@Override
- public InternalAggregation[] buildAggregations(long[] owningBucketOrdinals) throws IOException {
+ public InternalAggregation[] buildAggregations(LongArray owningBucketOrdinals) throws IOException {
- InternalDateHierarchy.InternalBucket[][] topBucketsPerOrd = new InternalDateHierarchy.InternalBucket[owningBucketOrdinals.length][];
- InternalDateHierarchy[] results = new InternalDateHierarchy[owningBucketOrdinals.length];
+ try (
+ ObjectArray<InternalDateHierarchy.InternalBucket[]> topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrdinals.size())
+ ) {
- for (int ordIdx = 0; ordIdx < owningBucketOrdinals.length; ordIdx++) {
- assert owningBucketOrdinals[ordIdx] == 0;
+ InternalDateHierarchy[] results = new InternalDateHierarchy[Math.toIntExact(owningBucketOrdinals.size())];
- // build buckets and store them sorted
- final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
+ for (int ordIdx = 0; ordIdx < owningBucketOrdinals.size(); ordIdx++) {
+ assert owningBucketOrdinals.get(ordIdx) == 0;
- PathSortedTree pathSortedTree = new PathSortedTree<>(order.comparator(), size);
+ // build buckets and store them sorted
+ final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
- InternalDateHierarchy.InternalBucket spare;
- for (int i = 0; i < bucketOrds.size(); i++) {
- spare = new InternalDateHierarchy.InternalBucket(0, null, null, null, 0, null);
+ PathSortedTree pathSortedTree = new PathSortedTree<>(
+ order.comparator(),
+ size
+ );
- BytesRef term = new BytesRef();
- bucketOrds.get(i, term);
- String[] paths = term.utf8ToString().split("/", -1);
+ InternalDateHierarchy.InternalBucket spare;
+ for (int i = 0; i < bucketOrds.size(); i++) {
+ spare = new InternalDateHierarchy.InternalBucket(0, null, null, null, 0, null);
- spare.paths = paths;
- spare.key = term;
- spare.level = paths.length - 1;
- spare.name = paths[spare.level];
- spare.docCount = bucketDocCount(i);
- spare.bucketOrd = i;
+ BytesRef term = new BytesRef();
+ bucketOrds.get(i, term);
+ String[] paths = term.utf8ToString().split("/", -1);
- pathSortedTree.add(spare.paths, spare);
- }
+ spare.paths = paths;
+ spare.key = term;
+ spare.level = paths.length - 1;
+ spare.name = paths[spare.level];
+ spare.docCount = bucketDocCount(i);
+ spare.bucketOrd = i;
- // Get the top buckets
- topBucketsPerOrd[ordIdx] = new InternalDateHierarchy.InternalBucket[size];
- long otherHierarchyNodes = pathSortedTree.getFullSize();
- Iterator iterator = pathSortedTree.consumer();
- for (int i = 0; i < size; i++) {
- final InternalDateHierarchy.InternalBucket bucket = iterator.next();
- topBucketsPerOrd[ordIdx][i] = bucket;
- otherHierarchyNodes -= 1;
- }
+ pathSortedTree.add(spare.paths, spare);
+ }
- results[ordIdx] = new InternalDateHierarchy(name, Arrays.asList(topBucketsPerOrd[ordIdx]), order,
- minDocCount, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
- otherHierarchyNodes, metadata());
- }
+ // Get the top buckets
+ topBucketsPerOrd.set(ordIdx, new InternalDateHierarchy.InternalBucket[size]);
+ long otherHierarchyNodes = pathSortedTree.getFullSize();
+ Iterator<InternalDateHierarchy.InternalBucket> iterator = pathSortedTree.consumer();
+ for (int i = 0; i < size; i++) {
+ final InternalDateHierarchy.InternalBucket bucket = iterator.next();
+ topBucketsPerOrd.get(ordIdx)[i] = bucket;
+ otherHierarchyNodes -= 1;
+ }
- // Build sub-aggregations for pruned buckets
- buildSubAggsForAllBuckets(
- topBucketsPerOrd,
- b -> b.bucketOrd,
- (b, aggregations) -> b.aggregations = aggregations
- );
+ results[ordIdx] = new InternalDateHierarchy(
+ name,
+ Arrays.asList(topBucketsPerOrd.get(ordIdx)),
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ otherHierarchyNodes,
+ metadata()
+ );
+ }
+
+ // Build sub-aggregations for pruned buckets
+ buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggregations) -> b.aggregations = aggregations);
- return results;
+ return results;
+ }
}
@Override
public InternalAggregation buildEmptyAggregation() {
- return new InternalDateHierarchy(name, null, order, minDocCount, bucketCountThresholds.getRequiredSize(),
- bucketCountThresholds.getShardSize(), 0, metadata());
+ return new InternalDateHierarchy(
+ name,
+ null,
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ 0,
+ metadata()
+ );
}
@Override
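
Note: both collectors in this patch rely on BytesRefHash.add(...) returning a negative value when the key has already been inserted; decoding it with -1 - ord recovers the existing bucket ordinal. A sketch of that convention (illustrative; variable names as in the surrounding hunks):

    long ord = bucketOrds.add(new BytesRef(path));
    if (ord < 0) {
        // already seen: add() encodes the existing ordinal as -1 - ord
        ord = -1 - ord;
        collectExistingBucket(sub, doc, ord);
    } else {
        // first occurrence of this path
        collectBucket(sub, doc, ord);
    }
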
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregatorFactory.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregatorFactory.java
index 2a96c92..eb9b885 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregatorFactory.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/DateHierarchyAggregatorFactory.java
@@ -10,11 +10,11 @@
import org.elasticsearch.search.aggregations.NonCollectingAggregator;
import org.elasticsearch.search.aggregations.bucket.BucketUtils;
import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
-import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
-import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
@@ -33,16 +33,17 @@ class DateHierarchyAggregatorFactory extends ValuesSourceAggregatorFactory {
private List preparedRoundings;
private final DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds;
- DateHierarchyAggregatorFactory(String name,
- ValuesSourceConfig config,
- BucketOrder order,
- List preparedRoundings,
- long minDocCount,
- DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
- AggregationContext context,
- AggregatorFactory parent,
- AggregatorFactories.Builder subFactoriesBuilder,
- Map metadata
+ DateHierarchyAggregatorFactory(
+ String name,
+ ValuesSourceConfig config,
+ BucketOrder order,
+ List<DateHierarchyAggregationBuilder.PreparedRounding> preparedRoundings,
+ long minDocCount,
+ DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
+ AggregationContext context,
+ AggregatorFactory parent,
+ AggregatorFactories.Builder subFactoriesBuilder,
+ Map<String, Object> metadata
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metadata);
this.order = order;
@@ -52,25 +53,37 @@ class DateHierarchyAggregatorFactory extends ValuesSourceAggregatorFactory {
}
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
- builder.register(DateHierarchyAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.DATE, (name,
- factories,
- order,
- roundingsInfo,
- minDocCount,
- bucketCountThresholds,
- valuesSourceConfig,
- aggregationContext,
- parent,
- cardinality,
- metadata) -> null,
- true);
+ builder.register(
+ DateHierarchyAggregationBuilder.REGISTRY_KEY,
+ CoreValuesSourceType.DATE,
+ (
+ name,
+ factories,
+ order,
+ roundingsInfo,
+ minDocCount,
+ bucketCountThresholds,
+ valuesSourceConfig,
+ aggregationContext,
+ parent,
+ cardinality,
+ metadata) -> null,
+ true
+ );
}
@Override
- protected Aggregator createUnmapped(Aggregator parent,
- Map metadata) throws IOException {
- final InternalAggregation aggregation = new InternalDateHierarchy(name, new ArrayList<>(), order, minDocCount,
- bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), 0, metadata);
+ protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
+ final InternalAggregation aggregation = new InternalDateHierarchy(
+ name,
+ new ArrayList<>(),
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ 0,
+ metadata
+ );
return new NonCollectingAggregator(name, context, parent, factories, metadata) {
{
// even in the case of an unmapped aggregator, validate the
@@ -79,18 +92,21 @@ protected Aggregator createUnmapped(Aggregator parent,
}
@Override
- public InternalAggregation buildEmptyAggregation() { return aggregation; }
+ public InternalAggregation buildEmptyAggregation() {
+ return aggregation;
+ }
};
}
@Override
- protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata
- ) throws IOException {
+ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
+ throws IOException {
- DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new
- DateHierarchyAggregator.BucketCountThresholds(this.bucketCountThresholds);
+ DateHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new DateHierarchyAggregator.BucketCountThresholds(
+ this.bucketCountThresholds
+ );
if (!InternalOrder.isKeyOrder(order)
- && bucketCountThresholds.getShardSize() == DateHierarchyAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+ && bucketCountThresholds.getShardSize() == DateHierarchyAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
// The user has not made a shardSize selection. Use default
// heuristic to avoid any wrong-ranking caused by distributed
// counting
@@ -98,8 +114,17 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c
}
bucketCountThresholds.ensureValidity();
return new DateHierarchyAggregator(
- name, factories, context, (ValuesSource.Numeric) config.getValuesSource(),
- order, minDocCount, bucketCountThresholds, preparedRoundings, parent, cardinality, metadata);
+ name,
+ factories,
+ context,
+ (ValuesSource.Numeric) config.getValuesSource(),
+ order,
+ minDocCount,
+ bucketCountThresholds,
+ preparedRoundings,
+ parent,
+ cardinality,
+ metadata
+ );
}
}
-
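
Note on the "default heuristic" comment above: when the user has not set shard_size, terms-style aggregations over-request per shard to reduce ranking errors from distributed counting. The concrete call sits outside the shown hunk; a typical form (assumption, shown for context only) is:

    // Over-request per shard when shard_size was left at its default.
    bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
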
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalDateHierarchy.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalDateHierarchy.java
index c46e978..f1cdee7 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalDateHierarchy.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalDateHierarchy.java
@@ -3,8 +3,8 @@
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.AggregationReduceContext;
+import org.elasticsearch.search.aggregations.AggregatorReducer;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -12,28 +12,85 @@
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.KeyComparable;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.TreeMap;
/**
* An internal implementation of {@link InternalMultiBucketAggregation}
* which extends {@link org.elasticsearch.search.aggregations.Aggregation}.
* Mainly, returns the builder and makes the reduce of buckets.
*/
-public class InternalDateHierarchy extends InternalMultiBucketAggregation {
+public class InternalDateHierarchy extends InternalMultiBucketAggregation {
+
+ @Override
+ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) {
+ Map<BytesRef, List<InternalBucket>> buckets = new LinkedHashMap<>();
+
+ return new AggregatorReducer() {
+ private long otherHierarchyNodes = 0;
+
+ @Override
+ public void accept(InternalAggregation aggregation) {
+ InternalDateHierarchy dateHierarchy = (InternalDateHierarchy) aggregation;
+
+ otherHierarchyNodes += dateHierarchy.getSumOtherHierarchyNodes();
+
+ for (InternalBucket bucket : dateHierarchy.buckets) {
+ List<InternalBucket> existingBuckets = buckets.get(bucket.key);
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<>(size);
+ buckets.put(bucket.key, existingBuckets);
+ }
+ existingBuckets.add(bucket);
+ }
+ }
+
+ @Override
+ public InternalAggregation get() {
+ final int size = !reduceContext.isFinalReduce() ? buckets.size() : Math.min(requiredSize, buckets.size());
+ PathSortedTree ordered = new PathSortedTree<>(order.comparator(), size);
+
+ for (List<InternalBucket> sameTermBuckets : buckets.values()) {
+ final InternalBucket b = reduceBucket(sameTermBuckets, reduceContext);
+ if (b.getDocCount() >= minDocCount || !reduceContext.isFinalReduce()) {
+ reduceContext.consumeBucketsAndMaybeBreak(1);
+ ordered.add(b.paths, b);
+ } else {
+ reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
+ }
+ }
+
+ long sum_other_hierarchy_nodes = ordered.getFullSize() - size + otherHierarchyNodes;
+
+ return new InternalDateHierarchy(
+ getName(),
+ ordered.getAsList(),
+ order,
+ minDocCount,
+ requiredSize,
+ shardSize,
+ sum_other_hierarchy_nodes,
+ getMetadata()
+ );
+ }
+ };
+ }
/**
* The bucket class of InternalDateHierarchy.
* @see MultiBucketsAggregation.Bucket
*/
- public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements
+ public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable
+ implements
KeyComparable {
BytesRef key;
@@ -64,7 +121,7 @@ public InternalBucket(StreamInput in) throws IOException {
level = in.readInt();
int pathsSize = in.readInt();
paths = new String[pathsSize];
- for (int i=0; i < pathsSize; i++) {
+ for (int i = 0; i < pathsSize; i++) {
paths[i] = in.readString();
}
}
@@ -80,7 +137,7 @@ public void writeTo(StreamOutput out) throws IOException {
aggregations.writeTo(out);
out.writeInt(level);
out.writeInt(paths.length);
- for (String path: paths) {
+ for (String path : paths) {
out.writeString(path);
}
}
@@ -106,11 +163,10 @@ public long getDocCount() {
}
@Override
- public Aggregations getAggregations() {
+ public InternalAggregations getAggregations() {
return aggregations;
}
- @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
@@ -120,7 +176,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
}
-
private List buckets;
private BucketOrder order;
private final int requiredSize;
@@ -129,14 +184,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
private final long minDocCount;
public InternalDateHierarchy(
- String name,
- List buckets,
- BucketOrder order,
- long minDocCount,
- int requiredSize,
- int shardSize,
- long otherHierarchyNodes,
- Map metadata
+ String name,
+ List buckets,
+ BucketOrder order,
+ long minDocCount,
+ int requiredSize,
+ int shardSize,
+ long otherHierarchyNodes,
+ Map<String, Object> metadata
) {
super(name, metadata);
this.buckets = buckets;
@@ -159,7 +214,7 @@ public InternalDateHierarchy(StreamInput in) throws IOException {
otherHierarchyNodes = in.readVLong();
int bucketsSize = in.readInt();
this.buckets = new ArrayList<>(bucketsSize);
- for (int i=0; i buckets) {
return new InternalDateHierarchy(
- this.name, buckets, order, minDocCount, requiredSize, shardSize, otherHierarchyNodes,
- this.metadata);
+ this.name,
+ buckets,
+ order,
+ minDocCount,
+ requiredSize,
+ shardSize,
+ otherHierarchyNodes,
+ this.metadata
+ );
}
@Override
@@ -210,54 +272,7 @@ public List getBuckets() {
return buckets;
}
- /**
- * Reduces the given aggregations to a single one and returns it.
- */
- @Override
- public InternalDateHierarchy reduce(List aggregations, ReduceContext reduceContext) {
- Map> buckets = null;
- long otherHierarchyNodes = 0;
-
- // extract buckets from aggregations
- for (InternalAggregation aggregation : aggregations) {
- InternalDateHierarchy dateHierarchy = (InternalDateHierarchy) aggregation;
- if (buckets == null) {
- buckets = new LinkedHashMap<>();
- }
-
- otherHierarchyNodes += dateHierarchy.getSumOtherHierarchyNodes();
-
- for (InternalBucket bucket : dateHierarchy.buckets) {
- List existingBuckets = buckets.get(bucket.key);
- if (existingBuckets == null) {
- existingBuckets = new ArrayList<>(aggregations.size());
- buckets.put(bucket.key, existingBuckets);
- }
- existingBuckets.add(bucket);
- }
- }
-
- // reduce and sort buckets depending of ordering rules
- final int size = !reduceContext.isFinalReduce() ? buckets.size() : Math.min(requiredSize, buckets.size());
- PathSortedTree ordered = new PathSortedTree<>(order.comparator(), size);
- for (List sameTermBuckets : buckets.values()) {
-
- final InternalBucket b = reduceBucket(sameTermBuckets, reduceContext);
- if (b.getDocCount() >= minDocCount || !reduceContext.isFinalReduce()) {
- reduceContext.consumeBucketsAndMaybeBreak(1);
- ordered.add(b.paths, b);
- } else {
- reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
- }
- }
-
- long sum_other_hierarchy_nodes = ordered.getFullSize() - size + otherHierarchyNodes;
- return new InternalDateHierarchy(getName(), ordered.getAsList(), order, minDocCount, requiredSize, shardSize,
- sum_other_hierarchy_nodes, getMetadata());
- }
-
- @Override
- protected InternalBucket reduceBucket(List buckets, ReduceContext context) {
+ protected InternalBucket reduceBucket(List<InternalBucket> buckets, AggregationReduceContext context) {
List aggregationsList = new ArrayList<>(buckets.size());
InternalBucket reduced = null;
for (InternalBucket bucket : buckets) {
@@ -306,7 +321,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
}
if (currentBucket != null) {
- for (int i=0; i < currentBucket.level; i++) {
+ for (int i = 0; i < currentBucket.level; i++) {
builder.endObject();
builder.endArray();
builder.endObject();
@@ -327,10 +342,10 @@ public int hashCode() {
public boolean equals(Object obj) {
InternalDateHierarchy that = (InternalDateHierarchy) obj;
return Objects.equals(buckets, that.buckets)
- && Objects.equals(order, that.order)
- && Objects.equals(minDocCount, that.minDocCount)
- && Objects.equals(requiredSize, that.requiredSize)
- && Objects.equals(shardSize, that.shardSize)
- && Objects.equals(otherHierarchyNodes, that.otherHierarchyNodes);
+ && Objects.equals(order, that.order)
+ && Objects.equals(minDocCount, that.minDocCount)
+ && Objects.equals(requiredSize, that.requiredSize)
+ && Objects.equals(shardSize, that.shardSize)
+ && Objects.equals(otherHierarchyNodes, that.otherHierarchyNodes);
}
}
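
Note: the removed bulk reduce(...) above is replaced by the incremental AggregatorReducer contract, where shard-level results are merged one at a time. Stripped of the plugin-specific bucket handling, the new shape looks like this (skeleton only, bodies elided):

    @Override
    protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) {
        return new AggregatorReducer() {
            // state accumulated across the shard-level aggregations
            private long otherHierarchyNodes = 0;

            @Override
            public void accept(InternalAggregation aggregation) {
                // merge one shard-level aggregation into the accumulated state
            }

            @Override
            public InternalAggregation get() {
                // sort, trim to the target size, and build the reduced aggregation
                return null; // elided in this sketch
            }
        };
    }
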
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalPathHierarchy.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalPathHierarchy.java
index 3c3651b..8ba84ed 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalPathHierarchy.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/InternalPathHierarchy.java
@@ -1,12 +1,11 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.Aggregation;
-import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.AggregationReduceContext;
+import org.elasticsearch.search.aggregations.AggregatorReducer;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -14,10 +13,12 @@
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.KeyComparable;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.Arrays;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -28,16 +29,79 @@
* An internal implementation of {@link InternalMultiBucketAggregation} which extends {@link Aggregation}.
* Mainly, returns the builder and makes the reduce of buckets.
*/
-public class InternalPathHierarchy extends InternalMultiBucketAggregation {
+public class InternalPathHierarchy extends InternalMultiBucketAggregation {
protected static final ParseField SUM_OF_OTHER_HIERARCHY_NODES = new ParseField("sum_other_hierarchy_nodes");
protected static final ParseField PATHS = new ParseField("path");
+ @Override
+ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) {
+ Map<BytesRef, List<InternalBucket>> buckets = new TreeMap<>();
+
+ return new AggregatorReducer() {
+ // Need a global otherHierarchyNodes counter that is increased in accept() and used in get()
+ private long otherHierarchyNodes = 0;
+
+ @Override
+ public void accept(InternalAggregation aggregation) {
+ InternalPathHierarchy pathHierarchy = (InternalPathHierarchy) aggregation;
+ otherHierarchyNodes += pathHierarchy.getSumOtherHierarchyNodes();
+
+ for (InternalBucket bucket : pathHierarchy.buckets) {
+ List<InternalBucket> existingBuckets = buckets.get(bucket.termBytes);
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<>(size);
+ buckets.put(bucket.termBytes, existingBuckets);
+ }
+ existingBuckets.add(bucket);
+ }
+ }
+
+ @Override
+ public InternalAggregation get() {
+ // reduce and sort buckets depending on ordering rules
+ final int size = !reduceContext.isFinalReduce() ? buckets.size() : Math.min(requiredSize, buckets.size());
+ PathSortedTree ordered = new PathSortedTree<>(order.comparator(), size);
+
+ for (List<InternalBucket> sameTermBuckets : buckets.values()) {
+ final InternalBucket b = reduceBucket(sameTermBuckets, reduceContext);
+ if (b.getDocCount() >= minDocCount || !reduceContext.isFinalReduce()) {
+ reduceContext.consumeBucketsAndMaybeBreak(1);
+ String[] pathsForTree;
+ if (b.minDepth > 0) {
+ pathsForTree = Arrays.copyOfRange(b.paths, b.minDepth, b.paths.length);
+ } else {
+ pathsForTree = b.paths;
+ }
+ ordered.add(pathsForTree, b);
+ } else {
+ reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
+ }
+ }
+
+ long sum_other_hierarchy_nodes = ordered.getFullSize() - size + otherHierarchyNodes;
+
+ return new InternalPathHierarchy(
+ getName(),
+ ordered.getAsList(),
+ order,
+ minDocCount,
+ requiredSize,
+ shardSize,
+ sum_other_hierarchy_nodes,
+ separator,
+ getMetadata()
+ );
+
+ }
+ };
+ }
+
/**
* The bucket class of InternalPathHierarchy.
* @see MultiBucketsAggregation.Bucket
*/
- public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements
+ public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable
+ implements
KeyComparable {
BytesRef termBytes;
@@ -49,8 +113,15 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern
protected int minDepth;
protected String basename;
- public InternalBucket(long docCount, InternalAggregations aggregations, String basename,
- BytesRef term, int level, int minDepth, String[] paths) {
+ public InternalBucket(
+ long docCount,
+ InternalAggregations aggregations,
+ String basename,
+ BytesRef term,
+ int level,
+ int minDepth,
+ String[] paths
+ ) {
termBytes = term;
this.docCount = docCount;
this.aggregations = aggregations;
@@ -72,7 +143,7 @@ public InternalBucket(StreamInput in) throws IOException {
basename = in.readString();
int pathsSize = in.readInt();
paths = new String[pathsSize];
- for (int i=0; i < pathsSize; i++) {
+ for (int i = 0; i < pathsSize; i++) {
paths[i] = in.readString();
}
}
@@ -89,7 +160,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeInt(minDepth);
out.writeString(basename);
out.writeInt(paths.length);
- for (String path: paths) {
+ for (String path : paths) {
out.writeString(path);
}
}
@@ -115,11 +186,10 @@ public long getDocCount() {
}
@Override
- public Aggregations getAggregations() {
+ public InternalAggregations getAggregations() {
return aggregations;
}
- @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
@@ -129,7 +199,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
}
-
private List buckets;
private BytesRef separator;
private BucketOrder order;
@@ -139,15 +208,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
private final long minDocCount;
public InternalPathHierarchy(
- String name,
- List buckets,
- BucketOrder order,
- long minDocCount,
- int requiredSize,
- int shardSize,
- long otherHierarchyNodes,
- BytesRef separator,
- Map metadata
+ String name,
+ List buckets,
+ BucketOrder order,
+ long minDocCount,
+ int requiredSize,
+ int shardSize,
+ long otherHierarchyNodes,
+ BytesRef separator,
+ Map<String, Object> metadata
) {
super(name, metadata);
this.buckets = buckets;
@@ -172,7 +241,7 @@ public InternalPathHierarchy(StreamInput in) throws IOException {
separator = in.readBytesRef();
int bucketsSize = in.readInt();
this.buckets = new ArrayList<>(bucketsSize);
- for (int i=0; i buckets) {
- return new InternalPathHierarchy(this.name, buckets, order, minDocCount, requiredSize, shardSize, otherHierarchyNodes,
- this.separator, this.metadata);
+ return new InternalPathHierarchy(
+ this.name,
+ buckets,
+ order,
+ minDocCount,
+ requiredSize,
+ shardSize,
+ otherHierarchyNodes,
+ this.separator,
+ this.metadata
+ );
}
@Override
public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) {
- return new InternalBucket(prototype.docCount, aggregations, prototype.basename, prototype.termBytes,
- prototype.level, prototype.minDepth, prototype.paths);
+ return new InternalBucket(
+ prototype.docCount,
+ aggregations,
+ prototype.basename,
+ prototype.termBytes,
+ prototype.level,
+ prototype.minDepth,
+ prototype.paths
+ );
}
@Override
@@ -224,62 +309,10 @@ public List getBuckets() {
return buckets;
}
- /**
- * Reduces the given aggregations to a single one and returns it.
- */
- @Override
- public InternalPathHierarchy reduce(List aggregations, ReduceContext reduceContext) {
- Map> buckets = null;
- long otherHierarchyNodes = 0;
-
- // extract buckets from aggregations
- for (InternalAggregation aggregation : aggregations) {
- InternalPathHierarchy pathHierarchy = (InternalPathHierarchy) aggregation;
- if (buckets == null) {
- buckets = new TreeMap<>();
- }
-
- otherHierarchyNodes += pathHierarchy.getSumOtherHierarchyNodes();
-
- for (InternalBucket bucket : pathHierarchy.buckets) {
- List existingBuckets = buckets.get(bucket.termBytes);
- if (existingBuckets == null) {
- existingBuckets = new ArrayList<>(aggregations.size());
- buckets.put(bucket.termBytes, existingBuckets);
- }
- existingBuckets.add(bucket);
- }
- }
-
- // reduce and sort buckets depending of ordering rules
- final int size = !reduceContext.isFinalReduce() ? buckets.size() : Math.min(requiredSize, buckets.size());
- PathSortedTree ordered = new PathSortedTree<>(order.comparator(), size);
- for (List sameTermBuckets : buckets.values()) {
- final InternalBucket b = reduceBucket(sameTermBuckets, reduceContext);
- if (b.getDocCount() >= minDocCount || !reduceContext.isFinalReduce()) {
- reduceContext.consumeBucketsAndMaybeBreak(1);
- String [] pathsForTree;
- if (b.minDepth > 0) {
- pathsForTree = Arrays.copyOfRange(b.paths, b.minDepth, b.paths.length);
- } else {
- pathsForTree = b.paths;
- }
- ordered.add(pathsForTree, b);
- } else {
- reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
- }
- }
-
- long sum_other_hierarchy_nodes = ordered.getFullSize() - size + otherHierarchyNodes;
- return new InternalPathHierarchy(getName(), ordered.getAsList(), order, minDocCount, requiredSize, shardSize,
- sum_other_hierarchy_nodes, separator, getMetadata());
- }
-
/**
* Utility method of InternalPathHierarchy.doReduce()
*/
- @Override
- protected InternalBucket reduceBucket(List buckets, ReduceContext context) {
+ protected InternalBucket reduceBucket(List<InternalBucket> buckets, AggregationReduceContext context) {
List aggregationsList = new ArrayList<>(buckets.size());
InternalBucket reduced = null;
for (InternalBucket bucket : buckets) {
@@ -296,7 +329,6 @@ protected InternalBucket reduceBucket(List buckets, ReduceContex
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
-// builder.field(SUM_OF_OTHER_HIERARCHY_NODES.getPreferredName(), otherHierarchyNodes);
Iterator bucketIterator = buckets.iterator();
builder.startArray(CommonFields.BUCKETS.getPreferredName());
InternalBucket prevBucket = null;
@@ -323,14 +355,14 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
builder.startObject();
builder.field(CommonFields.KEY.getPreferredName(), currentBucket.basename);
builder.field(CommonFields.DOC_COUNT.getPreferredName(), currentBucket.docCount);
- builder.field(PATHS.getPreferredName(), Arrays.copyOf(currentBucket.paths, currentBucket.paths.length -1));
+ builder.field(PATHS.getPreferredName(), Arrays.copyOf(currentBucket.paths, currentBucket.paths.length - 1));
currentBucket.getAggregations().toXContentInternal(builder, params);
prevBucket = currentBucket;
}
if (currentBucket != null) {
- for (int i=0; i < currentBucket.level; i++) {
+ for (int i = 0; i < currentBucket.level; i++) {
builder.endObject();
builder.endArray();
builder.endObject();
@@ -351,11 +383,11 @@ public int hashCode() {
public boolean equals(Object obj) {
InternalPathHierarchy that = (InternalPathHierarchy) obj;
return Objects.equals(buckets, that.buckets)
- && Objects.equals(separator, that.separator)
- && Objects.equals(order, that.order)
- && Objects.equals(minDocCount, that.minDocCount)
- && Objects.equals(requiredSize, that.requiredSize)
- && Objects.equals(shardSize, that.shardSize)
- && Objects.equals(otherHierarchyNodes, that.otherHierarchyNodes);
+ && Objects.equals(separator, that.separator)
+ && Objects.equals(order, that.order)
+ && Objects.equals(minDocCount, that.minDocCount)
+ && Objects.equals(requiredSize, that.requiredSize)
+ && Objects.equals(shardSize, that.shardSize)
+ && Objects.equals(otherHierarchyNodes, that.otherHierarchyNodes);
}
}
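
Note on the minDepth handling in the reducer above: leading path segments are dropped before the bucket is placed in the sorted tree. A tiny standalone illustration of that slice (the path, separator and minDepth values here are made up):

    import java.util.Arrays;

    class MinDepthSliceDemo {
        public static void main(String[] args) {
            // "/var/log/nginx" split on "/" with limit -1 keeps the leading empty segment
            String[] paths = { "", "var", "log", "nginx" };
            int minDepth = 2;
            String[] pathsForTree = minDepth > 0
                ? Arrays.copyOfRange(paths, minDepth, paths.length)   // -> ["log", "nginx"]
                : paths;
            System.out.println(Arrays.toString(pathsForTree));        // prints [log, nginx]
        }
    }
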
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationBuilder.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationBuilder.java
index e134aa6..eb2a32e 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationBuilder.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationBuilder.java
@@ -1,39 +1,44 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
-import org.elasticsearch.Version;
-import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-
/**
* The builder of the aggregatorFactory. Also implements the parsing of the request.
*/
public class PathHierarchyAggregationBuilder extends ValuesSourceAggregationBuilder {
+ @Override
+ public TransportVersion getMinimalSupportedVersion() {
+ return TransportVersions.V_8_0_0;
+ }
+
public static final String NAME = "path_hierarchy";
public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY =
- new ValuesSourceRegistry.RegistryKey<>(NAME, PathHierarchyAggregationSupplier.class);
+ new ValuesSourceRegistry.RegistryKey<>(NAME, PathHierarchyAggregationSupplier.class);
public static final ParseField SEPARATOR_FIELD = new ParseField("separator");
public static final ParseField MIN_DEPTH_FIELD = new ParseField("min_depth");
@@ -45,10 +50,12 @@ public class PathHierarchyAggregationBuilder extends ValuesSourceAggregationBuil
public static final ParseField SHARD_SIZE_FIELD = new ParseField("shard_size");
public static final ParseField MIN_DOC_COUNT_FIELD = new ParseField("min_doc_count");
- public static final PathHierarchyAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new
- PathHierarchyAggregator.BucketCountThresholds(10, -1);
- public static final ObjectParser PARSER =
- ObjectParser.fromBuilder(NAME, PathHierarchyAggregationBuilder::new);
+ public static final PathHierarchyAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS =
+ new PathHierarchyAggregator.BucketCountThresholds(10, -1);
+ public static final ObjectParser<PathHierarchyAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
+ NAME,
+ PathHierarchyAggregationBuilder::new
+ );
static {
ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
@@ -60,8 +67,7 @@ public class PathHierarchyAggregationBuilder extends ValuesSourceAggregationBuil
PARSER.declareInt(PathHierarchyAggregationBuilder::size, SIZE_FIELD);
PARSER.declareLong(PathHierarchyAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD);
PARSER.declareInt(PathHierarchyAggregationBuilder::shardSize, SHARD_SIZE_FIELD);
- PARSER.declareObjectArray(PathHierarchyAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p),
- ORDER_FIELD);
+ PARSER.declareObjectArray(PathHierarchyAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), ORDER_FIELD);
}
public static AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
@@ -84,15 +90,15 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
private int depth = -1;
private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order
private PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new PathHierarchyAggregator.BucketCountThresholds(
- DEFAULT_BUCKET_COUNT_THRESHOLDS);
-
+ DEFAULT_BUCKET_COUNT_THRESHOLDS
+ );
private PathHierarchyAggregationBuilder(String name) {
super(name);
}
@Override
- protected boolean serializeTargetValueType(Version version) {
+ protected boolean serializeTargetValueType(TransportVersion version) {
return true;
}
@@ -111,8 +117,7 @@ public PathHierarchyAggregationBuilder(StreamInput in) throws IOException {
order = InternalOrder.Streams.readOrder(in);
}
- private PathHierarchyAggregationBuilder(PathHierarchyAggregationBuilder clone, Builder factoriesBuilder,
- Map metadata) {
+ private PathHierarchyAggregationBuilder(PathHierarchyAggregationBuilder clone, Builder factoriesBuilder, Map<String, Object> metadata) {
super(clone, factoriesBuilder, metadata);
separator = clone.separator;
minDepth = clone.minDepth;
@@ -180,7 +185,7 @@ private PathHierarchyAggregationBuilder order(BucketOrder order) {
if (order == null) {
throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
}
- if(order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
+ if (order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
this.order = order; // if order already contains a tie-breaker we are good to go
} else { // otherwise add a tie-breaker by using a compound order
this.order = BucketOrder.compound(order);
@@ -197,7 +202,6 @@ private PathHierarchyAggregationBuilder order(List orders) {
return this;
}
-
/**
* Sets the size - indicating how many term buckets should be returned
* (defaults to 10)
@@ -215,7 +219,8 @@ public PathHierarchyAggregationBuilder size(int size) {
public PathHierarchyAggregationBuilder minDocCount(long minDocCount) {
if (minDocCount < 0) {
throw new IllegalArgumentException(
- "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
+ "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+ );
}
this.minDocCount = minDocCount;
return this;
@@ -241,8 +246,7 @@ public BucketCardinality bucketCardinality() {
*/
public PathHierarchyAggregationBuilder shardSize(int shardSize) {
if (shardSize <= 0) {
- throw new IllegalArgumentException(
- "[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+ throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
}
bucketCountThresholds.setShardSize(shardSize);
return this;
@@ -256,38 +260,40 @@ public int shardSize() {
}
@Override
- protected ValuesSourceAggregatorFactory innerBuild(AggregationContext context,
- ValuesSourceConfig config,
- AggregatorFactory parent,
- AggregatorFactories.Builder subFactoriesBuilder) throws IOException {
-
+ protected ValuesSourceAggregatorFactory innerBuild(
+ AggregationContext context,
+ ValuesSourceConfig config,
+ AggregatorFactory parent,
+ AggregatorFactories.Builder subFactoriesBuilder
+ ) throws IOException {
- if (minDepth > maxDepth)
- throw new IllegalArgumentException("[minDepth] (" + minDepth + ") must not be greater than [maxDepth] (" +
- maxDepth + ")");
+ if (minDepth > maxDepth) throw new IllegalArgumentException(
+ "[minDepth] (" + minDepth + ") must not be greater than [maxDepth] (" + maxDepth + ")"
+ );
if (depth >= 0) {
- if (minDepth > depth)
- throw new IllegalArgumentException("[minDepth] (" + minDepth + ") must not be greater than [depth] (" +
- depth + ")");
+ if (minDepth > depth) throw new IllegalArgumentException(
+ "[minDepth] (" + minDepth + ") must not be greater than [depth] (" + depth + ")"
+ );
minDepth = depth;
maxDepth = depth;
}
return new PathHierarchyAggregatorFactory(
- name,
- config,
- separator,
- minDepth,
- maxDepth,
- keepBlankPath,
- order,
- minDocCount,
- bucketCountThresholds,
- context,
- parent,
- subFactoriesBuilder,
- metadata);
+ name,
+ config,
+ separator,
+ minDepth,
+ maxDepth,
+ keepBlankPath,
+ order,
+ minDocCount,
+ bucketCountThresholds,
+ context,
+ parent,
+ subFactoriesBuilder,
+ metadata
+ );
}
@Override
@@ -332,20 +338,16 @@ public boolean equals(Object obj) {
if (!super.equals(obj)) return false;
PathHierarchyAggregationBuilder other = (PathHierarchyAggregationBuilder) obj;
return Objects.equals(separator, other.separator)
- && Objects.equals(minDepth, other.minDepth)
- && Objects.equals(maxDepth, other.maxDepth)
- && Objects.equals(depth, other.depth)
- && Objects.equals(order, other.order)
- && Objects.equals(minDocCount, other.minDocCount)
- && Objects.equals(bucketCountThresholds, other.bucketCountThresholds);
+ && Objects.equals(minDepth, other.minDepth)
+ && Objects.equals(maxDepth, other.maxDepth)
+ && Objects.equals(depth, other.depth)
+ && Objects.equals(order, other.order)
+ && Objects.equals(minDocCount, other.minDocCount)
+ && Objects.equals(bucketCountThresholds, other.bucketCountThresholds);
}
@Override
public String getType() {
return NAME;
}
-
- @Override
- protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() { return REGISTRY_KEY; }
}
-
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationSupplier.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationSupplier.java
index 2f71ba7..8fa8c5b 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationSupplier.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregationSupplier.java
@@ -13,18 +13,20 @@
@FunctionalInterface
public interface PathHierarchyAggregationSupplier {
- Aggregator build(String name,
- AggregatorFactories factories,
- BytesRef separator,
- int minDepth,
- int maxDepth,
- boolean keepBlankPath,
- BucketOrder order,
- long minDocCount,
- PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
- ValuesSourceConfig valuesSourceConfig,
- SearchContext aggregationContext,
- Aggregator parent,
- CardinalityUpperBound cardinality,
- Map metadata) throws IOException;
+ Aggregator build(
+ String name,
+ AggregatorFactories factories,
+ BytesRef separator,
+ int minDepth,
+ int maxDepth,
+ boolean keepBlankPath,
+ BucketOrder order,
+ long minDocCount,
+ PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
+ ValuesSourceConfig valuesSourceConfig,
+ SearchContext aggregationContext,
+ Aggregator parent,
+ CardinalityUpperBound cardinality,
+ Map<String, Object> metadata
+ ) throws IOException;
}
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregator.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregator.java
index aa6ef53..9655692 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregator.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregator.java
@@ -1,17 +1,17 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.core.Releasables;
import org.elasticsearch.common.util.BytesRefHash;
-import org.elasticsearch.xcontent.ToXContentFragment;
-import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.core.Releasables;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketOrder;
@@ -20,31 +20,36 @@
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
-import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.xcontent.ToXContentFragment;
+import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Pattern;
public class PathHierarchyAggregator extends BucketsAggregator {
- public PathHierarchyAggregator(String name,
- AggregatorFactories factories,
- AggregationContext context,
- ValuesSource valuesSource,
- BucketOrder order,
- long minDocCount,
- BucketCountThresholds bucketCountThresholds,
- BytesRef separator,
- int minDepth,
- Aggregator parent,
- CardinalityUpperBound cardinality,
- Map metadata
+ public PathHierarchyAggregator(
+ String name,
+ AggregatorFactories factories,
+ AggregationContext context,
+ ValuesSource valuesSource,
+ BucketOrder order,
+ long minDocCount,
+ BucketCountThresholds bucketCountThresholds,
+ BytesRef separator,
+ int minDepth,
+ Aggregator parent,
+ CardinalityUpperBound cardinality,
+ Map<String, Object> metadata
) throws IOException {
super(name, factories, context, parent, cardinality, metadata);
this.valuesSource = valuesSource;
@@ -53,7 +58,6 @@ public PathHierarchyAggregator(String name,
bucketOrds = new BytesRefHash(1, context.bigArrays());
order.validate(this);
this.order = order;
- this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
this.bucketCountThresholds = bucketCountThresholds;
this.minDepth = minDepth;
}
@@ -135,18 +139,15 @@ public boolean equals(Object obj) {
return false;
}
PathHierarchyAggregator.BucketCountThresholds other = (PathHierarchyAggregator.BucketCountThresholds) obj;
- return Objects.equals(requiredSize, other.requiredSize)
- && Objects.equals(shardSize, other.shardSize);
+ return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(shardSize, other.shardSize);
}
}
-
private final ValuesSource valuesSource;
private final BytesRefHash bucketOrds;
private final BucketOrder order;
private final long minDocCount;
private final int minDepth;
- protected final Comparator partiallyBuiltBucketComparator;
private final BucketCountThresholds bucketCountThresholds;
private final BytesRef separator;
@@ -157,13 +158,14 @@ public boolean equals(Object obj) {
* The LeafBucketCollector is a "Per-leaf bucket collector". It collects docs for the account of buckets.
*/
@Override
- public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
- final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
+ final SortedBinaryDocValues values = valuesSource.bytesValues(aggCtx.getLeafReaderContext());
return new LeafBucketCollectorBase(sub, values) {
final BytesRefBuilder previous = new BytesRefBuilder();
+
/**
* Collect the given doc in the given bucket.
* Called once for every document matching a query, with the unbased document number.
@@ -183,7 +185,7 @@ public void collect(int doc, long owningBucketOrdinal) throws IOException {
}
long bucketOrdinal = bucketOrds.add(bytesValue);
if (bucketOrdinal < 0) { // already seen
- bucketOrdinal = - 1 - bucketOrdinal;
+ bucketOrdinal = -1 - bucketOrdinal;
collectExistingBucket(sub, doc, bucketOrdinal);
} else {
collectBucket(sub, doc, bucketOrdinal);
@@ -196,76 +198,113 @@ public void collect(int doc, long owningBucketOrdinal) throws IOException {
}
@Override
- public InternalAggregation[] buildAggregations(long[] owningBucketOrdinals) throws IOException {
-
- InternalPathHierarchy.InternalBucket[][] topBucketsPerOrd = new InternalPathHierarchy.InternalBucket[owningBucketOrdinals.length][];
- InternalPathHierarchy[] results = new InternalPathHierarchy[owningBucketOrdinals.length];
-
- for (int ordIdx = 0; ordIdx < owningBucketOrdinals.length; ordIdx++) {
- assert owningBucketOrdinals[ordIdx] == 0;
-
- final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
- PathSortedTree<String, InternalPathHierarchy.InternalBucket> pathSortedTree =
- new PathSortedTree<>(partiallyBuiltBucketComparator, size);
+ public InternalAggregation[] buildAggregations(LongArray owningBucketOrdinals) throws IOException {
+ try (
+ ObjectArray<InternalPathHierarchy.InternalBucket[]> topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrdinals.size())
+ ) {
+ InternalPathHierarchy[] results = new InternalPathHierarchy[Math.toIntExact(owningBucketOrdinals.size())];
+
+ for (long ordIdx = 0; ordIdx < owningBucketOrdinals.size(); ordIdx++) {
+ assert owningBucketOrdinals.get(ordIdx) == 0;
+
+ final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
+
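+ // Shard-local buckets are ordered with the aggregation's comparator and capped at shard_size.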
+ PathSortedTree<String, InternalPathHierarchy.InternalBucket> pathSortedTree = new PathSortedTree<>(
+ order.comparator(),
+ size
+ );
+
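+ // Each distinct path stored in bucketOrds becomes one bucket: the term is split on the separator
+ // and, when min_depth is set, the leading levels are trimmed before the bucket goes into the tree.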
+ InternalPathHierarchy.InternalBucket spare;
+ for (int i = 0; i < bucketOrds.size(); i++) {
+ spare = new InternalPathHierarchy.InternalBucket(0, null, null, new BytesRef(), 0, 0, null);
+ BytesRef term = new BytesRef();
+ bucketOrds.get(i, term);
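+ // Quote the separator so String.split treats it literally even if it contains regex metacharacters.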
+ String quotedPattern = Pattern.quote(separator.utf8ToString());
+ String[] paths = term.utf8ToString().split(quotedPattern, -1);
+
+ String[] pathsForTree;
+ if (minDepth > 0) {
+ pathsForTree = Arrays.copyOfRange(paths, minDepth, paths.length);
+ } else {
+ pathsForTree = paths;
+ }
- InternalPathHierarchy.InternalBucket spare;
- for (int i = 0; i < bucketOrds.size(); i++) {
- spare = new InternalPathHierarchy.InternalBucket(0, null, null, new BytesRef(), 0, 0, null);
- BytesRef term = new BytesRef();
- bucketOrds.get(i, term);
+ spare.termBytes = BytesRef.deepCopyOf(term);
+ spare.level = pathsForTree.length - 1;
+ spare.docCount = bucketDocCount(i);
+ spare.basename = paths[paths.length - 1];
+ spare.minDepth = minDepth;
+ spare.bucketOrd = i;
+ spare.paths = paths;
- String quotedPattern = Pattern.quote(separator.utf8ToString());
+ pathSortedTree.add(pathsForTree, spare);
+ }
- String[] paths = term.utf8ToString().split(quotedPattern, -1);
+ topBucketsPerOrd.set(ordIdx, new InternalPathHierarchy.InternalBucket[size]);
- String[] pathsForTree;
+ long otherHierarchyNodes = pathSortedTree.getFullSize();
- if (minDepth > 0) {
- pathsForTree = Arrays.copyOfRange(paths, minDepth, paths.length);
- } else {
- pathsForTree = paths;
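+ // Get the top buckets: drain at most "size" entries from the tree; whatever is left is counted in otherHierarchyNodes.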
+ Iterator<InternalPathHierarchy.InternalBucket> iterator = pathSortedTree.consumer();
+ for (int i = 0; i < size && iterator.hasNext(); i++) {
+ final InternalPathHierarchy.InternalBucket bucket = iterator.next();
+ topBucketsPerOrd.get(ordIdx)[i] = bucket;
+ otherHierarchyNodes -= 1;
}
- spare.termBytes = BytesRef.deepCopyOf(term);
- spare.level = pathsForTree.length - 1;
- spare.docCount = bucketDocCount(i);
- spare.basename = paths[paths.length - 1];
- spare.minDepth = minDepth;
- spare.bucketOrd = i;
- spare.paths = paths;
-
- pathSortedTree.add(pathsForTree, spare);
-
- }
- // Get the top buckets
- topBucketsPerOrd[ordIdx] = new InternalPathHierarchy.InternalBucket[size];
- long otherHierarchyNodes = pathSortedTree.getFullSize();
- Iterator<InternalPathHierarchy.InternalBucket> iterator = pathSortedTree.consumer();
- for (int i = 0; i < size; i++) {
- final InternalPathHierarchy.InternalBucket bucket = iterator.next();
- topBucketsPerOrd[ordIdx][i] = bucket;
- otherHierarchyNodes -= 1;
+ results[Math.toIntExact(ordIdx)] = new InternalPathHierarchy(
+ name,
+ Arrays.asList(topBucketsPerOrd.get(ordIdx)),
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ otherHierarchyNodes,
+ separator,
+ metadata()
+ );
}
- results[ordIdx] = new InternalPathHierarchy(name, Arrays.asList(topBucketsPerOrd[ordIdx]), order,
- minDocCount, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
- otherHierarchyNodes, separator, metadata());
- }
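+ // Build sub-aggregations once for all of the buckets that survived the shard_size cut.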
+ buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggregations) -> b.aggregations = aggregations);
- // Build sub-aggregations for pruned buckets
- buildSubAggsForAllBuckets(
- topBucketsPerOrd,
- b -> b.bucketOrd,
- (b, aggregations) -> b.aggregations = aggregations
- );
-
- return results;
+ return results;
+ }
}
@Override
public InternalAggregation buildEmptyAggregation() {
- return new InternalPathHierarchy(name, null, order, minDocCount, bucketCountThresholds.getRequiredSize(),
- bucketCountThresholds.getShardSize(), 0, separator, metadata());
+ return new InternalPathHierarchy(
+ name,
+ null,
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ 0,
+ separator,
+ metadata()
+ );
+ }
+
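+ // Convenience factory that builds an InternalPathHierarchy reusing this aggregator's size thresholds.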
+ InternalPathHierarchy buildAggregation(
+ String name,
+ List<InternalPathHierarchy.InternalBucket> buckets,
+ BucketOrder order,
+ long minDocCount,
+ long otherHierarchyNodes,
+ BytesRef separator,
+ Map<String, Object> metadata
+ ) {
+ return new InternalPathHierarchy(
+ name,
+ buckets,
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ otherHierarchyNodes,
+ separator,
+ metadata
+ );
}
@Override
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregatorFactory.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregatorFactory.java
index 0c383f9..f67fb72 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregatorFactory.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathHierarchyAggregatorFactory.java
@@ -4,7 +4,6 @@
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.FutureArrays;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortingBinaryDocValues;
import org.elasticsearch.search.aggregations.Aggregator;
@@ -17,14 +16,15 @@
import org.elasticsearch.search.aggregations.NonCollectingAggregator;
import org.elasticsearch.search.aggregations.bucket.BucketUtils;
import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
-import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Map;
/**
@@ -41,19 +41,20 @@ class PathHierarchyAggregatorFactory extends ValuesSourceAggregatorFactory {
private boolean keepBlankPath;
private final PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds;
- PathHierarchyAggregatorFactory(String name,
- ValuesSourceConfig config,
- String separator,
- int minDepth,
- int maxDepth,
- boolean keepBlankPath,
- BucketOrder order,
- long minDocCount,
- PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
- AggregationContext context,
- AggregatorFactory parent,
- AggregatorFactories.Builder subFactoriesBuilder,
- Map<String, Object> metaData
+ PathHierarchyAggregatorFactory(
+ String name,
+ ValuesSourceConfig config,
+ String separator,
+ int minDepth,
+ int maxDepth,
+ boolean keepBlankPath,
+ BucketOrder order,
+ long minDocCount,
+ PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds,
+ AggregationContext context,
+ AggregatorFactory parent,
+ AggregatorFactories.Builder subFactoriesBuilder,
+ Map<String, Object> metaData
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metaData);
this.separator = new BytesRef(separator);
@@ -66,27 +67,41 @@ class PathHierarchyAggregatorFactory extends ValuesSourceAggregatorFactory {
}
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
- builder.register(PathHierarchyAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.KEYWORD, (name,
- factories,
- separator,
- minDepth,
- maxDepth,
- keepBlankPath,
- order,
- minDocCount,
- bucketCountThresholds,
- valuesSourceConfig,
- aggregationContext,
- parent,
- cardinality,
- metadata) -> null,
- true);
+ builder.register(
+ PathHierarchyAggregationBuilder.REGISTRY_KEY,
+ CoreValuesSourceType.KEYWORD,
+ (
+ name,
+ factories,
+ separator,
+ minDepth,
+ maxDepth,
+ keepBlankPath,
+ order,
+ minDocCount,
+ bucketCountThresholds,
+ valuesSourceConfig,
+ aggregationContext,
+ parent,
+ cardinality,
+ metadata) -> null,
+ true
+ );
}
@Override
protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
- final InternalAggregation aggregation = new InternalPathHierarchy(name, new ArrayList<>(), order, minDocCount,
- bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), 0, separator, metadata);
+ final InternalAggregation aggregation = new InternalPathHierarchy(
+ name,
+ new ArrayList<>(),
+ order,
+ minDocCount,
+ bucketCountThresholds.getRequiredSize(),
+ bucketCountThresholds.getShardSize(),
+ 0,
+ separator,
+ metadata
+ );
return new NonCollectingAggregator(name, context, parent, factories, metadata) {
{
// even in the case of an unmapped aggregator, validate the
@@ -102,13 +117,14 @@ public InternalAggregation buildEmptyAggregation() {
}
@Override
- protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality,
- Map metadata) throws IOException {
+ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
+ throws IOException {
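+ // Wrap the raw values source so each document path is expanded into one value per prefix (one value per level, up to max_depth).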
ValuesSource valuesSourceBytes = new HierarchyValuesSource(config.getValuesSource(), separator, minDepth, maxDepth, keepBlankPath);
- PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new
- PathHierarchyAggregator.BucketCountThresholds(this.bucketCountThresholds);
+ PathHierarchyAggregator.BucketCountThresholds bucketCountThresholds = new PathHierarchyAggregator.BucketCountThresholds(
+ this.bucketCountThresholds
+ );
if (!InternalOrder.isKeyOrder(order)
- && bucketCountThresholds.getShardSize() == PathHierarchyAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+ && bucketCountThresholds.getShardSize() == PathHierarchyAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
// The user has not made a shardSize selection. Use default
// heuristic to avoid any wrong-ranking caused by distributed
// counting
@@ -116,9 +132,19 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c
}
bucketCountThresholds.ensureValidity();
return new PathHierarchyAggregator(
- name, factories, context,
- valuesSourceBytes, order, minDocCount, bucketCountThresholds, separator, minDepth,
- parent, cardinality, metadata);
+ name,
+ factories,
+ context,
+ valuesSourceBytes,
+ order,
+ minDocCount,
+ bucketCountThresholds,
+ separator,
+ minDepth,
+ parent,
+ cardinality,
+ metadata
+ );
}
/**
@@ -137,8 +163,7 @@ private static class HierarchyValues extends SortingBinaryDocValues {
private int maxDepth;
private boolean keepBlankPath;
- private HierarchyValues(SortedBinaryDocValues valuesSource, BytesRef separator, int minDepth, int maxDepth,
- boolean keepBlankPath) {
+ private HierarchyValues(SortedBinaryDocValues valuesSource, BytesRef separator, int minDepth, int maxDepth, boolean keepBlankPath) {
this.valuesSource = valuesSource;
this.separator = separator;
this.minDepth = minDepth;
@@ -157,21 +182,26 @@ public boolean advanceExact(int docId) throws IOException {
if (valuesSource.advanceExact(docId)) {
count = 0;
int t = 0;
- for (int i=0; i < valuesSource.docValueCount(); i++) {
+ for (int i = 0; i < valuesSource.docValueCount(); i++) {
int depth = 0;
BytesRef val = valuesSource.nextValue();
BytesRefBuilder cleanVal = new BytesRefBuilder();
int startNewValOffset = -1;
- for (int offset=0; offset < val.length; offset++) {
+ for (int offset = 0; offset < val.length; offset++) {
// it is a separator
- if (val.length - offset >= separator.length &&
- FutureArrays.equals(
- separator.bytes, separator.offset, separator.offset + separator.length,
- val.bytes, val.offset + offset, val.offset + offset + separator.length)) {
+ if (val.length - offset >= separator.length
+ && Arrays.equals(
+ separator.bytes,
+ separator.offset,
+ separator.offset + separator.length,
+ val.bytes,
+ val.offset + offset,
+ val.offset + offset + separator.length
+ )) {
// ignore separator at the beginning
if (offset == 0) {
- offset += separator.length -1;
+ offset += separator.length - 1;
continue;
}
@@ -183,14 +213,14 @@ public boolean advanceExact(int docId) throws IOException {
}
startNewValOffset = -1;
cleanVal.append(separator);
- depth ++;
- // two separators following each other
+ depth++;
+ // two separators following each other
} else if (keepBlankPath) {
count++;
growExact();
values[t++].copyBytes(cleanVal);
cleanVal.append(separator);
- depth ++;
+ depth++;
}
if (maxDepth >= 0 && depth > maxDepth) {
@@ -216,8 +246,7 @@ public boolean advanceExact(int docId) throws IOException {
}
sort(); // sort values that are stored between offsets 0 and count of values
return true;
- } else
- return false;
+ } else return false;
}
final void growExact() {
@@ -241,7 +270,7 @@ private static class HierarchyValuesSource extends ValuesSource.Bytes {
private final int maxDepth;
private final boolean twoSepAsOne;
- private HierarchyValuesSource(ValuesSource values, BytesRef separator, int minDepth, int maxDepth, boolean twoSepAsOne){
+ private HierarchyValuesSource(ValuesSource values, BytesRef separator, int minDepth, int maxDepth, boolean twoSepAsOne) {
this.values = values;
this.separator = separator;
this.minDepth = minDepth;
@@ -256,4 +285,3 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOExc
}
}
-
diff --git a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathSortedTree.java b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathSortedTree.java
index f9d7e76..3842a72 100644
--- a/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathSortedTree.java
+++ b/src/main/java/org/opendatasoft/elasticsearch/search/aggregations/bucket/PathSortedTree.java
@@ -1,6 +1,5 @@
package org.opendatasoft.elasticsearch.search.aggregations.bucket;
-
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
@@ -9,7 +8,7 @@
import java.util.PriorityQueue;
import java.util.Stack;
-public class PathSortedTree<K, T> implements Iterable<T>{
+public class PathSortedTree<K, T> implements Iterable<T> {
private Comparator<? super T> comparator;
private Node<K, T> root;
@@ -45,20 +44,19 @@ public void add(K[] path, T element) {
if (newChild) {
Node<K, T> newNode = new Node<>(k, comparator, element, currentNode);
currentNode.children.add(newNode);
- fullSize ++;
+ fullSize++;
break;
}
}
}
-
public List<T> getAsList() {
List<T> result = new ArrayList<>(fullSize);
Iterator<T> iterator = consumer();
- while (iterator.hasNext()){
+ while (iterator.hasNext()) {
result.add(iterator.next());
}
return result;
@@ -68,7 +66,6 @@ public Iterator<T> consumer() {
return new PathSortedTreeConsumer(root, fullSize);
}
-
@Override
public Iterator<T> iterator() {
return new PathSortedTreeIterator(root);
@@ -93,7 +90,6 @@ Comparator<Node<K, T>> getComparator(Comparator<? super T> comparator) {
return (n1, n2) -> comparator.compare(n1.data, n2.data);
}
-
public Node(K key, Comparator<? super T> comparator, T data, Node<K, T> parent) {
this.key = key;
this.data = data;
@@ -122,11 +118,11 @@ public T next() {
Node<K, T> nextNode = current.next();
- if (! nextNode.children.isEmpty()) {
+ if (!nextNode.children.isEmpty()) {
iterators.push(current);
current = nextNode.children.iterator();
- } else if (! current.hasNext()){
- while (! iterators.empty()) {
+ } else if (!current.hasNext()) {
+ while (!iterators.empty()) {
current = iterators.pop();
if (current.hasNext()) {
break;
@@ -153,7 +149,7 @@ private class PathSortedTreeConsumer implements Iterator<T> {
@Override
public boolean hasNext() {
- if (size >=0 && currentSize >= size) {
+ if (size >= 0 && currentSize >= size) {
return false;
}
if (cursor.children.size() > 0) {
@@ -177,8 +173,8 @@ public T next() {
}
}
if (nextNode == null) throw new NoSuchElementException();
- currentSize ++;
- fullSize --;
+ currentSize++;
+ fullSize--;
cursor = nextNode;
return nextNode.data;
diff --git a/src/test/java/org/opendatasoft/elasticsearch/PathHierarchyTests.java b/src/test/java/org/opendatasoft/elasticsearch/PathHierarchyTests.java
index 7ee12d0..01f905e 100644
--- a/src/test/java/org/opendatasoft/elasticsearch/PathHierarchyTests.java
+++ b/src/test/java/org/opendatasoft/elasticsearch/PathHierarchyTests.java
@@ -1,28 +1,28 @@
package org.opendatasoft.elasticsearch;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xcontent.json.JsonXContent;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.json.JsonXContent;
import org.opendatasoft.elasticsearch.search.aggregations.bucket.PathHierarchyAggregationBuilder;
public class PathHierarchyTests extends ESTestCase {
public void testParser() throws Exception {
// can create the factory with utf8 separator
String separator = "夢";
- XContentParser stParser = createParser(JsonXContent.jsonXContent,
- "{\"field\":\"path\", \"separator\": \"" + separator + "\"}");
+ XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"path\", \"separator\": \"" + separator + "\"}");
XContentParser.Token token = stParser.nextToken();
assertSame(XContentParser.Token.START_OBJECT, token);
assertNotNull(PathHierarchyAggregationBuilder.parse("path_hierarchy", stParser));
// can create the factory with an array of orders
String orders = "[{\"_key\": \"asc\"}, {\"_count\": \"desc\"}]";
- stParser = createParser(JsonXContent.jsonXContent,
- "{\"field\":\"path\", \"order\": " + orders + "}");
+ stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"path\", \"order\": " + orders + "}");
assertNotNull(PathHierarchyAggregationBuilder.parse("path_hierarchy", stParser));
- stParser = createParser(JsonXContent.jsonXContent,
- "{\"field\":\"path\", \"separator\":\"/\", \"order\": " + orders + ", \"min_depth\": 0, \"max_depth\": 3}");
+ stParser = createParser(
+ JsonXContent.jsonXContent,
+ "{\"field\":\"path\", \"separator\":\"/\", \"order\": " + orders + ", \"min_depth\": 0, \"max_depth\": 3}"
+ );
AggregationBuilder builder = PathHierarchyAggregationBuilder.parse("path_hierarchy", stParser);
assertNotNull(builder);
}
diff --git a/src/yamlRestTest/java/org/opendatasoft/elasticsearch/RestApiYamlIT.java b/src/yamlRestTest/java/org/opendatasoft/elasticsearch/RestApiYamlIT.java
index b454a26..2939acd 100644
--- a/src/yamlRestTest/java/org/opendatasoft/elasticsearch/RestApiYamlIT.java
+++ b/src/yamlRestTest/java/org/opendatasoft/elasticsearch/RestApiYamlIT.java
@@ -2,6 +2,7 @@
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
@@ -10,7 +11,7 @@
*/
public class RestApiYamlIT extends ESClientYamlSuiteTestCase {
- public RestApiYamlIT (@Name("yaml") ClientYamlTestCandidate testCandidate) {
+ public RestApiYamlIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
diff --git a/src/yamlRestTest/resources/rest-api-spec/test/PathHierarchy/10_basic.yml b/src/yamlRestTest/resources/rest-api-spec/test/PathHierarchy/10_basic.yml
index e190d87..4acd545 100644
--- a/src/yamlRestTest/resources/rest-api-spec/test/PathHierarchy/10_basic.yml
+++ b/src/yamlRestTest/resources/rest-api-spec/test/PathHierarchy/10_basic.yml
@@ -8,3 +8,8 @@
nodes.info: {}
- match: {nodes.$master.plugins.0.name: pathhierarchy-aggregation}
+
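+  # Smoke test: the root info endpoint should answer with the test cluster name.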
+ - do:
+ info: {}
+
+ - match: { cluster_name: "yamlRestTest" }