diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..5f4b87bc --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,218 @@ +name: ci + +on: + push: + branches: + - master + - dev + tags: + - '*' + pull_request: + branches: + - master + - dev + types: + - opened + - edited + - reopened + - synchronize + - ready_for_review + - unlocked + - review_requested + workflow_dispatch: + +env: + DOCKER_REGISTRY_REPOSITORY: sysflowtelemetry/sf-processor + PLUGIN_BUILDER_DOCKER_REGISTRY_REPOSITORY: sysflowtelemetry/plugin-builder + GH_ORGANIZATION: sysflow-telemetry + +jobs: + lint-core: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: 1.19 + - name: Lint core module + uses: golangci/golangci-lint-action@v3 + with: + version: v1.51.1 + working-directory: core + args: --disable=errcheck --build-tags=flatrecord + lint-driver: + needs: lint-core + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: 1.19 + - name: Lint driver module + uses: golangci/golangci-lint-action@v3 + with: + version: v1.51.1 + working-directory: driver + args: --disable=errcheck + docker: + needs: lint-driver + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Read manifest + shell: bash + run: | + echo "UBI_VERSION=$(awk -F'=' '/UBI_VERSION/{print $2}' makefile.manifest.inc)" >> $GITHUB_ENV + - name: Docker meta + id: meta_builder + uses: docker/metadata-action@v4 + with: + images: | + ${{ env.PLUGIN_BUILDER_DOCKER_REGISTRY_REPOSITORY }} + ghcr.io/${{ env.GH_ORGANIZATION }}/plugin-builder + tags: | + type=edge,branch=master + type=ref,event=branch + type=match,pattern=^\d.\d.\d$ + type=ref,event=pr + type=sha,prefix= + type=sha,format=long,prefix= + labels: | + org.opencontainers.image.documentation=https://sysflow.readthedocs.io/ + org.opencontainers.image.vendor=SysFlow + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + ${{ env.DOCKER_REGISTRY_REPOSITORY }} + ghcr.io/${{ github.repository }} + tags: | + type=edge,branch=master + type=ref,event=branch + type=match,pattern=^\d.\d.\d$ + type=ref,event=pr + type=sha,prefix= + type=sha,format=long,prefix= + labels: | + org.opencontainers.image.documentation=https://sysflow.readthedocs.io/ + org.opencontainers.image.vendor=SysFlow + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Login to DockerHub + uses: docker/login-action@v2 + if: ${{ github.event_name != 'pull_request' }} + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + if: ${{ github.event_name != 'pull_request' }} + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Set build args + id: args + shell: bash + run: | + echo "UBI_VERSION=$(awk -F'=' '/UBI_VERSION/{print $2}' makefile.manifest.inc)" >> $GITHUB_ENV + echo "BRANCH=$(echo ${GITHUB_REF##refs/*/})" >> $GITHUB_ENV + echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + - name: Build and push plugin builder image + id: docker_plugin_builder + uses: docker/build-push-action@v3 + with: + push: ${{ github.event_name != 
'pull_request' }} + target: base + tags: "${{ steps.meta_builder.outputs.tags }}" + labels: "${{ steps.meta_builder.outputs.labels }}" + build-args: | + BUILD_NUMBER=${{ env.SHA_SHORT }} + VERSION=${{ env.BRANCH }} + RELEASE=${{ env.SHA_SHORT }} + UBI_VER=${{ env.UBI_VERSION }} + - name: Build and push + id: docker_build + uses: docker/build-push-action@v3 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: "${{ steps.meta.outputs.tags }}" + labels: "${{ steps.meta.outputs.labels }}" + cache-from: type=registry,ref=ghcr.io/${{ env.GH_ORGANIZATION }}/plugin-builder:${{ env.SHA_SHORT }} + build-args: | + BUILD_NUMBER=${{ env.SHA_SHORT }} + VERSION=${{ env.BRANCH }} + RELEASE=${{ env.SHA_SHORT }} + UBI_VER=${{ env.UBI_VERSION }} + - name: push README to Dockerhub + uses: christian-korneck/update-container-description-action@v1 + if: ${{ github.ref == 'refs/heads/master' && github.event_name != 'pull_request' }} + env: + DOCKER_USER: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASS: ${{ secrets.DOCKERHUB_TOKEN }} + with: + destination_container_repo: ${{ env.DOCKER_REGISTRY_REPOSITORY }} + provider: dockerhub + readme_file: "README.md" + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} + release: + needs: docker + if: startsWith(github.ref, 'refs/tags/') + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Setup cmake + uses: jwlawson/actions-setup-cmake@v1.12 + with: + cmake-version: '3.16.x' + - name: Get version from tag + id: tag_name + shell: bash + run: | + GHREF=${GITHUB_REF#refs/tags/}; echo "CURRENT_VERSION=${GHREF%%-*}" >> $GITHUB_ENV + echo "CURRENT_TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + - name: Build Release Assets + id: build_release_assets + shell: bash + run: | + docker pull ${{ env.DOCKER_REGISTRY_REPOSITORY }}:${{ env.SHA_SHORT }} + docker tag ${{ env.DOCKER_REGISTRY_REPOSITORY }}:${{ env.SHA_SHORT }} ${{ env.DOCKER_REGISTRY_REPOSITORY }}:${{ env.CURRENT_TAG }} + make package + echo "DEB=$(ls ${{ github.workspace }}/scripts/cpack/*.deb)" >> $GITHUB_ENV + echo "RPM=$(ls ${{ github.workspace }}/scripts/cpack/*.rpm)" >> $GITHUB_ENV + echo "TARGZ=$(ls ${{ github.workspace }}/scripts/cpack/*.tar.gz)" >> $GITHUB_ENV + - name: Get Changelog Entry + id: changelog_reader + uses: mindsers/changelog-reader-action@v2 + with: + version: ${{ env.CURRENT_VERSION }} + path: ./CHANGELOG.md + - name: Release + uses: softprops/action-gh-release@v1 + with: + body: ${{ steps.changelog_reader.outputs.changes }} + token: ${{ secrets.GITHUB_TOKEN }} + prerelease: ( contains(env.CURRENT_VERSION, '-rc') || contains(env.CURRENT_VERSION, '-alpha') || contains(env.CURRENT_VERSION, '-beta') ) + draft: true + files: | + ${{ env.DEB }} + ${{ env.RPM }} + ${{ env.TARGZ }} diff --git a/.gitignore b/.gitignore index 732ce25c..4c38407e 100644 --- a/.gitignore +++ b/.gitignore @@ -22,12 +22,21 @@ .antlr driver/driver driver/sfprocessor +driver/test.sh +driver/*.perf +driver/*.pprof *.pdf *.prof *.vlog *.local.json *.DS_Store +**/goda # Manifest driver/manifest/manifest.go -driver/manifest/manifest.gobak \ No newline at end of file +driver/manifest/manifest.gobak + +# Cpack +scripts/cpack/_CPack_Packages/ +scripts/cpack/build/ +scripts/cpack/sfprocessor* diff --git a/CHANGELOG.md b/CHANGELOG.md index 08effdf8..1878482d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-13,10 +13,231 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). > - **Fixed**: for any bug fixes. > - **Security**: in case of vulnerabilities. -## [[UNRELEASED](https://github.com/sysflow-telemetry/sf-collector/compare/0.1.0...HEAD)] +## [Unreleased] + +## [0.6.3] - 2024-04-22 + +### Changed + +- No changes (matching release) + +## [0.6.2] - 2024-04-22 + +### Changed + +- Bumped UBI version to 9.3-1610 + +## [0.6.1] - 2024-02-23 + +### Fixed + +- Fix double call to StartWorkers() function when policies are reloaded + +## [0.6.0] - 2023-11-28 + +### Added + +- Experimental Sigma rules support +- Multi-language rules engine support + +### Changed + +- Bumped UBI version to 9.3-1361.1699548029 +- Bump sf-apis to 0.6.0 + +### Security + +- CVE-2022-41723: golang.org/x/net Uncontrolled Resource Consumption (updated to 0.7.0) +- CVE-2022-27664: golang.org/x/net/http2 Denial of Service vulnerability (updated to 0.0.0-20220906165146-f3363e06e74c) +- CVE-2022-32149: Denial of service in golang.org/x/text/language (updated to 0.3.8) +- CVE-2022-41721: golang.org/x/net/http2/h2c vulnerable to request smuggling attack (updated to 0.1.1-0.20221104162952-702349b0e862) +- CVE-2022-28948: gopkg.in/yaml.v3 Denial of Service (updated to 3.0.0-20220521103104-8f96da9f5d5e) + +## [0.5.1] - 2023-06-07 + +### Added + +- Add multi-driver support + +### Changed + +- Bumped UBI version to 8.8-854 +- Bump sf-apis to 0.5.1 + +### Fixed + +- Fix off-by-1 JSON ports encoding +- Add correct formatting to mapPortList in JSON output + +## [0.5.0] - 2022-10-17 + +### Added + +- Add support for k8s pod and event objects +- Add jsonpath expression support for policy engine + +### Changed + +- Bumped UBI version to 8.6-943.1665521450 + +### Fixed + +- Fix bug in exists predicate +- Fix `open_read` and `open_write` macros in ttps.yaml + +## [0.4.4] - 2022-08-01 + +### Added + +- Add rate limiting filter with time decaying + +### Changed + +- Bump UBI to 8.6-855 +- Update reference to sf-apis + +### Fixed + +- Fix exists predicate +- Fix handling of integers and booleans in MatStr function + +## [0.4.3] - 2022-06-21 + +### Changed + +- Update systemd service to include plugindir argument + +## [0.4.2] - 2022-06-10 + +### Changed + +- Add missing host field to ECS encoder + +## [0.4.1] - 2022-05-26 + +### Changed + +- Bumped UBI version to 8.6-754 +- Removed binary package's dkms requirement + +## [0.4.0] - 2022-02-18 + +### Added + +- Support for pluggable actions for policy engine +- Support for asynchronous policy engine with thread pooling +- Packaging in deb, rpm, and targz formats +- Added 14 new MITRE TTP tagging rules +- Added support for quiet logging mode +- Added plugin builder image to support plugin development and releases + +### Changed + +- Added contextual sysflow structure, removed global cache and cache synchronization primitives; refactored handler interface +- Changed cache keys to OID types +- BREAKING Changed policy engine modes and action verbs (update policy yaml rule declarations to remove `action` attribute if used with `alert` or `tag` verbs) + - `alert` and `enrich` are now policy engine modes, and `action` in policy rule declaration is now used for calling action handling plugins +- Updated the short union strings from gogen-avro +- Updated CI to automate packaging of release assets with release notes +- Bump go version to go1.17.7 +- BREAKING Added support for architecture-dependent build (darwin, linux), due to [changes in go 1.17 
net](https://github.com/golang/go/commit/e97d8eb027c0067f757860b6f766644de15941f2) package +- Updated findings short description formatting and name convention + +### Fixed + +- Fixed cache coherence and race condition when updating the cache in the processor plugin; splits the processor plugin into two plugins, reader (which builds the cache) and processor (only reads from cache) +- Fixed stream socket reader issue introduced with the upgrade to go 1.17 + +### Security + +- Updated IBM Findings SDK to fix [CVE-2020-26160](https://github.com/advisories/GHSA-w73w-5m7g-f7qc) + +## [0.3.1] - 2021-09-29 + +### Changed + +- Bumped UBI version to 8.4-211. + +## [0.3.0] - 2021-09-20 + +### Added + +- Support for pluggable export protocols +- Elastic Common Schema (ECS) export format and Elasticsearch integration +- Export to IBM Findings API +- MITRE ATT&CK TTP tagging policy +- Support for pipeline forking (tee feature) +- Custom S3 prefix to Findings exporter + +### Changed + +- Moved away from Dockerhub CI. +- Optimized JSON export +- Updated dependencies to latest `sf-apis` +- Updated sample policies +- Refactoring of processor and handling APIs + +### Fixed + +- Fixes bugs in policy engine related to lists containing quoted strings +- Fixes several issues in policy engine field mapping + +### Removed + +- Support for flat JSON schema + +## [0.2.2] - 2020-12-07 + +### Changed + +- Updated dependencies to latest `sf-apis`. + +## [0.2.1] - 2020-12-02 + +### Fixed + +- Fixes `sf.file.oid` and `sf.file.newoid` attribute mapping. + +## [0.2.0] - 2020-12-01 + +### Added + +- Adds lists and macro preprocessing to deal with usage before declarations in input policy language. +- Adds empty handling for process flow objects. +- Adds `endswith` binary operator to policy expression language. +- Added initial documentation. + +### Changed + +- Updates the grammar and interpreter to support Falco policies. +- Several refactorings and performance optimizations in policy engine. +- Tuned filter policy for k8s clusters. + +### Fixed + +- Fixes module names and package paths. ## [0.1.0] - 2020-10-30 ### Added - First release of SysFlow Processor. 
+ +[Unreleased]: https://github.com/sysflow-telemetry/sf-processor/compare/0.6.3...HEAD +[0.6.3]: https://github.com/sysflow-telemetry/sf-processor/compare/0.6.2...0.6.3 +[0.6.2]: https://github.com/sysflow-telemetry/sf-processor/compare/0.6.1...0.6.2 +[0.6.1]: https://github.com/sysflow-telemetry/sf-processor/compare/0.6.0...0.6.1 +[0.6.0]: https://github.com/sysflow-telemetry/sf-processor/compare/0.5.1...0.6.0 +[0.5.1]: https://github.com/sysflow-telemetry/sf-processor/compare/0.5.0...0.5.1 +[0.5.0]: https://github.com/sysflow-telemetry/sf-processor/compare/0.4.4...0.5.0 +[0.4.4]: https://github.com/sysflow-telemetry/sf-processor/compare/0.4.3...0.4.4 +[0.4.3]: https://github.com/sysflow-telemetry/sf-processor/compare/0.4.2...0.4.3 +[0.4.2]: https://github.com/sysflow-telemetry/sf-processor/compare/0.4.1...0.4.2 +[0.4.1]: https://github.com/sysflow-telemetry/sf-processor/compare/0.4.0...0.4.1 +[0.4.0]: https://github.com/sysflow-telemetry/sf-processor/compare/0.3.1...0.4.0 +[0.3.1]: https://github.com/sysflow-telemetry/sf-processor/compare/0.2.2...0.3.1 +[0.3.0]: https://github.com/sysflow-telemetry/sf-processor/compare/0.2.2...0.3.0 +[0.2.2]: https://github.com/sysflow-telemetry/sf-processor/compare/0.2.1...0.2.2 +[0.2.1]: https://github.com/sysflow-telemetry/sf-processor/compare/0.2.0...0.2.1 +[0.2.0]: https://github.com/sysflow-telemetry/sf-processor/compare/0.1.0...0.2.0 +[0.1.0]: https://github.com/sysflow-telemetry/sf-processor/releases/tag/0.1.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5289ee3d..ad1c973a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ ## Contributing in general -Our project welcomes external contributions. A good way to familiarize yourself with the codebase and the contribution process is to look for and address issues in the [issue tracker](https://github.com/sysflow-telemetry/sf-docs/issues). +Our project welcomes external contributions. A good way to familiarize yourself with the codebase and the contribution process is to look for and address issues in the [issue tracker](https://github.com/sysflow-telemetry/sysflow/issues). To contribute code or documentation, please submit a [pull request](https://github.com/sysflow-telemetry/sf-processor/pulls); and please quickly [get in touch](#communication) with us before embarking on a more ambitious contribution. @@ -10,13 +10,13 @@ cannot be accepted at all! ### Proposing new features -If you would like to implement a new feature, please [raise an issue](https://github.com/sysflow-telemetry/sf-docs/issues) +If you would like to implement a new feature, please [raise an issue](https://github.com/sysflow-telemetry/sysflow/issues) before sending a pull request so that the proposed feature can be discussed first. This is to avoid putting an effort on a feature that the project developers would not be able to accept into the code base. ### Fixing bugs -If you would like to fix a bug, please [raise an issue](https://github.com/sysflow-telemetry/sf-docs/issues) before sending a +If you would like to fix a bug, please [raise an issue](https://github.com/sysflow-telemetry/sysflow/issues) before sending a pull request so that the bug fix can be tracked properly. ### Merge approval @@ -78,4 +78,4 @@ on how to build the application, dependencies, and how to test the collector. TBD ## Coding style guidelines -We follow the [Golang coding standards](https://golang.org/doc/effective_go.html) in this project. You can use the go compiler or your IDE of choice to automatically lint your code. 
\ No newline at end of file +We follow the [Golang coding standards](https://golang.org/doc/effective_go.html) in this project. You can use the go compiler or your IDE of choice to automatically lint your code. diff --git a/Dockerfile b/Dockerfile index b5df8f0d..2a0b1162 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,27 @@ # -# Copyright (C) 2020 IBM Corporation. +# Copyright (C) 2022 IBM Corporation. # # Authors: # Frederico Araujo # Teryl Taylor +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. #----------------------- # Stage: base #----------------------- -FROM registry.access.redhat.com/ubi8/ubi:8.1-406 as base +ARG UBI_VER +FROM registry.access.redhat.com/ubi9/ubi:${UBI_VER} AS base # Environment and build args ARG VERSION=dev @@ -19,14 +32,14 @@ ENV PATH=$PATH:/usr/local/go/bin/ ENV GOPATH=/go/ -ENV SRC_ROOT=/go/src/github.ibm.com/sysflow/sf-processor/ +ENV SRC_ROOT=/go/src/github.com/sysflow-telemetry/sf-processor/ # Install dependencies RUN dnf update -y --disableplugin=subscription-manager && \ dnf install -y --disableplugin=subscription-manager wget gcc make git device-mapper-devel -RUN wget https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz && \ - tar -C /usr/local -xzf go1.14.2.linux-amd64.tar.gz && mkdir -p $SRC_ROOT +RUN wget https://go.dev/dl/go1.19.4.linux-amd64.tar.gz && \ + tar -C /usr/local -xzf go1.19.4.linux-amd64.tar.gz && mkdir -p $SRC_ROOT # Copy sources COPY core ${SRC_ROOT}core @@ -45,13 +58,15 @@ RUN cd ${SRC_ROOT} && \ #----------------------- # Stage: runtime #----------------------- -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.2-267 AS runtime +FROM registry.access.redhat.com/ubi9/ubi:${UBI_VER} AS runtime # Environment and build args ARG VERSION=dev ARG RELEASE=dev +ARG DOCKER_GID + ARG inputpath=/sock/sysflow.sock ENV INPUT_PATH=$inputpath @@ -72,7 +87,8 @@ ENV CONFIG_PATH=$configpath # Image labels LABEL "name"="SysFlow Processor" -LABEL "vendor"="IBM" +LABEL "maintainer"="The SysFlow team" +LABEL "vendor"="SysFlow" LABEL "version"="${VERSION}" LABEL "release"="${RELEASE}" LABEL "summary"="SysFlow Processor implements a pluggable stream-processing pipeline and contains a built-in policy engine that evaluates rules on the ingested SysFlow stream" @@ -83,8 +99,19 @@ LABEL "io.k8s.description"="SysFlow Processor implements a pluggable stream-proc # Update license COPY ./LICENSE.md /licenses/ +# Fix plugin load error +RUN ln -s /lib64/libdevmapper.so.1.02 /lib64/libdevmapper.so.1.02.1 + +# Add user +RUN useradd -u 1001 sysflow + # Copy files from previous stage -COPY --from=base /usr/local/sysflow/ /usr/local/sysflow/ +COPY --from=base --chown=sysflow:sysflow /usr/local/sysflow/ /usr/local/sysflow/ +RUN dnf -y update && \ + ( dnf -y clean all ; rm -rf /var/cache/{dnf,yum} ; true ) && \ + mkdir -p /sock && chown -R sysflow:sysflow /sock +VOLUME /sock +USER sysflow # Entrypoint CMD /usr/local/sysflow/bin/sfprocessor \ diff --git a/LICENSE.md b/LICENSE.md index f0601d5f..f3e54878 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -``` + Apache 
License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -175,4 +175,28 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS -``` \ No newline at end of file + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 The Falco Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/Makefile b/Makefile index ec6dc433..3f063274 100644 --- a/Makefile +++ b/Makefile @@ -10,37 +10,52 @@ include ./makefile.manifest.inc # Basic go commands +PATH=$(shell printenv PATH):/usr/local/go/bin GOCMD=go -GOBUILD=$(GOCMD) build -tags exclude_graphdriver_btrfs +GOBUILD=$(GOCMD) build -trimpath -tags "exclude_graphdriver_btrfs flatrecord" GOCLEAN=$(GOCMD) clean -GOTEST=$(GOCMD) test -GOGET=$(GOCMD) get -tags exclude_graphdriver_btrfs +GOTEST=$(GOCMD) test -tags "exclude_graphdriver_btrfs flatrecord" +GOGET=$(GOCMD) get -tags "exclude_graphdriver_btrfs flatrecord" BIN=sfprocessor OUTPUT=$(BIN) SRC=./driver +PACKDIR=./scripts/cpack +INSTALL_PATH=/usr/local/sysflow .PHONY: build build: version deps cd $(SRC) && $(GOBUILD) -o $(OUTPUT) -v +.PHONY: package +package: + docker run --rm --user $(id -u):$(id -g) --group-add users --entrypoint=/bin/bash \ + -v $(shell pwd)/scripts:$(INSTALL_PATH)/scripts \ + -v $(shell pwd)/resources:$(INSTALL_PATH)/resources \ + -v $(shell pwd)/LICENSE.md:$(INSTALL_PATH)/LICENSE.md \ + -v $(shell pwd)/README.md:$(INSTALL_PATH)/README.md \ + sysflowtelemetry/sf-processor:${SYSFLOW_VERSION} -- $(INSTALL_PATH)/scripts/cpack/prepackage.sh + cd scripts/cpack && export SYSFLOW_VERSION=$(SYSFLOW_VERSION) && cpack --config ./CPackConfig.cmake + .PHONY: deps deps: - cd $(SRC) && $(GOGET) ./... + cd $(SRC) && $(GOGET) ./... .PHONY: version version: cp $(SRC)/manifest/manifest.go.in $(SRC)/manifest/manifest.go - sed -ibak -e "s/SYSFLOW_VERSION/$(SYSFLOW_VERSION)/" -e "s/\"JSON_SCHEMA_VERSION\"/$(SYSFLOW_JSON_SCHEMA_VERSION)/" -e "s/BUILD_NUMBER/$(SYSFLOW_BUILD_NUMBER)/" $(SRC)/manifest/manifest.go + sed -ibak -e "s/SYSFLOW_VERSION/$(SYSFLOW_VERSION)/" -e "s/\"JSON_SCHEMA_VERSION\"/$(SYSFLOW_JSON_SCHEMA_VERSION)/" -e "s/BUILD_NUMBER/$(SYSFLOW_BUILD_NUMBER)/" -e "s/ECS_VERSION/$(SYSFLOW_ECS_VERSION)/" $(SRC)/manifest/manifest.go rm -f $(SRC)/manifest/manifest.gobak .PHONY: test test: - cd $(SRC) && $(GOTEST) -v ./... + cd $(SRC) && $(GOTEST) ./... 
.PHONY: clean clean: cd $(SRC) && $(GOCLEAN) rm -f $(SRC)/$(BIN) + rm -f $(SRC)/manifest/manifest.go + cd $(PACKDIR) && ./clean.sh .PHONY: install install: build @@ -50,8 +65,12 @@ install: build cp ./resources/policies/distribution/* /usr/local/sysflow/resources/policies/ .PHONY: docker-build -docker-build: build - docker build -t sf-processor --target=runtime -f Dockerfile . +docker-build: docker-plugin-builder + ( DOCKER_BUILDKIT=1 docker build --cache-from=sysflowtelemetry/plugin-builder:${SYSFLOW_VERSION} -t sysflowtelemetry/sf-processor:${SYSFLOW_VERSION} --build-arg UBI_VER=$(UBI_VERSION) --target=runtime -f Dockerfile . ) + +.PHONY: docker-plugin-builder +docker-plugin-builder: + ( DOCKER_BUILDKIT=1 docker build -t sysflowtelemetry/plugin-builder:${SYSFLOW_VERSION} --build-arg UBI_VER=$(UBI_VERSION) --target=base -f Dockerfile . ) .PHONY: pull pull: @@ -61,3 +80,11 @@ pull: up: sudo docker-compose -f docker-compose.yml up +.PHONY: plugins +plugins: + @for dir in `find plugins -type d`; do \ + if [ -f $${dir}/Makefile ]; then \ + $(MAKE) -C $${dir}; \ + fi; \ + done + diff --git a/README.md b/README.md index 66dc93cd..982c7751 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/sysflowtelemetry/sf-processor)](https://hub.docker.com/r/sysflowtelemetry/sf-processor/builds) +[![Build Status](https://img.shields.io/github/actions/workflow/status/sysflow-telemetry/sf-processor/ci.yaml?branch=master)](https://github.com/sysflow-telemetry/sf-processor/actions) [![Docker Pulls](https://img.shields.io/docker/pulls/sysflowtelemetry/sf-processor)](https://hub.docker.com/r/sysflowtelemetry/sf-processor) ![GitHub tag (latest by date)](https://img.shields.io/github/v/tag/sysflow-telemetry/sf-processor) [![Documentation Status](https://readthedocs.org/projects/sysflow/badge/?version=latest)](https://sysflow.readthedocs.io/en/latest/?badge=latest) @@ -6,91 +6,67 @@ # Supported tags and respective `Dockerfile` links -- [`0.1.0`](https://github.com/sysflow-telemetry/sf-processor/blob/0.1.0/Dockerfile), [`latest`](https://github.com/sysflow-telemetry/sf-processor/blob/master/Dockerfile) +- [`0.6.3`, `latest`](https://github.com/sysflow-telemetry/sf-processor/blob/0.6.3/Dockerfile), [`edge`](https://github.com/sysflow-telemetry/sf-processor/blob/master/Dockerfile), [`dev`](https://github.com/sysflow-telemetry/sf-processor/blob/dev/Dockerfile) # Quick reference -- **Documentation**: +- **Documentation**: [the SysFlow Documentation](https://sysflow.readthedocs.io) - -- **Where to get help**: + +- **Where to get help**: [the SysFlow Community Slack](https://join.slack.com/t/sysflow-telemetry/shared_invite/enQtODA5OTA3NjE0MTAzLTlkMGJlZDQzYTc3MzhjMzUwNDExNmYyNWY0NWIwODNjYmRhYWEwNGU0ZmFkNGQ2NzVmYjYxMWFjYTM1MzA5YWQ) -- **Where to file issues**: - [the github issue tracker](https://github.com/sysflow-telemetry/sf-docs/issues) (include the `sf-processor` tag) +- **Where to file issues**: + [the github issue tracker](https://github.com/sysflow-telemetry/sysflow/issues) (include the `sf-processor` tag) -- **Source of this description**: +- **Source of this description**: [repo's readme](https://github.com/sysflow-telemetry/sf-processor/edit/master/README.md) ([history](https://github.com/sysflow-telemetry/sf-processor/commits/master)) +- **Docker images**: + [docker hub](https://hub.docker.com/u/sysflowtelemetry) | [GHCR](https://github.com/orgs/sysflow-telemetry/packages) + +- **Binary packages**: + 
[deb](https://github.com/sysflow-telemetry/sf-processor/releases/download/0.6.3/sfprocessor-0.6.3-x86_64.deb) | [rpm](https://github.com/sysflow-telemetry/sf-processor/releases/download/0.6.3/sfprocessor-0.6.3-x86_64.rpm) | [tgz](https://github.com/sysflow-telemetry/sf-processor/releases/download/0.6.3/sfprocessor-0.6.3-x86_64.tar.gz) + # What is SysFlow? -The SysFlow Telemetry Pipeline is a framework for monitoring cloud workloads and for creating performance and security analytics. The goal of this project is to build all the plumbing required for system telemetry so that users can focus on writing and sharing analytics on a scalable, common open-source platform. The backbone of the telemetry pipeline is a new data format called SysFlow, which lifts raw system event information into an abstraction that describes process behaviors, and their relationships with containers, files, and network. This object-relational format is highly compact, yet it provides broad visibility into container clouds. We have also built several APIs that allow users to process SysFlow with their favorite toolkits. Learn more about SysFlow in the [SysFlow specification document](https://sysflow.readthedocs.io/en/latest/spec.html). +The SysFlow Telemetry Pipeline is a framework for monitoring cloud workloads and for creating performance and security analytics. The goal of this project is to build all the plumbing required for system telemetry so that users can focus on writing and sharing analytics on a scalable, common open-source platform. The backbone of the telemetry pipeline is a new data format called SysFlow, which lifts raw system event information into an abstraction that describes process behaviors, and their relationships with containers, files, and network. This object-relational format is highly compact, yet it provides broad visibility into container clouds. We have also built several APIs that allow users to process SysFlow with their favorite toolkits. Learn more about SysFlow in the [SysFlow documentation](https://sysflow.readthedocs.io). + +The SysFlow framework consists of the following sub-projects: + +- [sf-apis](https://github.com/sysflow-telemetry/sf-apis) provides the SysFlow schema and programmatic APIs in Go, Python, and C++. +- [sf-collector](https://github.com/sysflow-telemetry/sf-collector) monitors and collects system call and event information from hosts and exports them in the SysFlow format using Apache Avro object serialization. +- [sf-processor](https://github.com/sysflow-telemetry/sf-processor) provides a performance-optimized policy engine for processing, enriching, filtering SysFlow events, generating alerts, and exporting the processed data to various targets. +- [sf-exporter](https://github.com/sysflow-telemetry/sf-exporter) exports SysFlow traces to S3-compliant storage systems for archival purposes. +- [sf-deployments](https://github.com/sysflow-telemetry/sf-deployments) contains deployment packages for SysFlow, including Docker, Helm, and OpenShift. +- [sysflow](https://github.com/sysflow-telemetry/sysflow) is the documentation repository and issue tracker for the SysFlow framework. # About this image -The SysFlow processor is a lighweight edge analytics pipeline that can process and enrich SysFlow data. The processor is written in golang, and allows users to build and configure various pipelines using a set of built-in and custom plugins and drivers. 
Pipeline plugins are producer-consumer objects that follow an interface and pass data to one another through pre-defined channels in a multi-threaded environment. By contrast, a driver represents a data source, which pushes data to the plugins. The processor currently supports two builtin drivers, including one that reads sysflow from a file, and another that reads streaming sysflow over a domain socket. Plugins and drivers are configured using a JSON file. +The SysFlow processor is a lightweight edge analytics pipeline that can process and enrich SysFlow data. The processor is written in golang, and allows users to build and configure various pipelines using a set of built-in and custom plugins and drivers. Pipeline plugins are producer-consumer objects that follow an interface and pass data to one another through pre-defined channels in a multi-threaded environment. By contrast, a driver represents a data source, which pushes data to the plugins. The processor currently supports two built-in drivers, including one that reads SysFlow from a file, and another that reads streaming SysFlow over a domain socket. Plugins and drivers are configured using a JSON file. Please check [Sysflow Processor](https://sysflow.readthedocs.io/en/latest/processor.html) for documentation on deployment and configuration options. # How to use this image ### Starting the processor -The easiest way to run the SysFlow Processor is using [docker-compose](https://github.com/sysflow-telemetry/sf-processor/edit/master/docker-compose.yml). The following compose file shows how to run sf-processor with processor events exported to rsyslog. - -```yaml -version: "3.5" -services: - sf-processor: - container_name: sf-processor - image: sysflowtelemetry/sf-processor:latest - privileged: true - volumes: - - socket-vol:/sock/ - environment: - DRIVER: socket - INPUT_PATH: /sock/sysflow.sock - POLICYENGINE_MODE: alert - EXPORTER_TYPE: telemetry - EXPORTER_SOURCE: ${HOSTNAME} - EXPORTER_EXPORT: terminal - EXPORTER_HOST: localhost - EXPORTER_PORT: 514 - sf-collector: - container_name: sf-collector - image: sysflowtelemetry/sf-collector:latest - depends_on: - - "sf-processor" - privileged: true - volumes: - - /var/run/docker.sock:/host/var/run/docker.sock - - /dev:/host/dev - - /proc:/host/proc:ro - - /boot:/host/boot:ro - - /lib/modules:/host/lib/modules:ro - - /usr:/host/usr:ro - - /mnt/data:/mnt/data - - socket-vol:/sock/ - - ./resources/traces:/tests/traces - environment: - EXPORTER_ID: local - NODE_IP: "127.0.0.1" - FILTER: "container.name!=sf-collector and container.name!=sf-processor" - INTERVAL: 300 - SOCK_FILE: /sock/sysflow.sock -volumes: - socket-vol: + +The easiest way to run the SysFlow Processor is by using [docker-compose](https://github.com/sysflow-telemetry/sf-deployments/tree/master/docker). The provided `docker-compose.processor.yml` file deploys the SysFlow processor and collector. The rsyslog endpoint should be configured in `./config/.env.processor`. Collector settings can be changed in `./config/.env.collector`. Additional settings can be configured directly in the compose file. + +```bash +docker-compose -f docker-compose.processor.yml up ``` -Instructions for `docker-compose` and `helm` deployments are available in [here](https://sysflow.readthedocs.io/en/latest/deploy.html). +Instructions for `docker-compose`, `helm`, and `oc operator` deployments are available [here](https://sysflow.readthedocs.io/en/latest/deploy.html). 
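The pipeline itself (driver, processor plugins, and exporter) is wired together in the processor's JSON configuration file mentioned above. A minimal sketch of such a configuration, assuming the socket driver feeding the policy engine and a syslog exporter (plugin, channel, policy path, and key names here are illustrative; see the processor documentation for the exact schema), could look like:

```json
{
  "pipeline": [
    {
      "processor": "sysflowreader",
      "handler": "flattener",
      "in": "sysflow sysflowchan",
      "out": "flat flattenerchan"
    },
    {
      "processor": "policyengine",
      "in": "flat flattenerchan",
      "out": "evt eventchan",
      "policies": "../resources/policies/distribution",
      "mode": "alert"
    },
    {
      "processor": "exporter",
      "in": "evt eventchan",
      "export": "syslog",
      "format": "json",
      "syslog.host": "localhost",
      "syslog.port": "514"
    }
  ]
}
```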
Alternatively, you can install the SysFlow Processor using its binary installers available in the release pages. -### Configuration + # License -View [license information](https://github.com/sysflow-telemetry/sf-exporter/blob/master/LICENSE.md) for the software contained in this image. +View [license information](https://github.com/sysflow-telemetry/sf-processor/blob/master/LICENSE.md) for the software contained in this image. As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc from the base distribution, along with any direct or indirect dependencies of the primary software being contained). diff --git a/core/cache/tables.go b/core/cache/tables.go index a38770fb..a027721c 100644 --- a/core/cache/tables.go +++ b/core/cache/tables.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,148 +17,134 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package cache implements a local cache for telemetry objects. package cache import ( - "fmt" - "sync" - - "github.com/cespare/xxhash" - cqueue "github.com/enriquebris/goconcurrentqueue" - cmap "github.com/orcaman/concurrent-map" - "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/sfgo" ) -const ( - cacheSize = 2 -) - -var instance *SFTables -var once sync.Once - // SFTables defines thread-safe shared cache for plugins for storing SysFlow entities. type SFTables struct { - contTable *cqueue.FIFO - procTable *cqueue.FIFO - fileTable *cqueue.FIFO - rwmutex sync.RWMutex - capacity int -} - -// GetInstance returns SFTables singleton instance -func GetInstance() *SFTables { - once.Do(func() { - instance = newSFTables(cacheSize) - }) - return instance + contTable map[string]*sfgo.Container + podTable map[string]*sfgo.Pod + // procTable map[uint64][]*sfgo.Process + // fileTable map[uint64]*sfgo.File + // ptreeTable map[uint64][]*sfgo.Process + procTable map[sfgo.OID][]*sfgo.Process + fileTable map[sfgo.FOID]*sfgo.File + ptreeTable map[sfgo.OID][]*sfgo.Process } -// newSFTables creates a new SFTables instance. -func newSFTables(capacity int) *SFTables { +// NewSFTables creates a new SFTables instance. +func NewSFTables() *SFTables { t := new(SFTables) - if capacity < 1 { - logger.Error.Println("Cache capacity must be greater than 1") - return nil - } - t.capacity = capacity - t.contTable = cqueue.NewFIFO() - t.procTable = cqueue.NewFIFO() - t.fileTable = cqueue.NewFIFO() - t.contTable.Enqueue(cmap.New()) - t.procTable.Enqueue(cmap.New()) - t.fileTable.Enqueue(cmap.New()) + t.new() return t } -// Reset pushes a new set of empty maps into the cache. 
-func (t *SFTables) Reset() { - t.rwmutex.Lock() - defer t.rwmutex.Unlock() - t.reset(t.contTable) - t.reset(t.procTable) - t.reset(t.fileTable) +func (t *SFTables) new() { + t.contTable = make(map[string]*sfgo.Container) + t.podTable = make(map[string]*sfgo.Pod) + t.procTable = make(map[sfgo.OID][]*sfgo.Process) + t.fileTable = make(map[sfgo.FOID]*sfgo.File) + t.ptreeTable = make(map[sfgo.OID][]*sfgo.Process) + // t.procTable = make(map[uint64][]*sfgo.Process) + // t.fileTable = make(map[uint64]*sfgo.File) + // t.ptreeTable = make(map[uint64][]*sfgo.Process) } -func (t *SFTables) reset(queue *cqueue.FIFO) { - queue.Enqueue(cmap.New()) - if queue.GetLen() > t.capacity { - queue.Remove(0) - } +// Reset pushes a new set of empty maps into the cache. +func (t *SFTables) Reset() { + t.new() } // GetCont retrieves a cached container object by ID. -func (t *SFTables) GetCont(ID string) *sfgo.Container { - t.rwmutex.RLock() - defer t.rwmutex.RUnlock() - for i := 0; i < t.contTable.GetLen(); i++ { - m, _ := t.contTable.Get(i) - table := m.(cmap.ConcurrentMap) - if v, ok := table.Get(ID); ok { - return v.(*sfgo.Container) - } - } - return nil +func (t *SFTables) GetCont(ID string) (co *sfgo.Container) { + co = t.contTable[ID] + return } // SetCont stores a container object in the cache. func (t *SFTables) SetCont(ID string, o *sfgo.Container) { - t.rwmutex.RLock() - m, _ := t.contTable.Get(t.contTable.GetLen() - 1) - t.rwmutex.RUnlock() - table := m.(cmap.ConcurrentMap) - table.Set(ID, o) + t.contTable[ID] = o +} + +// GetPod retrieves a cached pod object by ID. +func (t *SFTables) GetPod(ID string) (pd *sfgo.Pod) { + pd = t.podTable[ID] + return +} + +// SetPod stores a pod object in the cache. +func (t *SFTables) SetPod(ID string, o *sfgo.Pod) { + t.podTable[ID] = o } // GetProc retrieves a cached process object by ID. -func (t *SFTables) GetProc(ID sfgo.OID) *sfgo.Process { - t.rwmutex.RLock() - defer t.rwmutex.RUnlock() - for i := 0; i < t.procTable.GetLen(); i++ { - m, _ := t.procTable.Get(i) - table := m.(cmap.ConcurrentMap) - if v, ok := table.Get(t.getHash(ID)); ok { - return v.(*sfgo.Process) +func (t *SFTables) GetProc(ID sfgo.OID) (po *sfgo.Process) { + // if p, ok := t.procTable[hash.GetHash(ID)]; ok { + if p, ok := t.procTable[ID]; ok { + if v := p[sfgo.SFObjectStateMODIFIED]; v != nil { + po = v + } else if v := p[sfgo.SFObjectStateCREATED]; v != nil { + po = v + } else if v := p[sfgo.SFObjectStateREUP]; v != nil { + po = v } } - return nil + return } // SetProc stores a process object in the cache. func (t *SFTables) SetProc(ID sfgo.OID, o *sfgo.Process) { - t.rwmutex.RLock() - m, _ := t.procTable.Get(t.procTable.GetLen() - 1) - t.rwmutex.RUnlock() - table := m.(cmap.ConcurrentMap) - table.Set(t.getHash(ID), o) + // oID := hash.GetHash(ID) + oID := ID + if p, ok := t.procTable[oID]; ok { + p[o.State] = o + } else { + p = make([]*sfgo.Process, sfgo.SFObjectStateREUP+1) + p[o.State] = o + t.procTable[oID] = p + } } // GetFile retrieves a cached file object by ID. func (t *SFTables) GetFile(ID sfgo.FOID) *sfgo.File { - t.rwmutex.RLock() - defer t.rwmutex.RUnlock() - for i := 0; i < t.fileTable.GetLen(); i++ { - m, _ := t.fileTable.Get(i) - table := m.(cmap.ConcurrentMap) - if v, ok := table.Get(t.getHash(ID)); ok { - return v.(*sfgo.File) - } + // if v, ok := t.fileTable[hash.GetHash(ID)]; ok { + if v, ok := t.fileTable[ID]; ok { + return v } return nil } // SetFile stores a file object in the cache. 
func (t *SFTables) SetFile(ID sfgo.FOID, o *sfgo.File) { - t.rwmutex.RLock() - m, _ := t.fileTable.Get(t.fileTable.GetLen() - 1) - t.rwmutex.RUnlock() - table := m.(cmap.ConcurrentMap) - table.Set(t.getHash(ID), o) + t.fileTable[ID] = o + // t.fileTable[hash.GetHash(ID)] = o } -func (t *SFTables) getHash(o interface{}) string { - h := xxhash.New() - h.Write([]byte(fmt.Sprintf("%v", o))) - return fmt.Sprintf("%x", h.Sum(nil)) +// GetPtree retrieves and caches the process hierarchy given a process ID. +func (t *SFTables) GetPtree(ID sfgo.OID) []*sfgo.Process { + // oID := hash.GetHash(ID) + oID := ID + if ptree, ok := t.ptreeTable[oID]; ok { + return ptree + } + ptree := t.getProcProv(ID) + t.ptreeTable[oID] = ptree + return ptree +} + +// getProcProv builds the provenance tree of a process recursively. +func (t *SFTables) getProcProv(ID sfgo.OID) []*sfgo.Process { + var ptree = make([]*sfgo.Process, 0) + if p := t.GetProc(ID); p != nil { + if p.Poid != nil && p.Poid.UnionType == sfgo.PoidUnionTypeEnumOID { + return append(append(ptree, p), t.getProcProv(*p.Poid.OID)...) + } + return append(ptree, p) + } + return ptree } diff --git a/core/exporter/commons/config.go b/core/exporter/commons/config.go new file mode 100644 index 00000000..3bacd613 --- /dev/null +++ b/core/exporter/commons/config.go @@ -0,0 +1,247 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package commons defines common facilities for exporters. +package commons + +import ( + "strconv" + + "github.com/sysflow-telemetry/sf-apis/go/secrets" +) + +// Configuration keys. +const ( + TransportConfigKey string = "export" + FormatConfigKey string = "format" + VaultEnabledConfigKey string = "vault.secrets" + VaultPathConfigKey string = "vault.path" + VaultEncodingConfigKey string = "vault.encoding" + EventBufferConfigKey string = "buffer" + VersionKey string = "version" + JSONSchemaVersionKey string = "jsonschemaversion" + EcsVersionKey string = "ecsversion" + BuildNumberKey string = "buildnumber" + ClusterIDKey string = "cluster.id" +) + +// Config defines a configuration object for the exporter. +type Config struct { + Transport Transport + Format Format + EventBuffer int + VaultEnabled bool + VaultMountPath string + VaultEncoding VaultEncoding + secrets *secrets.Secrets + Version string + JSONSchemaVersion string + EcsVersion string + BuildNumber string + ClusterID string + FileConfig + SyslogConfig + ESConfig + FindingsConfig +} + +// CreateConfig creates a new config object from config dictionary. 
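// An illustrative sketch of a caller-supplied dictionary (values below are
// placeholders, not defaults): entries arrive as strings from the pipeline
// JSON, keyed by the constants above plus the transport-specific keys defined
// in the other config files of this package, e.g.:
//
//	conf := map[string]interface{}{
//		"export":      "syslog",    // TransportConfigKey
//		"format":      "json",      // FormatConfigKey
//		"buffer":      "100",       // EventBufferConfigKey
//		"syslog.host": "localhost", // see syslogconfig.go
//		"syslog.port": "514",
//	}
//	c, err := CreateConfig(conf)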
+func CreateConfig(conf map[string]interface{}) (c Config, err error) { + c = Config{} + + // wrapper for reading from secrets vault + if v, ok := conf[VaultEnabledConfigKey].(string); ok && v == "true" { + c.VaultEnabled = true + if e, ok := conf[VaultEncodingConfigKey].(string); ok { + c.VaultEncoding = parseVaultEncodingConfig(e) + } + var s *secrets.Secrets + if p, ok := conf[VaultPathConfigKey].(string); ok { + s, err = secrets.NewSecretsWithCustomPath(p) + } else { + s, err = secrets.NewSecrets() + } + if err != nil { + return + } + c.secrets = s + } + + // parse config map + if v, ok := conf[TransportConfigKey].(string); ok { + c.Transport = parseTransportConfig(v) + } + if v, ok := conf[FormatConfigKey].(string); ok { + c.Format = parseFormatConfig(v) + } + if v, ok := conf[EventBufferConfigKey].(string); ok { + c.EventBuffer, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + if v, ok := conf[VersionKey].(string); ok { + c.Version = v + } + if v, ok := conf[JSONSchemaVersionKey].(string); ok { + c.JSONSchemaVersion = v + } + if v, ok := conf[EcsVersionKey].(string); ok { + c.EcsVersion = v + } + if v, ok := conf[BuildNumberKey].(string); ok { + c.BuildNumber = v + } + if v, ok := conf[ClusterIDKey].(string); ok { + c.ClusterID = v + } + + // parse specialized configs + c.FileConfig, err = CreateFileConfig(c, conf) + if err != nil { + return + } + c.SyslogConfig, err = CreateSyslogConfig(c, conf) + if err != nil { + return + } + c.ESConfig, err = CreateElasticConfig(c, conf) + if err != nil { + return + } + c.FindingsConfig, err = CreateFindingsConfig(c, conf) + + return +} + +// Transport type. +type Transport int + +// Transport config options. +const ( + StdOutTransport Transport = iota + FileTransport + SyslogTransport + ESTransport + FindingsTransport + NullTransport +) + +func (s Transport) String() string { + return [...]string{"terminal", "file", "syslog", "es", "findings", "null"}[s] +} + +func parseTransportConfig(s string) Transport { + if FileTransport.String() == s { + return FileTransport + } + if SyslogTransport.String() == s { + return SyslogTransport + } + if ESTransport.String() == s { + return ESTransport + } + if FindingsTransport.String() == s { + return FindingsTransport + } + if NullTransport.String() == s { + return NullTransport + } + return StdOutTransport +} + +// Format type. +type Format int + +// Format config options. +const ( + JSONFormat Format = iota // JSON schema + ECSFormat // Elastic Common Schema + OccurrenceFormat // IBM Findings Occurrence +) + +func (s Format) String() string { + return [...]string{"json", "ecs", "occurrence"}[s] +} + +func parseFormatConfig(s string) Format { + switch s { + case JSONFormat.String(): + return JSONFormat + case ECSFormat.String(): + return ECSFormat + case OccurrenceFormat.String(): + return OccurrenceFormat + } + return JSONFormat +} + +// Proto denotes protocol type. +type Proto int + +// Proto config options. +const ( + TCPProto Proto = iota + TCPTLSProto + UDPProto +) + +func (s Proto) String() string { + return [...]string{"tcp", "tls", "udp"}[s] +} + +func parseProtoConfig(s string) Proto { + switch s { + case TCPProto.String(): + return TCPProto + case TCPTLSProto.String(): + return TCPTLSProto + case UDPProto.String(): + return UDPProto + } + return TCPProto +} + +// VaultEncoding type. +type VaultEncoding int + +// VaultEncoding config options. 
+const ( + NoneVaultEncoding VaultEncoding = iota + Base64VaultEncoding +) + +func (s VaultEncoding) String() string { + return [...]string{"none", "base64"}[s] +} + +func parseVaultEncodingConfig(s string) VaultEncoding { + if NoneVaultEncoding.String() == s { + return NoneVaultEncoding + } + if Base64VaultEncoding.String() == s { + return Base64VaultEncoding + } + return NoneVaultEncoding +} + +// GetSecret obtains the secret for a key. +func (c Config) GetSecret(key string) (string, error) { + return [...]func(string) (string, error){c.secrets.Get, c.secrets.GetDecoded}[c.VaultEncoding](key) +} diff --git a/core/exporter/commons/elasticconfig.go b/core/exporter/commons/elasticconfig.go new file mode 100644 index 00000000..db9fca4f --- /dev/null +++ b/core/exporter/commons/elasticconfig.go @@ -0,0 +1,103 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package commons defines common facilities for exporters. +package commons + +import ( + "strconv" + "strings" + "time" +) + +// Configuration keys. +const ( + ESAddressesConfigKey string = "es.addresses" + ESIndexConfigKey string = "es.index" + ESUsernameConfigKey string = "es.username" + ESPasswordConfigKey string = "es.password" + ESWorkersConfigKey string = "es.bulk.numWorkers" + ESFBufferConfigKey string = "es.bulk.flushBuffer" + ESFTimeoutConfigKey string = "es.bulk.flushTimeout" +) + +// ESConfig holds Elastic specific configuration. +type ESConfig struct { + ESAddresses []string + ESIndex string + ESUsername string + ESPassword string + ESNumWorkers int + ESFlushBuffer int + ESFlushTimeout time.Duration +} + +// CreateElasticConfig creates a new config object from config dictionary. 
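// An illustrative sketch of the Elastic-related entries consumed here when
// CreateConfig delegates to this function (values are placeholders;
// es.addresses is split on commas and es.bulk.flushTimeout is parsed with
// time.ParseDuration):
//
//	conf := map[string]interface{}{
//		"es.addresses":         "https://es1:9200,https://es2:9200",
//		"es.index":             "sysflow",
//		"es.username":          "elastic",
//		"es.password":          "changeme",
//		"es.bulk.numWorkers":   "2",
//		"es.bulk.flushTimeout": "30s",
//	}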
+func CreateElasticConfig(bc Config, conf map[string]interface{}) (c ESConfig, err error) { + // default values + c = ESConfig{ + ESNumWorkers: 0, + ESFlushBuffer: 5e+6, + ESFlushTimeout: 30 * time.Second} + + // parse config map + if v, ok := conf[ESAddressesConfigKey].(string); ok { + c.ESAddresses = strings.Split(v, ",") + } + if v, ok := conf[ESIndexConfigKey].(string); ok { + c.ESIndex = v + } + if v, ok := conf[ESUsernameConfigKey].(string); ok { + c.ESUsername = v + } else if bc.VaultEnabled && bc.Transport == ESTransport { + s, err := bc.GetSecret(ESUsernameConfigKey) + if err != nil { + return c, err + } + c.ESUsername = string(s) + } + if v, ok := conf[ESPasswordConfigKey].(string); ok { + c.ESPassword = v + } else if bc.VaultEnabled && bc.Transport == ESTransport { + s, err := bc.GetSecret(ESPasswordConfigKey) + if err != nil { + return c, err + } + c.ESPassword = string(s) + } + if v, ok := conf[ESWorkersConfigKey].(string); ok { + c.ESNumWorkers, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + if v, ok := conf[ESFBufferConfigKey].(string); ok { + c.ESFlushBuffer, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + if v, ok := conf[ESFTimeoutConfigKey].(string); ok { + c.ESFlushTimeout, err = time.ParseDuration(v) + if err != nil { + return c, err + } + } + return +} diff --git a/core/exporter/commons/fileconfig.go b/core/exporter/commons/fileconfig.go new file mode 100644 index 00000000..57e4c9fd --- /dev/null +++ b/core/exporter/commons/fileconfig.go @@ -0,0 +1,43 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package commons defines common facilities for exporters. +package commons + +// Configuration keys. +const ( + PathConfigKey string = "file.path" +) + +// FileConfig holds file output specific configuration. +type FileConfig struct { + Path string +} + +// CreateFileConfig creates a new config object from config dictionary. +func CreateFileConfig(bc Config, conf map[string]interface{}) (c FileConfig, err error) { + // default values + c = FileConfig{Path: "./export.out"} + + // parse config map + if v, ok := conf[PathConfigKey].(string); ok { + c.Path = v + } + return +} diff --git a/core/exporter/commons/findingsconfig.go b/core/exporter/commons/findingsconfig.go new file mode 100644 index 00000000..24586a8c --- /dev/null +++ b/core/exporter/commons/findingsconfig.go @@ -0,0 +1,126 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package commons defines common facilities for exporters. +package commons + +import ( + "strconv" + + "github.com/IBM/scc-go-sdk/v3/findingsv1" +) + +// Configuration keys. +const ( + FindingsAPIKeyConfigKey string = "findings.apikey" + FindingsURLConfigKey string = "findings.url" + FindingsAccountIDConfigKey string = "findings.accountid" + FindingsProviderIDConfigKey string = "findings.provider" + FindingsRegionConfigKey string = "findings.region" + FindingsSQLQueryURLConfigKey string = "findings.sqlqueryurl" + FindingsSQLQueryCrnConfigKey string = "findings.sqlquerycrn" + FindingsS3RegionConfigKey string = "findings.s3region" + FindingsS3BucketConfigKey string = "findings.s3bucket" + FindingsS3PrefixConfigKey string = "findings.s3prefix" + FindingsPathConfigKey string = "findings.path" + FindingsPoolCapacityConfigKey string = "findings.pool.capacity" + FindingsPoolMaxAgeConfigKey string = "findings.pool.maxage" +) + +// FindingsConfig holds IBM Findings API specific configuration. +type FindingsConfig struct { + FindingsAPIKey string + FindingsURL string + FindingsAccountID string + FindingsProviderID string + FindingsSQLQueryURL string + FindingsSQLQueryCrn string + FindingsRegion string + FindingsS3Region string + FindingsS3Bucket string + FindingsS3Prefix string + FindingsPath string + FindingsPoolCapacity int + FindingsPoolMaxAge int +} + +// CreateFindingsConfig creates a new config object from config dictionary. 
+func CreateFindingsConfig(bc Config, conf map[string]interface{}) (c FindingsConfig, err error) { + // default values + c = FindingsConfig{ + FindingsURL: findingsv1.DefaultServiceURL, + FindingsSQLQueryURL: "https://us.sql-query.cloud.ibm.com/sqlquery", + FindingsPath: "/mnt/occurrences", + FindingsPoolCapacity: 250, + FindingsPoolMaxAge: 1440} // 24 hours (specified in minutes) + + // parse config map + if v, ok := conf[FindingsAPIKeyConfigKey].(string); ok { + c.FindingsAPIKey = v + } else if bc.VaultEnabled && bc.Transport == FindingsTransport { + s, err := bc.GetSecret(FindingsAPIKeyConfigKey) + if err != nil { + return c, err + } + c.FindingsAPIKey = string(s) + } + if v, ok := conf[FindingsAccountIDConfigKey].(string); ok { + c.FindingsAccountID = v + } + if v, ok := conf[FindingsURLConfigKey].(string); ok { + c.FindingsURL = v + } + if v, ok := conf[FindingsProviderIDConfigKey].(string); ok { + c.FindingsProviderID = v + } + if v, ok := conf[FindingsSQLQueryURLConfigKey].(string); ok { + c.FindingsSQLQueryURL = v + } + if v, ok := conf[FindingsSQLQueryCrnConfigKey].(string); ok { + c.FindingsSQLQueryCrn = v + } + if v, ok := conf[FindingsRegionConfigKey].(string); ok { + c.FindingsRegion = v + } + if v, ok := conf[FindingsS3RegionConfigKey].(string); ok { + c.FindingsS3Region = v + } + if v, ok := conf[FindingsS3PrefixConfigKey].(string); ok { + c.FindingsS3Prefix = v + } + if v, ok := conf[FindingsS3BucketConfigKey].(string); ok { + c.FindingsS3Bucket = v + } + if v, ok := conf[FindingsPathConfigKey].(string); ok { + c.FindingsPath = v + } + if v, ok := conf[FindingsPoolCapacityConfigKey].(string); ok { + c.FindingsPoolCapacity, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + if v, ok := conf[FindingsPoolMaxAgeConfigKey].(string); ok { + c.FindingsPoolMaxAge, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + return +} diff --git a/core/exporter/commons/syslogconfig.go b/core/exporter/commons/syslogconfig.go new file mode 100644 index 00000000..b6f5b803 --- /dev/null +++ b/core/exporter/commons/syslogconfig.go @@ -0,0 +1,73 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package commons defines common facilities for exporters. +package commons + +import ( + "strconv" +) + +// Configuration keys. +const ( + ProtoConfigKey string = "syslog.proto" + TagConfigKey string = "syslog.tag" + LogSourceConfigKey string = "syslog.source" + HostConfigKey string = "syslog.host" + PortConfigKey string = "syslog.port" +) + +// SyslogConfig holds rsyslog specific configuration. +type SyslogConfig struct { + Proto Proto + Tag string + LogSource string + Host string + Port int +} + +// CreateSyslogConfig creates a new config object from config dictionary. 
+func CreateSyslogConfig(bc Config, conf map[string]interface{}) (c SyslogConfig, err error) { + // default values + c = SyslogConfig{ + Host: "localhost", + Port: 514, + Tag: "sysflow"} + + // parse config map + if v, ok := conf[ProtoConfigKey].(string); ok { + c.Proto = parseProtoConfig(v) + } + if v, ok := conf[TagConfigKey].(string); ok { + c.Tag = v + } + if v, ok := conf[LogSourceConfigKey].(string); ok { + c.LogSource = v + } + if v, ok := conf[HostConfigKey].(string); ok { + c.Host = v + } + if v, ok := conf[PortConfigKey].(string); ok { + c.Port, err = strconv.Atoi(v) + if err != nil { + return c, err + } + } + return +} diff --git a/core/exporter/event.go b/core/exporter/commons/types.go similarity index 79% rename from core/exporter/event.go rename to core/exporter/commons/types.go index 8347afaf..bc5e7913 100644 --- a/core/exporter/event.go +++ b/core/exporter/commons/types.go @@ -16,11 +16,9 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package exporter -// Event defines an interface for exported event objects. -type Event interface { - ToJSON() []byte - ToJSONStr() string -} +// Package commons defines common facilities for exporters. +package commons + +// EncodedData represents the encoded telemetry data to be exported. +type EncodedData interface{} diff --git a/core/exporter/config.go b/core/exporter/config.go deleted file mode 100644 index 9bd8af85..00000000 --- a/core/exporter/config.go +++ /dev/null @@ -1,198 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -package exporter - -import ( - "strconv" -) - -// Configuration keys. -const ( - ExportConfigKey string = "export" - ExpTypeConfigKey string = "type" - FormatConfigKey string = "format" - FlatConfigKey string = "flat" - ProtoConfigKey string = "proto" - TagConfigKey string = "tag" - LogSourceConfigKey string = "source" - HostConfigKey string = "host" - PortConfigKey string = "port" - PathConfigKey string = "path" - EventBufferConfigKey string = "buffer" - VersionKey string = "version" - JSONSchemaVersionKey string = "jsonschemaversion" - BuildNumberKey string = "buildnumber" -) - -// Config defines a configuration object for the exporter. -type Config struct { - Export Export - ExpType ExportType - Format Format - Flat bool - Proto Proto - Tag string - LogSource string - Host string - Port int - Path string - EventBuffer int - Version string - JSONSchemaVersion string - BuildNumber string -} - -// CreateConfig creates a new config object from config dictionary. 
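// Illustrative sketch (not part of the diff): parsing a syslog transport section of the
// pipeline configuration with the new per-transport constructor above. The zero-value
// commons.Config base and the literal key values are assumptions for illustration only.
package main

import (
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
)

func main() {
	conf := map[string]interface{}{
		"syslog.proto": "udp",
		"syslog.tag":   "sysflow",
		"syslog.host":  "syslog.example.com",
		"syslog.port":  "514", // numeric settings arrive as strings and are parsed with strconv.Atoi
	}
	c, err := commons.CreateSyslogConfig(commons.Config{}, conf)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Host, c.Port, c.Tag)
}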
-func CreateConfig(conf map[string]string) Config { - var c Config = Config{Host: "localhost", Port: 514, Path: "./export.out", Tag: "sysflow"} // default values - if v, ok := conf[ExportConfigKey]; ok { - c.Export = parseExportConfig(v) - } - if v, ok := conf[ExpTypeConfigKey]; ok { - c.ExpType = parseExportTypeConfig(v) - } - if v, ok := conf[FormatConfigKey]; ok { - c.Format = parseFormatConfig(v) - } - if v, ok := conf[FlatConfigKey]; ok && v == "true" { - c.Flat = true - } - if v, ok := conf[ProtoConfigKey]; ok { - c.Proto = parseProtoConfig(v) - } - if v, ok := conf[TagConfigKey]; ok { - c.Tag = v - } - if v, ok := conf[LogSourceConfigKey]; ok { - c.LogSource = v - } - if v, ok := conf[HostConfigKey]; ok { - c.Host = v - } - if v, ok := conf[PortConfigKey]; ok { - c.Port, _ = strconv.Atoi(v) - } - if v, ok := conf[PathConfigKey]; ok { - c.Path = v - } - if v, ok := conf[EventBufferConfigKey]; ok { - c.EventBuffer, _ = strconv.Atoi(v) - } - if v, ok := conf[VersionKey]; ok { - c.Version = v - } - if v, ok := conf[JSONSchemaVersionKey]; ok { - c.JSONSchemaVersion = v - } - if v, ok := conf[BuildNumberKey]; ok { - c.BuildNumber = v - } - return c -} - -// Export type. -type Export int - -// Export config options. -const ( - StdOutExport Export = iota - FileExport - SyslogExport - NullExport -) - -func (s Export) String() string { - return [...]string{"terminal", "file", "syslog", "null"}[s] -} - -func parseExportConfig(s string) Export { - if FileExport.String() == s { - return FileExport - } - if SyslogExport.String() == s { - return SyslogExport - } - if NullExport.String() == s { - return NullExport - } - return StdOutExport -} - -// ExportType type. -type ExportType int - -// ExportType config options. -const ( - TelemetryType ExportType = iota - BatchType -) - -func (s ExportType) String() string { - return [...]string{"telemetry", "batch"}[s] -} - -func parseExportTypeConfig(s string) ExportType { - if BatchType.String() == s { - return BatchType - } - return TelemetryType -} - -// Format type. -type Format int - -// Format config options. -const ( - JSONFormat Format = iota -) - -func (s Format) String() string { - return [...]string{"json"}[s] -} - -func parseFormatConfig(s string) Format { - return JSONFormat -} - -// Proto denotes protocol type. -type Proto int - -// Proto config options. -const ( - TCPProto Proto = iota - TCPTLSProto - UDPProto -) - -func (s Proto) String() string { - return [...]string{"tcp", "tls", "udp"}[s] -} - -func parseProtoConfig(s string) Proto { - switch s { - case TCPProto.String(): - return TCPProto - case TCPTLSProto.String(): - return TCPTLSProto - case UDPProto.String(): - return UDPProto - } - return TCPProto -} diff --git a/core/exporter/encoders/avro/occurrence/avdl/Event.avdl b/core/exporter/encoders/avro/occurrence/avdl/Event.avdl new file mode 100644 index 00000000..1eb0c70c --- /dev/null +++ b/core/exporter/encoders/avro/occurrence/avdl/Event.avdl @@ -0,0 +1,39 @@ +/** Copyright (C) 2019 IBM Corporation. +* +* Authors: +* Frederico Araujo +* Teryl Taylor +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +**/ +@namespace("event") +protocol Event { +record Event { + long ts; + string description; + string severity; + string clusterID; + string nodeID; + string nodeIP; + string containerID; + string recordType; + string opFlags; + string pProcCmd; + long pProcPID; + string procCmd; + long procPID; + string Resource; + string Tags; + string Trace; +} +} diff --git a/core/exporter/encoders/avro/occurrence/avro-tools-1.10.2.jar b/core/exporter/encoders/avro/occurrence/avro-tools-1.10.2.jar new file mode 100644 index 00000000..4a906250 Binary files /dev/null and b/core/exporter/encoders/avro/occurrence/avro-tools-1.10.2.jar differ diff --git a/core/exporter/encoders/avro/occurrence/avsc/Event.avsc b/core/exporter/encoders/avro/occurrence/avsc/Event.avsc new file mode 100644 index 00000000..d343a582 --- /dev/null +++ b/core/exporter/encoders/avro/occurrence/avsc/Event.avsc @@ -0,0 +1,54 @@ +{ + "type" : "record", + "name" : "Event", + "namespace" : "event", + "fields" : [ { + "name" : "ts", + "type" : "long" + }, { + "name" : "description", + "type" : "string" + }, { + "name" : "severity", + "type" : "string" + }, { + "name" : "clusterID", + "type" : "string" + }, { + "name" : "nodeID", + "type" : "string" + }, { + "name" : "nodeIP", + "type" : "string" + }, { + "name" : "containerID", + "type" : "string" + }, { + "name" : "recordType", + "type" : "string" + }, { + "name" : "opFlags", + "type" : "string" + }, { + "name" : "pProcCmd", + "type" : "string" + }, { + "name" : "pProcPID", + "type" : "long" + }, { + "name" : "procCmd", + "type" : "string" + }, { + "name" : "procPID", + "type" : "long" + }, { + "name" : "Resource", + "type" : "string" + }, { + "name" : "Tags", + "type" : "string" + }, { + "name" : "Trace", + "type" : "string" + } ] +} diff --git a/core/exporter/encoders/avro/occurrence/event/event.go b/core/exporter/encoders/avro/occurrence/event/event.go new file mode 100644 index 00000000..b6fc8282 --- /dev/null +++ b/core/exporter/encoders/avro/occurrence/event/event.go @@ -0,0 +1,230 @@ +// Code generated by github.com/actgardner/gogen-avro/v7. DO NOT EDIT. 
+/* + * SOURCE: + * Event.avsc + */ +package event + +import ( + "github.com/actgardner/gogen-avro/v7/compiler" + "github.com/actgardner/gogen-avro/v7/vm" + "github.com/actgardner/gogen-avro/v7/vm/types" + "io" +) + +type Event struct { + Ts int64 `json:"ts"` + + Description string `json:"description"` + + Severity string `json:"severity"` + + ClusterID string `json:"clusterID"` + + NodeID string `json:"nodeID"` + + NodeIP string `json:"nodeIP"` + + ContainerID string `json:"containerID"` + + RecordType string `json:"recordType"` + + OpFlags string `json:"opFlags"` + + PProcCmd string `json:"pProcCmd"` + + PProcPID int64 `json:"pProcPID"` + + ProcCmd string `json:"procCmd"` + + ProcPID int64 `json:"procPID"` + + Resource string `json:"Resource"` + + Tags string `json:"Tags"` + + Trace string `json:"Trace"` +} + +const EventAvroCRC64Fingerprint = "\f\xa9\xfe\x11\x8aaM\xca" + +func NewEvent() *Event { + return &Event{} +} + +func DeserializeEvent(r io.Reader) (*Event, error) { + t := NewEvent() + deser, err := compiler.CompileSchemaBytes([]byte(t.Schema()), []byte(t.Schema())) + if err != nil { + return nil, err + } + + err = vm.Eval(r, deser, t) + if err != nil { + return nil, err + } + return t, err +} + +func DeserializeEventFromSchema(r io.Reader, schema string) (*Event, error) { + t := NewEvent() + + deser, err := compiler.CompileSchemaBytes([]byte(schema), []byte(t.Schema())) + if err != nil { + return nil, err + } + + err = vm.Eval(r, deser, t) + if err != nil { + return nil, err + } + return t, err +} + +func writeEvent(r *Event, w io.Writer) error { + var err error + err = vm.WriteLong(r.Ts, w) + if err != nil { + return err + } + err = vm.WriteString(r.Description, w) + if err != nil { + return err + } + err = vm.WriteString(r.Severity, w) + if err != nil { + return err + } + err = vm.WriteString(r.ClusterID, w) + if err != nil { + return err + } + err = vm.WriteString(r.NodeID, w) + if err != nil { + return err + } + err = vm.WriteString(r.NodeIP, w) + if err != nil { + return err + } + err = vm.WriteString(r.ContainerID, w) + if err != nil { + return err + } + err = vm.WriteString(r.RecordType, w) + if err != nil { + return err + } + err = vm.WriteString(r.OpFlags, w) + if err != nil { + return err + } + err = vm.WriteString(r.PProcCmd, w) + if err != nil { + return err + } + err = vm.WriteLong(r.PProcPID, w) + if err != nil { + return err + } + err = vm.WriteString(r.ProcCmd, w) + if err != nil { + return err + } + err = vm.WriteLong(r.ProcPID, w) + if err != nil { + return err + } + err = vm.WriteString(r.Resource, w) + if err != nil { + return err + } + err = vm.WriteString(r.Tags, w) + if err != nil { + return err + } + err = vm.WriteString(r.Trace, w) + if err != nil { + return err + } + return err +} + +func (r *Event) Serialize(w io.Writer) error { + return writeEvent(r, w) +} + +func (r *Event) Schema() string { + return 
"{\"fields\":[{\"name\":\"ts\",\"type\":\"long\"},{\"name\":\"description\",\"type\":\"string\"},{\"name\":\"severity\",\"type\":\"string\"},{\"name\":\"clusterID\",\"type\":\"string\"},{\"name\":\"nodeID\",\"type\":\"string\"},{\"name\":\"nodeIP\",\"type\":\"string\"},{\"name\":\"containerID\",\"type\":\"string\"},{\"name\":\"recordType\",\"type\":\"string\"},{\"name\":\"opFlags\",\"type\":\"string\"},{\"name\":\"pProcCmd\",\"type\":\"string\"},{\"name\":\"pProcPID\",\"type\":\"long\"},{\"name\":\"procCmd\",\"type\":\"string\"},{\"name\":\"procPID\",\"type\":\"long\"},{\"name\":\"Resource\",\"type\":\"string\"},{\"name\":\"Tags\",\"type\":\"string\"},{\"name\":\"Trace\",\"type\":\"string\"}],\"name\":\"event.Event\",\"type\":\"record\"}" +} + +func (r *Event) SchemaName() string { + return "event.Event" +} + +func (_ *Event) SetBoolean(v bool) { panic("Unsupported operation") } +func (_ *Event) SetInt(v int32) { panic("Unsupported operation") } +func (_ *Event) SetLong(v int64) { panic("Unsupported operation") } +func (_ *Event) SetFloat(v float32) { panic("Unsupported operation") } +func (_ *Event) SetDouble(v float64) { panic("Unsupported operation") } +func (_ *Event) SetBytes(v []byte) { panic("Unsupported operation") } +func (_ *Event) SetString(v string) { panic("Unsupported operation") } +func (_ *Event) SetUnionElem(v int64) { panic("Unsupported operation") } + +func (r *Event) Get(i int) types.Field { + switch i { + case 0: + return &types.Long{Target: &r.Ts} + case 1: + return &types.String{Target: &r.Description} + case 2: + return &types.String{Target: &r.Severity} + case 3: + return &types.String{Target: &r.ClusterID} + case 4: + return &types.String{Target: &r.NodeID} + case 5: + return &types.String{Target: &r.NodeIP} + case 6: + return &types.String{Target: &r.ContainerID} + case 7: + return &types.String{Target: &r.RecordType} + case 8: + return &types.String{Target: &r.OpFlags} + case 9: + return &types.String{Target: &r.PProcCmd} + case 10: + return &types.Long{Target: &r.PProcPID} + case 11: + return &types.String{Target: &r.ProcCmd} + case 12: + return &types.Long{Target: &r.ProcPID} + case 13: + return &types.String{Target: &r.Resource} + case 14: + return &types.String{Target: &r.Tags} + case 15: + return &types.String{Target: &r.Trace} + } + panic("Unknown field index") +} + +func (r *Event) SetDefault(i int) { + switch i { + } + panic("Unknown field index") +} + +func (r *Event) NullField(i int) { + switch i { + } + panic("Not a nullable field index") +} + +func (_ *Event) AppendMap(key string) types.Field { panic("Unsupported operation") } +func (_ *Event) AppendArray() types.Field { panic("Unsupported operation") } +func (_ *Event) Finalize() {} + +func (_ *Event) AvroCRC64Fingerprint() []byte { + return []byte(EventAvroCRC64Fingerprint) +} diff --git a/core/exporter/encoders/avro/occurrence/event/event_container.go b/core/exporter/encoders/avro/occurrence/event/event_container.go new file mode 100644 index 00000000..d22dc629 --- /dev/null +++ b/core/exporter/encoders/avro/occurrence/event/event_container.go @@ -0,0 +1,49 @@ +// Code generated by github.com/actgardner/gogen-avro/v7. DO NOT EDIT. 
+/* + * SOURCE: + * Event.avsc + */ +package event + +import ( + "io" + + "github.com/actgardner/gogen-avro/v7/compiler" + "github.com/actgardner/gogen-avro/v7/container" + "github.com/actgardner/gogen-avro/v7/vm" +) + +func NewEventWriter(writer io.Writer, codec container.Codec, recordsPerBlock int64) (*container.Writer, error) { + str := NewEvent() + return container.NewWriter(writer, codec, recordsPerBlock, str.Schema()) +} + +// container reader +type EventReader struct { + r io.Reader + p *vm.Program +} + +func NewEventReader(r io.Reader) (*EventReader, error) { + containerReader, err := container.NewReader(r) + if err != nil { + return nil, err + } + + t := NewEvent() + deser, err := compiler.CompileSchemaBytes([]byte(containerReader.AvroContainerSchema()), []byte(t.Schema())) + if err != nil { + return nil, err + } + + return &EventReader{ + r: containerReader, + p: deser, + }, nil +} + +func (r EventReader) Read() (*Event, error) { + t := NewEvent() + err := vm.Eval(r.r, r.p, t) + return t, err +} diff --git a/core/exporter/encoders/avro/occurrence/generate.sh b/core/exporter/encoders/avro/occurrence/generate.sh new file mode 100755 index 00000000..06de1832 --- /dev/null +++ b/core/exporter/encoders/avro/occurrence/generate.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# compile avro IDL to avro Schema +java -jar avro-tools-1.10.2.jar idl2schemata ./avdl/Event.avdl avsc/ + +# golang stub generation +# to install gogen-avro: go get github.com/actgardner/gogen-avro/v7/cmd/.. +gogen-avro --containers=true --package=event event ./avsc/Event.avsc diff --git a/core/exporter/encoders/constants.go b/core/exporter/encoders/constants.go new file mode 100644 index 00000000..a7242796 --- /dev/null +++ b/core/exporter/encoders/constants.go @@ -0,0 +1,61 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +// SysFlow record components +const ( + PROC = "proc" + PPROC = "pproc" + NET = "net" + FILEF = "file" + FLOW = "flow" + CONTAINER = "container" + POD = "pod" + SERVICE = "service" + KE = "k8s" + NODE = "node" + META = "meta" + + BEGIN_STATE = iota + PROC_STATE + PPROC_STATE + NET_STATE + FILE_STATE + FLOW_STATE + CONT_STATE + POD_STATE + SVC_STATE + KE_STATE + NODE_STATE + META_STATE +) + +// Export schema shared attribute names. +const ( + VERSION_ATTR = "version" + GROUP_ID_ATTR = "groupId" + OBSERVATIONS_ATTR = "observations" + POLICIES_ATTR = "policies" + ID_TAG_ATTR = "id" + DESC_ATTR = "desc" + PRIORITY_ATTR = "priority" + TAGS_ATTR = "tags" +) diff --git a/core/exporter/encoders/ecs.go b/core/exporter/encoders/ecs.go new file mode 100644 index 00000000..07244afa --- /dev/null +++ b/core/exporter/encoders/ecs.go @@ -0,0 +1,605 @@ +// +// Copyright (C) 2020 IBM Corporation. 
+// +// Authors: +// Andreas Schade +// Frederico Araujo +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +import ( + "encoding/binary" + "fmt" + "net" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/cespare/xxhash/v2" + "github.com/satta/gommunityid" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" + "github.com/tidwall/gjson" +) + +// JSONData is a map to serialize data to JSON. +type JSONData map[string]interface{} + +// ECSRecord is a struct for serializing ECS records. +type ECSRecord struct { + ID string `json:"-"` + Ts string `json:"@timestamp"` + Agent struct { + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + } `json:"agent,omitempty"` + Ecs struct { + Version string `json:"version,omitempty"` + } `json:"ecs,omitempty"` + Event JSONData `json:"event"` + Host JSONData `json:"host"` + Container JSONData `json:"container,omitempty"` + Orchestrator JSONData `json:"orchestrator,omitempty"` + Pod JSONData `json:"pod,omitempty"` + Service []JSONData `json:"service,omitempty"` + File JSONData `json:"file,omitempty"` + FileAction JSONData `json:"sf_file_action,omitempty"` + Network JSONData `json:"network,omitempty"` + Source JSONData `json:"source,omitempty"` + Destination JSONData `json:"destination,omitempty"` + Process JSONData `json:"process,omitempty"` + User JSONData `json:"user,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// ECSEncoder implements an ECS encoder for telemetry records. +type ECSEncoder struct { + config commons.Config + //jsonencoder JSONEncoder + batch []commons.EncodedData +} + +// NewECSEncoder instantiates an ECS encoder. +func NewECSEncoder(config commons.Config) Encoder { + return &ECSEncoder{ + config: config, + batch: make([]commons.EncodedData, 0, config.EventBuffer)} +} + +// Register registers the encoder to the codecs cache. +func (t *ECSEncoder) Register(codecs map[commons.Format]EncoderFactory) { + codecs[commons.ECSFormat] = NewECSEncoder +} + +// Encode encodes telemetry records into an ECS representation. +func (t *ECSEncoder) Encode(recs []*flatrecord.Record) ([]commons.EncodedData, error) { + t.batch = t.batch[:0] + for _, rec := range recs { + ecs := t.encode(rec) + t.batch = append(t.batch, ecs) + } + return t.batch, nil +} + +// Encodes a telemetry record into an ECS representation. 
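// Illustrative sketch (not part of the diff): the ECSRecord struct above marshals with
// encoding/json into an ECS-style document; the timestamp and field values below are made up.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/exporter/encoders"
)

func main() {
	r := &encoders.ECSRecord{
		Ts:    "2021-04-01T00:00:00.000Z",
		Event: encoders.JSONData{"kind": "event", "category": "file", "type": "change"},
		Host:  encoders.JSONData{"id": "node-1", "ip": "10.0.0.1"},
	}
	b, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}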
+func (t *ECSEncoder) encode(rec *flatrecord.Record) *ECSRecord { + ecs := &ECSRecord{ + ID: encodeID(rec), + Host: encodeHost(rec), + } + ecs.Agent.Version = t.config.Version + ecs.Agent.Type = ECS_AGENT_TYPE + ecs.Ecs.Version = t.config.EcsVersion + ecs.Ts = utils.ToIsoTimeStr(flatrecord.Mapper.MapInt(flatrecord.SF_TS)(rec)) + + // encode specific record components + sfType := flatrecord.Mapper.MapStr(flatrecord.SF_TYPE)(rec) + if sfType != sfgo.TyKEStr { + ecs.Container = encodeContainer(rec) + if flatrecord.Mapper.MapStr(flatrecord.SF_POD_ID)(rec) != sfgo.Zeros.String { + ecs.encodeOrchestrator(rec) + ecs.encodePod(rec) + } + ecs.Process = encodeProcess(rec) + ecs.User = encodeUser(rec) + } else { + ecs.encodeOrchestrator(rec) + } + + switch sfType { + case sfgo.TyNFStr: + ecs.encodeNetworkFlow(rec) + case sfgo.TyFFStr: + ecs.encodeFileFlow(rec) + case sfgo.TyFEStr: + ecs.encodeFileEvent(rec) + case sfgo.TyPEStr: + ecs.encodeProcessEvent(rec) + case sfgo.TyKEStr: + ecs.encodeK8sEvent(rec) + } + + // encode tags and policy information + tags := rec.Ctx.GetTags() + rules := rec.Ctx.GetRules() + if len(rules) > 0 { + reasons := make([]string, 0) + priority := int(policy.Low) + for _, r := range rules { + reasons = append(reasons, r.Name) + tags = append(tags, extracTags(r.Tags)...) + priority = utils.Max(priority, int(r.Priority)) + } + ecs.Event[ECS_EVENT_REASON] = strings.Join(reasons, ", ") + ecs.Event[ECS_EVENT_SEVERITY] = priority + } + if len(tags) > 0 { + ecs.Tags = tags + } + + return ecs +} + +var byteInt64 []byte = make([]byte, 8) + +// encodeID returns the ECS document identifier. +func encodeID(rec *flatrecord.Record) string { + h := xxhash.New() + t := flatrecord.Mapper.MapStr(flatrecord.SF_TYPE)(rec) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_NODE_ID)(rec))) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_ID)(rec))) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.TS_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.TID_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.PROC_OID_CREATETS_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.EV_PROC_OPFLAGS_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + switch t { + case sfgo.TyFFStr, sfgo.TyFEStr: + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_FILE_OID)(rec))) + case sfgo.TyNFStr: + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.FL_NETW_SIP_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.FL_NETW_SPORT_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.FL_NETW_DIP_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.FL_NETW_DPORT_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.FL_NETW_PROTO_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + case sfgo.TyKEStr: + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.K8SE_ACTION_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(rec.GetInt(sfgo.K8SE_KIND_INT, sfgo.SYSFLOW_SRC))) + h.Write(byteInt64) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_K8SE_MESSAGE)(rec))) + } + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// 
encodeNetworkFlow populates the ECS representatiom of a NetworkFlow record. +func (ecs *ECSRecord) encodeNetworkFlow(rec *flatrecord.Record) { + rbytes := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_RBYTES)(rec) + rops := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_ROPS)(rec) + wbytes := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_WBYTES)(rec) + wops := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_WOPS)(rec) + sip := flatrecord.Mapper.MapStr(flatrecord.SF_NET_SIP)(rec) + dip := flatrecord.Mapper.MapStr(flatrecord.SF_NET_DIP)(rec) + sport := flatrecord.Mapper.MapInt(flatrecord.SF_NET_SPORT)(rec) + dport := flatrecord.Mapper.MapInt(flatrecord.SF_NET_DPORT)(rec) + proto := flatrecord.Mapper.MapInt(flatrecord.SF_NET_PROTO)(rec) + + cid, _ := gommunityid.GetCommunityIDByVersion(1, 0) + ft := gommunityid.MakeFlowTuple(net.ParseIP(sip), net.ParseIP(dip), uint16(sport), uint16(dport), uint8(proto)) + + // Calculate Base64-encoded value + ecs.Network = JSONData{ + ECS_NET_BYTES: rbytes + wbytes, + ECS_NET_CID: cid.CalcBase64(ft), + ECS_NET_IANA: strconv.FormatInt(proto, 10), + ECS_NET_PROTO: sfgo.GetProto(proto), + } + ecs.Source = JSONData{ + ECS_ENDPOINT_IP: sip, + ECS_ENDPOINT_PORT: sport, + ECS_ENDPOINT_ADDR: sip, + ECS_ENDPOINT_BYTES: wbytes, + ECS_ENDPOINT_PACKETS: wops, + } + ecs.Destination = JSONData{ + ECS_ENDPOINT_IP: dip, + ECS_ENDPOINT_PORT: dport, + ECS_ENDPOINT_ADDR: dip, + ECS_ENDPOINT_BYTES: rbytes, + ECS_ENDPOINT_PACKETS: rops, + } + ecs.Event = encodeEvent(rec, ECS_CAT_NETWORK, ECS_TYPE_CONNECTION, ECS_CAT_NETWORK+"-"+ECS_ACTION_TRAFFIC) +} + +// encodeFileFlow populates the ECS representatiom of a FF record +func (ecs *ECSRecord) encodeFileFlow(rec *flatrecord.Record) { + opFlags := rec.GetInt(sfgo.EV_PROC_OPFLAGS_INT, sfgo.SYSFLOW_SRC) + rbytes := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_RBYTES)(rec) + rops := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_ROPS)(rec) + wbytes := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_WBYTES)(rec) + wops := flatrecord.Mapper.MapInt(flatrecord.SF_FLOW_WOPS)(rec) + category := ECS_CAT_FILE + eventType := ECS_TYPE_ACCESS + action := category + "-" + eventType + if opFlags&sfgo.OP_READ_RECV == sfgo.OP_READ_RECV && (rbytes > 0 || rops > 0) { + action = action + "-" + ECS_ACTION_READ + } + if opFlags&sfgo.OP_WRITE_SEND == sfgo.OP_WRITE_SEND && (wbytes > 0 || wops > 0) { + eventType = ECS_TYPE_CHANGE + action = action + "-" + ECS_ACTION_WRITE + } + ecs.Event = encodeEvent(rec, category, eventType, action) + ecs.File = encodeFile(rec) + if rbytes > 0 || rops > 0 || wbytes > 0 || wops > 0 { + ecs.FileAction = JSONData{ + ECS_SF_FA_RBYTES: rbytes, + ECS_SF_FA_ROPS: rops, + ECS_SF_FA_WBYTES: wbytes, + ECS_SF_FA_WOPS: wops, + } + } +} + +// encodeFileEvent populates the ECS representatiom of a FE record +func (ecs *ECSRecord) encodeFileEvent(rec *flatrecord.Record) { + opFlags := rec.GetInt(sfgo.EV_PROC_OPFLAGS_INT, sfgo.SYSFLOW_SRC) + targetPath := flatrecord.Mapper.MapStr(flatrecord.SF_FILE_NEWPATH)(rec) + ecs.File = encodeFile(rec) + category := ECS_CAT_FILE + eventType := ECS_TYPE_CHANGE + action := category + "-" + eventType + if opFlags&sfgo.OP_MKDIR == sfgo.OP_MKDIR { + category = ECS_CAT_DIR + eventType = ECS_TYPE_CREATE + action = category + "-" + ECS_ACTION_CREATE + } else if opFlags&sfgo.OP_RMDIR == sfgo.OP_RMDIR { + category = ECS_CAT_DIR + eventType = ECS_TYPE_DELETE + action = category + "-" + ECS_ACTION_DELETE + } else if opFlags&sfgo.OP_UNLINK == sfgo.OP_UNLINK { + eventType = ECS_TYPE_DELETE + action = category + "-" + ECS_ACTION_DELETE 
+ } else if opFlags&sfgo.OP_SYMLINK == sfgo.OP_SYMLINK || opFlags&sfgo.OP_LINK == sfgo.OP_LINK { + action = category + "-" + ECS_ACTION_LINK + ecs.File[ECS_FILE_TARGET] = targetPath + } else if opFlags&sfgo.OP_RENAME == sfgo.OP_RENAME { + action = category + "-" + ECS_ACTION_RENAME + ecs.File[ECS_FILE_TARGET] = targetPath + } + ecs.Event = encodeEvent(rec, category, eventType, action) +} + +// encodeProcessEvent populates the ECS representatiom of a PE record +func (ecs *ECSRecord) encodeProcessEvent(rec *flatrecord.Record) { + opFlags := rec.GetInt(sfgo.EV_PROC_OPFLAGS_INT, sfgo.SYSFLOW_SRC) + pid := flatrecord.Mapper.MapInt(flatrecord.SF_PROC_PID)(rec) + tid := flatrecord.Mapper.MapInt(flatrecord.SF_PROC_TID)(rec) + category := ECS_CAT_PROCESS + eventType := ECS_TYPE_START + + if opFlags&sfgo.OP_EXIT == sfgo.OP_EXIT { + if pid != tid { + eventType = ECS_TYPE_TEXIT + } else { + eventType = ECS_TYPE_EXIT + } + } else if opFlags&sfgo.OP_CLONE == sfgo.OP_CLONE || opFlags&sfgo.OP_EXEC == sfgo.OP_EXEC { + if pid != tid { + eventType = ECS_TYPE_TSTART + } + } else if opFlags&sfgo.OP_SETUID == sfgo.OP_SETUID { + eventType = ECS_TYPE_CHANGE + } + + action := category + "-" + eventType + ecs.Event = encodeEvent(rec, category, eventType, action) +} + +func k8sActionToEventType(rec *flatrecord.Record) string { + eventType := ECS_TYPE_INFO + am := flatrecord.Mapper.Mappers[flatrecord.SF_K8SE_ACTION] + switch sfgo.K8sAction(rec.Fr.Ints[am.Source][am.FlatIndex]) { + case sfgo.K8sActionK8S_COMPONENT_ADDED: + eventType = ECS_TYPE_CREATE + case sfgo.K8sActionK8S_COMPONENT_DELETED: + eventType = ECS_TYPE_DELETE + case sfgo.K8sActionK8S_COMPONENT_MODIFIED: + eventType = ECS_TYPE_CHANGE + case sfgo.K8sActionK8S_COMPONENT_ERROR: + eventType = ECS_TYPE_ERROR + } + return eventType +} + +// encodeK8sEvent populates the ECS representatiom of a KE record +func (ecs *ECSRecord) encodeK8sEvent(rec *flatrecord.Record) { + category := ECS_CAT_ORCH + eventType := k8sActionToEventType(rec) + action := flatrecord.Mapper.MapStr(flatrecord.SF_K8SE_ACTION)(rec) + + ecs.Event = encodeEvent(rec, category, eventType, action) + msgStr := flatrecord.Mapper.MapStr(flatrecord.SF_K8SE_MESSAGE)(rec) + ecs.Event[ECS_EVENT_ORIGINAL] = msgStr + + msg := gjson.Parse(msgStr) + ecs.Orchestrator = JSONData{ + ECS_ORCHESTRATOR_NAMESPACE: msg.Get("items.0.namespace").String(), + ECS_ORCHESTRATOR_RESOURCE: JSONData{ + ECS_RESOURCE_TYPE: strings.ToLower(msg.Get("kind").String()), + ECS_RESOURCE_NAME: msg.Get("items.0.name").String(), + }, + ECS_ORCHESTRATOR_TYPE: "kubernetes", + } +} + +// encodeOrchestrator creates an ECS orchestrator field. +func (ecs *ECSRecord) encodeOrchestrator(rec *flatrecord.Record) { + ecs.Orchestrator = JSONData{ + ECS_ORCHESTRATOR_NAMESPACE: flatrecord.Mapper.MapStr(flatrecord.SF_POD_NAMESPACE)(rec), + ECS_ORCHESTRATOR_RESOURCE: JSONData{ + ECS_RESOURCE_TYPE: "pod", + ECS_RESOURCE_NAME: flatrecord.Mapper.MapStr(flatrecord.SF_POD_NAME)(rec), + }, + ECS_ORCHESTRATOR_TYPE: "kubernetes", + } +} + +// encodePod creates a custom ECS pod field. 
+func (ecs *ECSRecord) encodePod(rec *flatrecord.Record) { + ecs.Pod = JSONData{ + ECS_POD_TS: utils.ToIsoTimeStr(flatrecord.Mapper.MapInt(flatrecord.SF_POD_TS)(rec)), + ECS_POD_ID: flatrecord.Mapper.MapStr(flatrecord.SF_POD_ID)(rec), + ECS_POD_NAME: flatrecord.Mapper.MapStr(flatrecord.SF_POD_NAME)(rec), + ECS_POD_NODENAME: flatrecord.Mapper.MapStr(flatrecord.SF_POD_NODENAME)(rec), + ECS_POD_NAMESPACE: flatrecord.Mapper.MapStr(flatrecord.SF_POD_NAMESPACE)(rec), + ECS_POD_HOSTIP: utils.ToIPStrArray(flatrecord.Mapper.MapIntArray(flatrecord.SF_POD_HOSTIP)(rec)), + ECS_POD_INTERNALIP: utils.ToIPStrArray(flatrecord.Mapper.MapIntArray(flatrecord.SF_POD_INTERNALIP)(rec)), + ECS_POD_RESTARTCOUNT: flatrecord.Mapper.MapInt(flatrecord.SF_POD_RESTARTCOUNT)(rec), + } + + services := flatrecord.Mapper.MapSvcArray(flatrecord.SF_POD_SERVICES)(rec) + if services != sfgo.Zeros.Any && len(*services) > 0 { + ecs.encodeService(services) + } +} + +// encodeServices creates an ECS service field. +func (ecs *ECSRecord) encodeService(svcs *[]*sfgo.Service) { + ecs.Service = make([]JSONData, len(*svcs)) + for i, svc := range *svcs { + ecs.Service[i] = JSONData{ + ECS_SERVICE_ID: svc.Id, + ECS_SERVICE_NAME: svc.Name, + ECS_SERVICE_NAMESPACE: svc.Namespace, + ECS_SERVICE_CLUSTERIP: utils.ToIPStrArray(&svc.ClusterIP), + ECS_SERVICE_PORTLIST: encodePortList(&svc.PortList), + } + } +} + +// encodePortList creates a ports field for an ECS service field. +func encodePortList(pl *[]*sfgo.Port) []JSONData { + ports := make([]JSONData, len(*pl)) + for i, p := range *pl { + ports[i] = JSONData{ + ECS_SERVICE_PORT: p.Port, + ECS_SERVICE_TARGETPORT: p.TargetPort, + ECS_SERVICE_NODEPORT: p.NodePort, + ECS_SERVICE_PROTO: p.Proto, + } + } + return ports +} + +// encodeContainer creates an ECS container field. +func encodeContainer(rec *flatrecord.Record) JSONData { + var container JSONData + cid := flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_ID)(rec) + if cid != sfgo.Zeros.String { + container = JSONData{ + ECS_CONTAINER_ID: cid, + ECS_CONTAINER_RUNTIME: flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_TYPE)(rec), + ECS_CONTAINER_PRIV: flatrecord.Mapper.MapInt(flatrecord.SF_CONTAINER_PRIVILEGED)(rec) != 0, + ECS_CONTAINER_NAME: flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_NAME)(rec), + } + imageid := flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_IMAGEID)(rec) + if imageid != sfgo.Zeros.String { + image := JSONData{ + ECS_IMAGE_ID: imageid, + ECS_IMAGE_NAME: flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_IMAGE)(rec), + } + container[ECS_IMAGE] = image + } + } + return container +} + +// encodeHost creates the ECS host field +func encodeHost(rec *flatrecord.Record) JSONData { + return JSONData{ + ECS_HOST_ID: flatrecord.Mapper.MapStr(flatrecord.SF_NODE_ID)(rec), + ECS_HOST_IP: flatrecord.Mapper.MapStr(flatrecord.SF_NODE_IP)(rec), + } +} + +// encodeUser creates an ECS user field using user and group of the actual process. 
+func encodeUser(rec *flatrecord.Record) JSONData { + gname := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_GROUP)(rec) + group := JSONData{ + ECS_GROUP_ID: flatrecord.Mapper.MapInt(flatrecord.SF_PROC_GID)(rec), + } + if gname != sfgo.Zeros.String { + group[ECS_GROUP_NAME] = gname + } + uname := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_USER)(rec) + user := JSONData{ + ECS_GROUP: group, + ECS_USER_ID: flatrecord.Mapper.MapInt(flatrecord.SF_PROC_UID)(rec), + } + if uname != sfgo.Zeros.String { + user[ECS_USER_NAME] = uname + } + return user +} + +// encodeProcess creates an ECS process field including the nested parent process. +func encodeProcess(rec *flatrecord.Record) JSONData { + exe := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_EXE)(rec) + args_count := 0 + if flatrecord.Mapper.MapStr(flatrecord.SF_PROC_ARGS)(rec) != "" { + args_count = len(strings.Split(flatrecord.Mapper.MapStr(flatrecord.SF_PROC_ARGS)(rec), " ")) + } + process := JSONData{ + ECS_PROC_EXE: exe, + ECS_PROC_ARGS: flatrecord.Mapper.MapStr(flatrecord.SF_PROC_ARGS)(rec), + ECS_PROC_ARGS_COUNT: args_count, + ECS_PROC_CMDLINE: flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(rec), + ECS_PROC_PID: flatrecord.Mapper.MapInt(flatrecord.SF_PROC_PID)(rec), + ECS_PROC_START: utils.ToIsoTimeStr(flatrecord.Mapper.MapInt(flatrecord.SF_PROC_CREATETS)(rec)), + ECS_PROC_NAME: path.Base(exe), + ECS_PROC_THREAD: JSONData{ECS_PROC_TID: flatrecord.Mapper.MapInt(flatrecord.SF_PROC_TID)(rec)}, + } + pexe := flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_EXE)(rec) + pargs_count := 0 + if flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_ARGS)(rec) != "" { + pargs_count = len(strings.Split(flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_ARGS)(rec), " ")) + } + parent := JSONData{ + ECS_PROC_EXE: pexe, + ECS_PROC_ARGS: flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_ARGS)(rec), + ECS_PROC_ARGS_COUNT: pargs_count, + ECS_PROC_CMDLINE: flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_CMDLINE)(rec), + ECS_PROC_PID: flatrecord.Mapper.MapInt(flatrecord.SF_PPROC_PID)(rec), + ECS_PROC_START: utils.ToIsoTimeStr(flatrecord.Mapper.MapInt(flatrecord.SF_PPROC_CREATETS)(rec)), + ECS_PROC_NAME: path.Base(pexe), + } + process[ECS_PROC_PARENT] = parent + return process +} + +// encodeEvent creates the central ECS event field and sets the classification attributes +func encodeEvent(rec *flatrecord.Record, category string, eventType string, action string) JSONData { + start := flatrecord.Mapper.MapInt(flatrecord.SF_TS)(rec) + end := flatrecord.Mapper.MapInt(flatrecord.SF_ENDTS)(rec) + if end == sfgo.Zeros.Int64 { + end = start + } + sfType := flatrecord.Mapper.MapStr(flatrecord.SF_TYPE)(rec) + sfRet := flatrecord.Mapper.MapInt(flatrecord.SF_RET)(rec) + + event := JSONData{ + ECS_EVENT_CATEGORY: category, + ECS_EVENT_TYPE: eventType, + ECS_EVENT_ACTION: action, + ECS_EVENT_SFTYPE: sfType, + ECS_EVENT_START: utils.ToIsoTimeStr(start), + ECS_EVENT_END: utils.ToIsoTimeStr(end), + ECS_EVENT_DURATION: end - start, + } + + if rec.Ctx.IsAlert() { + event[ECS_EVENT_KIND] = ECS_KIND_ALERT + } else { + event[ECS_EVENT_KIND] = ECS_KIND_EVENT + } + + if sfType == sfgo.TyPEStr || sfType == sfgo.TyFEStr { + event[ECS_EVENT_SFRET] = sfRet + } + return event +} + +// encodeFile creates an ECS file field +func encodeFile(rec *flatrecord.Record) JSONData { + opFlags := rec.GetInt(sfgo.EV_PROC_OPFLAGS_INT, sfgo.SYSFLOW_SRC) + ft := flatrecord.Mapper.MapStr(flatrecord.SF_FILE_TYPE)(rec) + fpath := flatrecord.Mapper.MapStr(flatrecord.SF_FILE_PATH)(rec) + fd := 
flatrecord.Mapper.MapInt(flatrecord.SF_FILE_FD)(rec) + pid := flatrecord.Mapper.MapInt(flatrecord.SF_PROC_PID)(rec) + + fileType := encodeFileType(ft) + if opFlags&sfgo.OP_SYMLINK == sfgo.OP_SYMLINK { + fileType = "symlink" + } + file := JSONData{ECS_FILE_TYPE: fileType} + + var name string + if fpath != sfgo.Zeros.String { + name = path.Base(fpath) + } else { + fpath = fmt.Sprintf("/proc/%d/fd/%d", pid, fd) + name = strconv.FormatInt(fd, 10) + } + + if fileType == "dir" { + file[ECS_FILE_DIR] = fpath + } else { + file[ECS_FILE_NAME] = name + file[ECS_FILE_DIR] = filepath.Dir(fpath) + if fpath != name { + file[ECS_FILE_PATH] = fpath + } + } + + return file +} + +func encodeFileType(ft string) string { + var fileType string + switch ft { + case "f": + fileType = "file" + case "d": + fileType = "dir" + case "u": + fileType = "socket" + case "p": + fileType = "pipe" + case "?": + fallthrough + default: + fileType = "unknown" + } + return fileType +} + +func extracTags(tags []policy.EnrichmentTag) []string { + s := make([]string, 0) + for _, v := range tags { + switch v := v.(type) { + case []string: + s = append(s, v...) + default: + s = append(s, string(fmt.Sprintf("%v", v))) + } + } + return s +} + +// Cleanup cleans up resources. +func (t *ECSEncoder) Cleanup() {} diff --git a/core/exporter/encoders/ecsconstants.go b/core/exporter/encoders/ecsconstants.go new file mode 100644 index 00000000..5a994590 --- /dev/null +++ b/core/exporter/encoders/ecsconstants.go @@ -0,0 +1,171 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Andreas Schade +// Frederico Araujo +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +// ECS_AGENT_TYPE denotes the ECS agent type. +const ECS_AGENT_TYPE = "SysFlow" + +// ECS attributes used in JSONData. 
+const ( + ECS_CONTAINER_ID = "id" + ECS_CONTAINER_NAME = "name" + ECS_CONTAINER_RUNTIME = "runtime" + ECS_CONTAINER_PRIV = "sf_privileged" + + ECS_IMAGE = "image" + ECS_IMAGE_ID = "id" + ECS_IMAGE_NAME = "name" + + ECS_HOST_ID = "id" + ECS_HOST_IP = "ip" + + ECS_EVENT_KIND = "kind" + ECS_EVENT_CATEGORY = "category" + ECS_EVENT_TYPE = "type" + ECS_EVENT_ACTION = "action" + ECS_EVENT_ORIGINAL = "original" + ECS_EVENT_START = "start" + ECS_EVENT_END = "end" + ECS_EVENT_DURATION = "duration" + ECS_EVENT_SFTYPE = "sf_type" + ECS_EVENT_SFRET = "sf_ret" + ECS_EVENT_REASON = "reason" + ECS_EVENT_SEVERITY = "severity" + + ECS_FILE_DIR = "directory" + ECS_FILE_NAME = "name" + ECS_FILE_PATH = "path" + ECS_FILE_TARGET = "target_path" + ECS_FILE_TYPE = "type" + + ECS_GROUP = "group" + ECS_GROUP_ID = "id" + ECS_GROUP_NAME = "name" + + // used in proc and file fields + ECS_HASH = "hash" + ECS_HASH_MD5 = "md5" + ECS_HASH_SHA1 = "sha1" + ECS_HASH_SHA256 = "sha256" + + ECS_NET_BYTES = "bytes" + ECS_NET_CID = "community_id" + ECS_NET_IANA = "iana_number" + ECS_NET_PROTO = "protocol" + + // used in source and destination fields + ECS_ENDPOINT_ADDR = "address" + ECS_ENDPOINT_BYTES = "bytes" + ECS_ENDPOINT_IP = "ip" + ECS_ENDPOINT_PACKETS = "packets" + ECS_ENDPOINT_PORT = "port" + + ECS_ORCHESTRATOR_NAMESPACE = "namespace" + ECS_ORCHESTRATOR_RESOURCE = "resource" + ECS_RESOURCE_NAME = "name" + ECS_RESOURCE_TYPE = "type" + ECS_ORCHESTRATOR_TYPE = "type" + + ECS_POD_TS = "ts" + ECS_POD_ID = "id" + ECS_POD_NAME = "name" + ECS_POD_NAMESPACE = "namespace" + ECS_POD_NODENAME = "nodename" + ECS_POD_HOSTIP = "hostip" + ECS_POD_INTERNALIP = "internalip" + ECS_POD_RESTARTCOUNT = "restartcnt" + + ECS_PROC_ARGS_COUNT = "args_count" + ECS_PROC_ARGS = "args" + ECS_PROC_CMDLINE = "command_line" + ECS_PROC_EXE = "executable" + ECS_PROC_NAME = "name" + ECS_PROC_PARENT = "parent" + ECS_PROC_PID = "pid" + ECS_PROC_THREAD = "thread" + ECS_PROC_TID = "id" + ECS_PROC_START = "start" + + ECS_SF_FA_RBYTES = "bytes_read" + ECS_SF_FA_ROPS = "read_ops" + ECS_SF_FA_WBYTES = "bytes_written" + ECS_SF_FA_WOPS = "write_ops" + + ECS_SERVICE_ID = "id" + ECS_SERVICE_NAME = "name" + ECS_SERVICE_NAMESPACE = "namespace" + ECS_SERVICE_CLUSTERIP = "clusterip" + ECS_SERVICE_PORTLIST = "ports" + ECS_SERVICE_PORT = "port" + ECS_SERVICE_TARGETPORT = "targetport" + ECS_SERVICE_NODEPORT = "nodeport" + ECS_SERVICE_PROTO = "proto" + + ECS_USER_ID = "id" + ECS_USER_NAME = "name" + + ECS_THREAT_FRAMEWORK = "framework" + ECS_THREAT_TECHNIQUE_ID = "id" + + ECS_TAGS = "tags" +) + +// ECS kind values. +const ( + ECS_KIND_ALERT = "alert" + ECS_KIND_EVENT = "event" +) + +// ECS category values. +const ( + ECS_CAT_DIR = "directory" + ECS_CAT_FILE = "file" + ECS_CAT_NETWORK = "network" + ECS_CAT_PROCESS = "process" + ECS_CAT_ORCH = "orchestration" +) + +// ECS type values. +const ( + ECS_TYPE_ACCESS = "access" + ECS_TYPE_CHANGE = "change" + ECS_TYPE_CONNECTION = "connection" + ECS_TYPE_CREATE = "creation" + ECS_TYPE_DELETE = "deletion" + ECS_TYPE_START = "start" + ECS_TYPE_EXIT = "exit" + ECS_TYPE_TSTART = "thread-start" + ECS_TYPE_TEXIT = "thread-exit" + ECS_TYPE_ERROR = "error" + ECS_TYPE_INFO = "info" +) + +// ECS action suffixes that differ from ECS types. 
+// Action values are typically - or - +const ( + ECS_ACTION_READ = "read" + ECS_ACTION_WRITE = "write" + ECS_ACTION_CREATE = "create" + ECS_ACTION_DELETE = "delete" + ECS_ACTION_LINK = "link" + ECS_ACTION_RENAME = "rename" + ECS_ACTION_TRAFFIC = "connection-traffic" +) diff --git a/core/exporter/encoders/encoder.go b/core/exporter/encoders/encoder.go new file mode 100644 index 00000000..f8e1e1e0 --- /dev/null +++ b/core/exporter/encoders/encoder.go @@ -0,0 +1,36 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +import ( + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +// Encoder converts and serializes a record into export data. +type Encoder interface { + Register(codecs map[commons.Format]EncoderFactory) + Encode(recs []*flatrecord.Record) ([]commons.EncodedData, error) + Cleanup() +} + +// EncoderFactory defines a factory type for record encoders. +type EncoderFactory func(commons.Config) Encoder diff --git a/core/exporter/encoders/json.go b/core/exporter/encoders/json.go new file mode 100644 index 00000000..1f3fe690 --- /dev/null +++ b/core/exporter/encoders/json.go @@ -0,0 +1,761 @@ +// +// Copyright (C) 2021 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// Andreas Schade +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +import ( + "path/filepath" + "reflect" + "strings" + "unicode/utf8" + + "github.com/mailru/easyjson/jwriter" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +// JSONEncoder is a JSON encoder. +type JSONEncoder struct { + config commons.Config + fieldCache []*flatrecord.FieldValue + writer *jwriter.Writer + buf []byte + batch []commons.EncodedData +} + +// NewJSONEncoder instantiates a JSON encoder. 
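// Illustrative sketch (not part of the diff): how an exporter could build its codec
// registry from the Encoder and EncoderFactory types above and then select a factory by
// configured output format. The local codecs map and the zero-value commons.Config are
// assumptions about the calling code.
package main

import (
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
	"github.com/sysflow-telemetry/sf-processor/core/exporter/encoders"
)

func main() {
	codecs := make(map[commons.Format]encoders.EncoderFactory)
	encoders.NewJSONEncoder(commons.Config{}).Register(codecs)
	encoders.NewECSEncoder(commons.Config{}).Register(codecs)

	factory := codecs[commons.ECSFormat] // pick the encoder for the configured format
	enc := factory(commons.Config{})
	defer enc.Cleanup()
	fmt.Printf("registered %d codecs\n", len(codecs))
}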
+func NewJSONEncoder(config commons.Config) Encoder { + return &JSONEncoder{ + fieldCache: flatrecord.FieldValues, + config: config, + writer: &jwriter.Writer{}, + buf: make([]byte, 0, BUFFER_SIZE), + batch: make([]commons.EncodedData, 0, config.EventBuffer)} +} + +// Register registers the encoder to the codecs cache. +func (t *JSONEncoder) Register(codecs map[commons.Format]EncoderFactory) { + codecs[commons.JSONFormat] = NewJSONEncoder +} + +// Encode encodes telemetry records into a JSON representation. +func (t *JSONEncoder) Encode(recs []*flatrecord.Record) (data []commons.EncodedData, err error) { + t.batch = t.batch[:0] + for _, rec := range recs { + var j commons.EncodedData + if j, err = t.encode(rec); err != nil { + return nil, err + } + t.batch = append(t.batch, j) + } + return t.batch, nil +} + +// Encodes a telemetry record into a JSON representation. +func (t *JSONEncoder) encode(rec *flatrecord.Record) (commons.EncodedData, error) { + t.writer.RawString(VERSION_STR) + t.writer.RawString(t.config.JSONSchemaVersion) + t.writer.RawByte(COMMA) + state := BEGIN_STATE + sftype := flatrecord.Mapper.MapStr(flatrecord.SF_TYPE)(rec) + + pprocID := flatrecord.Mapper.MapInt(flatrecord.SF_PPROC_PID)(rec) + pprocExists := !reflect.ValueOf(pprocID).IsZero() + ct := flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_ID)(rec) + ctExists := !reflect.ValueOf(ct).IsZero() + pd := flatrecord.Mapper.MapStr(flatrecord.SF_POD_ID)(rec) + pdExists := !reflect.ValueOf(pd).IsZero() + existed := true + + for _, fv := range t.fieldCache { + numFields := len(fv.FieldSects) + if numFields == 2 { + t.writeAttribute(fv, 1, rec) + t.writer.RawByte(COMMA) + } else if numFields == 3 { + if sftype == sfgo.TyKEStr { + switch fv.Entry.Section { + case flatrecord.SectK8sEvt: + if state != KE_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(KE) + t.writeAttribute(fv, 2, rec) + state = KE_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectNode: + if state != NODE_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(NODE) + t.writeAttribute(fv, 2, rec) + state = NODE_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectMeta: + if state != META_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(META) + t.writeAttribute(fv, 2, rec) + state = META_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + } + continue + } + + switch fv.Entry.Section { + case flatrecord.SectProc: + if state != PROC_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(PROC) + t.writeAttribute(fv, 2, rec) + state = PROC_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectPProc: + if state != PPROC_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if pprocExists { + existed = true + t.writeSectionBegin(PPROC) + t.writeAttribute(fv, 2, rec) + } else { + existed = false + } + state = PPROC_STATE + } else if pprocExists { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectNet: + if state != NET_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if sftype == sfgo.TyNFStr { 
+ t.writeSectionBegin(NET) + t.writeAttribute(fv, 2, rec) + existed = true + } else { + existed = false + } + state = NET_STATE + } else if sftype == sfgo.TyNFStr { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectFile: + if state != FILE_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if sftype == sfgo.TyFFStr || sftype == sfgo.TyFEStr { + t.writeSectionBegin(FILEF) + t.writeAttribute(fv, 2, rec) + existed = true + } else { + existed = false + } + state = FILE_STATE + } else if sftype == sfgo.TyFFStr || sftype == sfgo.TyFEStr { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectFlow: + if state != FLOW_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if sftype == sfgo.TyFFStr || sftype == sfgo.TyNFStr { + t.writeSectionBegin(FLOW) + t.writeAttribute(fv, 2, rec) + existed = true + } else { + existed = false + } + state = FLOW_STATE + } else if sftype == sfgo.TyFFStr || sftype == sfgo.TyNFStr { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectCont: + if state != CONT_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if ctExists { + t.writeSectionBegin(CONTAINER) + t.writeAttribute(fv, 2, rec) + existed = true + } else { + existed = false + } + state = CONT_STATE + } else if ctExists { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectPod: + if state != POD_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + if pdExists { + t.writeSectionBegin(POD) + t.writeAttribute(fv, 2, rec) + existed = true + } else { + existed = false + } + state = POD_STATE + } else if pdExists { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectNode: + if state != NODE_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(NODE) + t.writeAttribute(fv, 2, rec) + state = NODE_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + case flatrecord.SectMeta: + if state != META_STATE { + if state != BEGIN_STATE && existed { + t.writer.RawString(END_CURLY_COMMA) + } + existed = true + t.writeSectionBegin(META) + t.writeAttribute(fv, 2, rec) + state = META_STATE + } else { + t.writer.RawByte(COMMA) + t.writeAttribute(fv, 2, rec) + } + } + } + } + t.writer.RawByte(END_CURLY) + + // Encode policies + numRules := len(rec.Ctx.GetRules()) + rtags := make([]string, 0) + if numRules > 0 { + t.writer.RawString(POLICIES) + for num, r := range rec.Ctx.GetRules() { + t.writer.RawString(ID_TAG) + t.writer.String(r.Name) + t.writer.RawString(DESC) + t.writer.String(r.Desc) + t.writer.RawString(PRIORITY) + t.writer.Int64(int64(r.Priority)) + t.writer.RawByte(END_CURLY) + if num < (numRules - 1) { + t.writer.RawByte(COMMA) + } + + for _, tag := range r.Tags { + switch tag := tag.(type) { + case []string: + rtags = append(rtags, tag...) 
+ default: + rtags = append(rtags, tag.(string)) + } + } + } + t.writer.RawByte(END_SQUARE) + } + + // Encode tags as a list of record tag context plus all rule tags + numTags := len(rtags) + len(rec.Ctx.GetTags()) + if numTags > 0 { + currentTag := 0 + t.writer.RawString(TAGS) + for _, tag := range rec.Ctx.GetTags() { + t.writer.String(tag) + if currentTag < (numTags - 1) { + t.writer.RawByte(COMMA) + } + currentTag++ + } + for _, tag := range rtags { + t.writer.String(tag) + if currentTag < (numTags - 1) { + t.writer.RawByte(COMMA) + } + currentTag++ + } + t.writer.RawByte(END_SQUARE) + } + t.writer.RawByte(END_CURLY) + + // BuildBytes returns writer data as a single byte slice. It tries to reuse buf. + //return t.writer.BuildBytes(t.buf) + return t.writer.BuildBytes() +} + +func (t *JSONEncoder) writeAttribute(fv *flatrecord.FieldValue, fieldID int, rec *flatrecord.Record) { + t.writer.RawByte(DOUBLE_QUOTE) + name := fv.FieldSects[fieldID] + if strings.HasSuffix(name, "+") { + t.writer.RawString(name[:len(name)-1]) + } else { + t.writer.RawString(name) + } + t.writer.RawString(QUOTE_COLON) + MapJSON(fv, t.writer, rec) +} + +func (t *JSONEncoder) writeSectionBegin(section string) { + t.writer.RawByte(DOUBLE_QUOTE) + t.writer.RawString(section) + t.writer.RawString(QUOTE_COLON_CURLY) +} + +func mapOpFlags(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + opflags := r.GetInt(fv.Entry.FlatIndex, fv.Entry.Source) + rtype, _ := sfgo.ParseRecordType(r.GetInt(sfgo.SF_REC_TYPE, fv.Entry.Source)) + flags := sfgo.GetOpFlags(int32(opflags), rtype) + mapStrArray(writer, flags) +} + +func mapStrArray(writer *jwriter.Writer, ss []string) { + l := len(ss) + writer.RawByte(BEGIN_SQUARE) + for idx, s := range ss { + writer.RawByte(DOUBLE_QUOTE) + writer.RawString(s) + writer.RawByte(DOUBLE_QUOTE) + if idx < (l - 1) { + writer.RawByte(COMMA) + } + } + writer.RawByte(END_SQUARE) + +} + +func mapIPStr(ip int64, w *jwriter.Writer) { + w.Int64(ip >> 0 & 0xFF) + w.RawByte(PERIOD) + w.Int64(ip >> 8 & 0xFF) + w.RawByte(PERIOD) + w.Int64(ip >> 16 & 0xFF) + w.RawByte(PERIOD) + w.Int64(ip >> 24 & 0xFF) +} + +func mapIPs(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + srcIP := r.GetInt(sfgo.FL_NETW_SIP_INT, fv.Entry.Source) + dstIP := r.GetInt(sfgo.FL_NETW_DIP_INT, fv.Entry.Source) + writer.RawByte(BEGIN_SQUARE) + writer.RawByte(DOUBLE_QUOTE) + mapIPStr(srcIP, writer) + writer.RawByte(DOUBLE_QUOTE) + writer.RawByte(COMMA) + writer.RawByte(DOUBLE_QUOTE) + mapIPStr(dstIP, writer) + writer.RawByte(DOUBLE_QUOTE) + writer.RawByte(END_SQUARE) +} + +func mapIPArray(ips *[]int64, writer *jwriter.Writer) { + writer.RawByte(BEGIN_SQUARE) + for _, ip := range *ips { + writer.RawByte(DOUBLE_QUOTE) + mapIPStr(ip, writer) + writer.RawByte(DOUBLE_QUOTE) + } + writer.RawByte(END_SQUARE) +} + +func mapOpenFlags(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + flags := sfgo.GetOpenFlags(r.GetInt(fv.Entry.FlatIndex, fv.Entry.Source)) + mapStrArray(writer, flags) +} + +func mapPorts(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + srcPort := r.GetInt(sfgo.FL_NETW_SPORT_INT, fv.Entry.Source) + dstPort := r.GetInt(sfgo.FL_NETW_DPORT_INT, fv.Entry.Source) + writer.RawByte(BEGIN_SQUARE) + writer.Int64(srcPort) + writer.RawByte(COMMA) + writer.Int64(dstPort) + writer.RawByte(END_SQUARE) +} + +func writeStrField(writer *jwriter.Writer, name string, val string) { + writer.RawByte(DOUBLE_QUOTE) + writer.RawString(name) + 
writer.RawString(QUOTE_COLON) + writer.String(val) +} + +func writeIntField(writer *jwriter.Writer, name string, val int32) { + writer.RawByte(DOUBLE_QUOTE) + writer.RawString(name) + writer.RawString(QUOTE_COLON) + writer.Int32(val) +} + +func writeIntArrayField(writer *jwriter.Writer, name string, val *[]int64) { + writer.RawByte(DOUBLE_QUOTE) + writer.RawString(name) + writer.RawString(QUOTE_COLON) + mapIPArray(val, writer) +} + +func mapPortList(writer *jwriter.Writer, ports *[]*sfgo.Port) { + writer.RawByte(DOUBLE_QUOTE) + writer.RawString("ports") + writer.RawString(QUOTE_COLON) + writer.RawByte(BEGIN_SQUARE) + for i, p := range *ports { + writer.RawByte(BEGIN_CURLY) + writeIntField(writer, "port", p.Port) + writer.RawByte(COMMA) + writeIntField(writer, "targetport", p.TargetPort) + writer.RawByte(COMMA) + writeIntField(writer, "nodeport", p.NodePort) + writer.RawByte(COMMA) + writeStrField(writer, "proto", p.Proto) + if (i + 1) < len(*ports) { + writer.RawString(END_CURLY_COMMA) + } else { + writer.RawByte(END_CURLY) + } + } + writer.RawByte(END_SQUARE) +} + +func mapSvcArray(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + writer.RawByte(BEGIN_SQUARE) + for _, s := range *r.GetSvcArray(fv.Entry.FlatIndex, fv.Entry.Source) { + writer.RawByte('{') + writeStrField(writer, "id", s.Id) + writer.RawByte(COMMA) + writeStrField(writer, "name", s.Name) + writer.RawByte(COMMA) + writeStrField(writer, "namespace", s.Namespace) + writer.RawByte(COMMA) + writeIntArrayField(writer, "clusterIP", &s.ClusterIP) + writer.RawByte(COMMA) + mapPortList(writer, &s.PortList) + writer.RawByte(END_CURLY) + } + writer.RawByte(END_SQUARE) +} + +// MapJSON writes a SysFlow attribute to a JSON stream. +func MapJSON(fv *flatrecord.FieldValue, writer *jwriter.Writer, r *flatrecord.Record) { + switch fv.Entry.FlatIndex { + case flatrecord.A_IDS, flatrecord.PARENT_IDS: + oid := sfgo.OID{CreateTS: r.GetInt(sfgo.PROC_OID_CREATETS_INT, fv.Entry.Source), Hpid: r.GetInt(sfgo.PROC_OID_HPID_INT, fv.Entry.Source)} + setCachedValueToJSON(r, oid, fv.Entry.AuxAttr, writer) + return + } + switch fv.Entry.Type { + case flatrecord.MapStrVal: + v := r.GetStr(fv.Entry.FlatIndex, fv.Entry.Source) + writer.String(utils.TrimBoundingQuotes(v)) + case flatrecord.MapIntVal: + writer.Int64(r.GetInt(fv.Entry.FlatIndex, fv.Entry.Source)) + case flatrecord.MapBoolVal: + writer.Bool(r.GetInt(fv.Entry.FlatIndex, fv.Entry.Source) == 1) + case flatrecord.MapSpecialStr: + v := fv.Entry.Map(r).(string) + writer.String(utils.TrimBoundingQuotes(v)) + case flatrecord.MapSpecialInt: + writer.Int64(fv.Entry.Map(r).(int64)) + case flatrecord.MapSpecialBool: + writer.Bool(fv.Entry.Map(r).(bool)) + case flatrecord.MapArrayStr, flatrecord.MapArrayInt: + if fv.Entry.Source == sfgo.SYSFLOW_SRC { + switch fv.Entry.FlatIndex { + case sfgo.EV_PROC_OPFLAGS_INT: + mapOpFlags(fv, writer, r) + return + case sfgo.FL_FILE_OPENFLAGS_INT: + recType := r.GetInt(sfgo.SF_REC_TYPE, fv.Entry.Source) + if recType == sfgo.NET_FLOW { + mapIPs(fv, writer, r) + return + } + mapOpenFlags(fv, writer, r) + return + case sfgo.FL_NETW_SPORT_INT: + mapPorts(fv, writer, r) + return + case sfgo.POD_HOSTIP_ANY, sfgo.POD_INTERNALIP_ANY: + ips := r.GetIntArray(fv.Entry.FlatIndex, fv.Entry.Source) + mapIPArray(ips, writer) + return + + } + } + v := fv.Entry.Map(r).(string) + writer.RawByte(BEGIN_SQUARE) + writer.String(v) + writer.RawByte(END_SQUARE) + case flatrecord.MapArraySvc: + mapSvcArray(fv, writer, r) + } +} + +// setCachedValueToJSON sets the value of 
attr from cache for process ID to a JSON writer. +func setCachedValueToJSON(r *flatrecord.Record, ID sfgo.OID, attr flatrecord.RecAttribute, writer *jwriter.Writer) { + if ptree := r.Fr.Ptree; ptree != nil { + switch attr { + case flatrecord.PProcName: + if len(ptree) > 1 { + writer.String(utils.TrimBoundingQuotes(filepath.Base(ptree[1].Exe))) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.PProcExe: + if len(ptree) > 1 { + writer.String(utils.TrimBoundingQuotes(ptree[1].Exe)) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.PProcArgs: + if len(ptree) > 1 { + writer.String(utils.TrimBoundingQuotes(ptree[1].ExeArgs)) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.PProcUID: + if len(ptree) > 1 { + writer.Int64(int64(ptree[1].Uid)) + } else { + writer.Int64(sfgo.Zeros.Int64) + } + case flatrecord.PProcUser: + if len(ptree) > 1 { + writer.String(utils.TrimBoundingQuotes(ptree[1].UserName)) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.PProcGID: + if len(ptree) > 1 { + writer.Int64(int64(ptree[1].Gid)) + } else { + writer.Int64(sfgo.Zeros.Int64) + } + case flatrecord.PProcGroup: + if len(ptree) > 1 { + writer.String(utils.TrimBoundingQuotes(ptree[1].GroupName)) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.PProcTTY: + if len(ptree) > 1 { + writer.Bool(ptree[1].Tty) + } else { + writer.Bool(false) + } + case flatrecord.PProcEntry: + if len(ptree) > 1 { + writer.Bool(ptree[1].Entry) + } else { + writer.Bool(false) + } + case flatrecord.PProcCmdLine: + if len(ptree) > 1 { + exe := utils.TrimBoundingQuotes(ptree[1].Exe) + exeArgs := utils.TrimBoundingQuotes(ptree[1].ExeArgs) + writer.RawByte(DOUBLE_QUOTE) + stringNoQuotes(exe, writer) + if len(exeArgs) > 0 { + writer.RawByte(SPACE) + stringNoQuotes(exeArgs, writer) + } + writer.RawByte(DOUBLE_QUOTE) + } else { + writer.String(EMPTY_STRING) + } + case flatrecord.ProcAName: + l := len(ptree) + writer.RawByte(BEGIN_SQUARE) + for i, p := range ptree { + writer.String(utils.TrimBoundingQuotes(filepath.Base(p.Exe))) + if i < (l - 1) { + writer.RawByte(COMMA) + } + } + writer.RawByte(END_SQUARE) + case flatrecord.ProcAExe: + l := len(ptree) + writer.RawByte(BEGIN_SQUARE) + for i, p := range ptree { + writer.String(utils.TrimBoundingQuotes(p.Exe)) + if i < (l - 1) { + writer.RawByte(COMMA) + } + } + writer.RawByte(END_SQUARE) + case flatrecord.ProcACmdLine: + l := len(ptree) + writer.RawByte(BEGIN_SQUARE) + for i, p := range ptree { + exe := utils.TrimBoundingQuotes(p.Exe) + exeArgs := utils.TrimBoundingQuotes(p.ExeArgs) + writer.RawByte(DOUBLE_QUOTE) + stringNoQuotes(exe, writer) + if len(exeArgs) > 0 { + writer.RawByte(SPACE) + stringNoQuotes(exeArgs, writer) + } + writer.RawByte(DOUBLE_QUOTE) + if i < (l - 1) { + writer.RawByte(COMMA) + } + } + writer.RawByte(END_SQUARE) + case flatrecord.ProcAPID: + l := len(ptree) + writer.RawByte(BEGIN_SQUARE) + for i, p := range ptree { + writer.Int64(p.Oid.Hpid) + if i < (l - 1) { + writer.RawByte(COMMA) + } + } + writer.RawByte(END_SQUARE) + } + } +} + +// code taken from github.com/mailru/easyjson/jwriter to support string encoding. +// original version prepends quotes around strings, this doesn't. 
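+// The table built by getTable marks which ASCII bytes may be copied through verbatim;
+// the byte values passed as arguments (control characters, quotes, backslash, and for
+// the HTML-escaping variant also '&', '<', '>') are the ones that must be escaped.
+// As an illustrative example (not part of the original sources), stringNoQuotes would
+// emit the input `say "hi"` as `say \"hi\"`, with no surrounding quote bytes, so that
+// callers such as the command-line cases above can compose quoted values themselves.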
+func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + for i := 0; i < 128; i++ { + table[i] = true + } + for _, v := range falseValues { + table[v] = false + } + return table +} + +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + +// stringNoQuotes writes an escaped string with a JSON writer. Adapted from github.com/mailru/easyjson/jwriter. +func stringNoQuotes(s string, w *jwriter.Writer) { + p := 0 // last non-escape symbol + + escapeTable := &htmlEscapeTable + if w.NoEscapeHTML { + escapeTable = &htmlNoEscapeTable + } + + for i := 0; i < len(s); { + c := s[i] + + if c < utf8.RuneSelf { + if escapeTable[c] { + // single-width character, no escaping is required + i++ + continue + } + + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) +} + +// Cleanup cleans up resources. +func (t *JSONEncoder) Cleanup() {} diff --git a/core/exporter/encoders/jsonconstants.go b/core/exporter/encoders/jsonconstants.go new file mode 100644 index 00000000..7dd5ac67 --- /dev/null +++ b/core/exporter/encoders/jsonconstants.go @@ -0,0 +1,51 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. 
+package encoders + +// Exporter constants +const ( + BUFFER_SIZE = 10240 +) + +// JSON schema constants +const ( + VERSION_STR = "{\"" + VERSION_ATTR + "\":" + GROUP_ID = "{\"" + GROUP_ID_ATTR + "\":\"" + COMMA = ',' + DOUBLE_QUOTE = '"' + QUOTE_COLON = "\":" + QUOTE_COLON_CURLY = "\":{" + BEGIN_CURLY = '{' + END_CURLY_COMMA = "}," + END_CURLY = '}' + END_SQUARE = ']' + BEGIN_SQUARE = '[' + SPACE = ' ' + POLICIES = ",\"" + POLICIES_ATTR + "\":[" + ID_TAG = "{\"" + ID_TAG_ATTR + "\":" + DESC = ",\"" + DESC_ATTR + "\":" + PRIORITY = ",\"" + PRIORITY_ATTR + "\":" + TAGS = ",\"" + TAGS_ATTR + "\":[" + PERIOD = '.' + EMPTY_STRING = "\"\"" +) + +const chars = "0123456789abcdef" diff --git a/core/exporter/encoders/occurrence.go b/core/exporter/encoders/occurrence.go new file mode 100644 index 00000000..70f27941 --- /dev/null +++ b/core/exporter/encoders/occurrence.go @@ -0,0 +1,489 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +import ( + "encoding/json" + "errors" + "fmt" + "hash" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/linkedin/goavro" + cmap "github.com/orcaman/concurrent-map" + "github.com/steakknife/bloomfilter" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/encoders/avro/occurrence/event" + "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +// EventPool contains an event slice with metadata annotations. +type EventPool struct { + CID string + Events []*Event + Filter *bloomfilter.Filter + RuleTypes *utils.Set + TopSeverity Severity + LastFlushTime time.Time + encTs int64 + epw *EventPoolWriter +} + +// NewEventPool creates a new EventPool instace. +func NewEventPool(cid string, ts int64) (ep *EventPool, err error) { + bf, err := bloomfilter.NewOptimal(maxElements, probCollide) + if err != nil { + return + } + return &EventPool{CID: cid, Filter: bf, RuleTypes: utils.NewSet(), TopSeverity: SeverityLow, encTs: ts}, nil +} + +// State returns a tuple summarizing the state of the event pool. +func (ep *EventPool) State() (int, Severity) { + return ep.RuleTypes.Len(), ep.TopSeverity +} + +// Aged checks if event pool has aged. +func (ep *EventPool) Aged(maxAge int) bool { + return time.Since(ep.LastFlushTime).Minutes() > float64(maxAge) +} + +// ReachedCapacity indicates whether the pool has reached its configured event capacity. +func (ep *EventPool) ReachedCapacity(capacity int) bool { + return len(ep.Events) >= capacity +} + +// Flush writes off event slice. 
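+// For each buffered event it resolves the export path, points the pool's OCF writer at
+// that path, converts the event to a generic map through a JSON round-trip (the form
+// goavro's Append expects), and writes the batch to the Avro container file before
+// clearing the event slice and stamping LastFlushTime. A minimal usage sketch with
+// hypothetical arguments (pathPrefix, s3Prefix, clusterID):
+//
+//	ep.Flush("/var/findings", "acme", "cluster-1")
+//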
+func (ep *EventPool) Flush(pathPrefix string, s3Prefix string, clusterID string) (err error) { + var events []interface{} + for _, v := range ep.Events { + exportPath := fmt.Sprintf("%s/%s", pathPrefix, v.getExportFilePath(s3Prefix, clusterID, ep.encTs)) + if err = ep.UpdateEventPoolWriter(exportPath, v.Schema()); err != nil { + return + } + var m map[string]interface{} + s, _ := json.Marshal(v.Event) + json.Unmarshal(s, &m) + events = append(events, m) + } + if len(events) > 0 && ep.epw != nil { + if err = ep.epw.Append(events); err != nil { + return + } + ep.epw.fw.Sync() + } + ep.Events = nil + ep.LastFlushTime = time.Now() + return +} + +// Reset clears event slice and resets sketch counters and filter. +func (ep *EventPool) Reset() (err error) { + bf, err := bloomfilter.NewOptimal(maxElements, probCollide) + if err != nil { + return + } + ep.Events = nil + ep.Filter = bf + ep.RuleTypes = utils.NewSet() + ep.TopSeverity = SeverityLow + ep.LastFlushTime = time.Now() + return +} + +// UpdateEventPoolWriter updates the EventPoolWriter for exportPath. +// It reuses the current EventPoolWriter if already point to the given exportPath. +// Otherwise, it creates a new OCF writer and the export directory structure if not present. +func (ep *EventPool) UpdateEventPoolWriter(exportPath string, schema string) (err error) { + if ep.epw == nil { + ep.epw = new(EventPoolWriter) + } + if exportPath != ep.epw.currentExportPath { + dir := path.Dir(exportPath) + if _, err = os.Stat(dir); os.IsNotExist(err) { + err = os.MkdirAll(dir, 0755) + if err != nil { + return + } + } + if err = ep.epw.UpdateOCFWriter(exportPath, schema); err != nil { + return + } + } + // sanity check for cached OCF writer + if ep.epw.ocfw == nil { + return errors.New("EventPoolWriter's OCF file writer should not be null") + } + return +} + +// EventPoolWriter is an EventPool writer. +type EventPoolWriter struct { + currentExportPath string + fw *os.File + codec *goavro.Codec + ocfw *goavro.OCFWriter +} + +// UpdateOCFWriter creates a new OCF writer. +func (epw *EventPoolWriter) UpdateOCFWriter(exportPath string, schema string) (err error) { + // close the current file writer before creating a new one + if epw.fw != nil { + epw.fw.Close() + } + epw.currentExportPath = exportPath + epw.fw, err = os.OpenFile(epw.currentExportPath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return + } + if epw.codec == nil { + epw.codec, err = goavro.NewCodec(schema) + if err != nil { + logger.Error.Println(err) + return + } + } + epw.ocfw, err = goavro.NewOCFWriter(goavro.OCFConfig{ + W: epw.fw, + Codec: epw.codec, + CompressionName: "snappy", + }) + return +} + +// Append appends an event slice to the event pool writer. +func (epw *EventPoolWriter) Append(events []interface{}) error { + if epw.ocfw != nil { + return epw.ocfw.Append(events) + } + return errors.New("trying to append events using a null OCF file writer reference") +} + +// Cleanup closes the event pool writer file writer. +func (epw *EventPoolWriter) Cleanup() error { + return epw.fw.Close() +} + +// Event is an event associated with an occurrence, used as context for the occurrence. +type Event struct { + *event.Event + Record *flatrecord.Record +} + +// getExportFileName returns the name of the file where the event should be exported. +func (e *Event) getExportFileName() string { + if e.ContainerID == sfgo.Zeros.String { + return hostFileName + } + return e.ContainerID +} + +// getExportFilePath builds the export file path for the event. 
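+// The resulting relative layout is
+//
+//	<prefix>/<clusterID>/<nodeID>/<nodeIP>/<year>/<month>/<day>/<name>_<encTs>.avro
+//
+// where <name> is the container ID, or "host" for host-level events. prependEnvPath
+// drops components that are unset (and, for the node fields, equal to "NA"); the node
+// ID is also dropped when it merely repeats the node IP.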
+func (e *Event) getExportFilePath(prefix string, clusterID string, encTs int64) string { + y, m, d := e.getTimePartitions() + path := fmt.Sprintf("%d/%d/%d/%s_%d.avro", y, m, d, e.getExportFileName(), encTs) + return e.prependEnvPath(prefix, clusterID, path) +} + +// getEnvDescription builds the environment meta description for the event. +func (e *Event) getEnvDescription(prefix string, clusterID string) (path string) { + path = e.prependEnvPath(prefix, clusterID, path) + return strings.ReplaceAll(path, "/", vLine) +} + +// prependEnvPath prepends environment meta path to path. +func (e *Event) prependEnvPath(prefix string, clusterID string, path string) string { + if e.NodeIP != sfgo.Zeros.String && e.NodeIP != NA { + path = filepath.Join(e.NodeIP, path) + } + if e.NodeID != sfgo.Zeros.String && e.NodeID != NA && e.NodeID != e.NodeIP { + path = filepath.Join(e.NodeID, path) + } + if clusterID != sfgo.Zeros.String { + path = filepath.Join(clusterID, path) + } + if prefix != sfgo.Zeros.String { + path = filepath.Join(prefix, path) + } + return path +} + +// getTimePartitions obtains time partitions from timestamp. +func (e *Event) getTimePartitions() (year int, month int, day int) { + timeStamp := time.Unix(0, e.Ts) + return timeStamp.Year(), int(timeStamp.Month()), timeStamp.Day() +} + +// Occurrence object for IBM Findings API. +type Occurrence struct { + ID string + ShortDescr string + LongDescr string + Severity Severity + Certainty Certainty + ResType string + ResName string + AlertQuery string +} + +// NoteID returns the occurence note ID based on the occurrence's severity. +func (occ *Occurrence) NoteID() string { + if occ.Severity < SeverityHigh { + return NOTIFICATION + } + return OFFENSE +} + +// OccurrenceEncoder is an encoder for IBM Findings' occurrences. +type OccurrenceEncoder struct { + config commons.Config + exportCache cmap.ConcurrentMap + batch []commons.EncodedData + ts int64 +} + +// NewOccurrenceEncoder creates a new Occurrence encoder. +func NewOccurrenceEncoder(config commons.Config) Encoder { + return &OccurrenceEncoder{ + config: config, + exportCache: cmap.New(), + batch: make([]commons.EncodedData, 0, config.EventBuffer), + ts: time.Now().Unix()} +} + +// Register registers the encoder to the codecs cache. +func (oe *OccurrenceEncoder) Register(codecs map[commons.Format]EncoderFactory) { + codecs[commons.OccurrenceFormat] = NewOccurrenceEncoder +} + +// Encodes a telemetry record into an occurrence representation. +func (oe *OccurrenceEncoder) encode(rec *flatrecord.Record) (data commons.EncodedData, err error) { + if e, ep, alert := oe.addEvent(rec); alert { + data = oe.createOccurrence(e, ep) + } + return +} + +// Encode encodes telemetry records into an occurrence representation. +func (oe *OccurrenceEncoder) Encode(recs []*flatrecord.Record) ([]commons.EncodedData, error) { + oe.batch = oe.batch[:0] + for _, r := range recs { + if data, _ := oe.encode(r); data != nil { + oe.batch = append(oe.batch, data) + } + } + return oe.batch, nil +} + +// addEvent adds a record to export queue. 
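+// It appends the encoded record to the container's event pool, folds the record's
+// matched rules into the pool's rule-type set and top severity, and raises an alert
+// when either the record's semantic hash is new to the pool's bloom filter or the
+// pool's (rule count, severity) state changed. The pool is flushed when an alert is
+// raised, when it reaches FindingsPoolCapacity, or when it exceeds FindingsPoolMaxAge
+// (in which case it is also reset).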
+func (oe *OccurrenceEncoder) addEvent(r *flatrecord.Record) (e *Event, ep *EventPool, alert bool) { + cid := flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_ID)(r) + ep = oe.getEventPool(cid) + + // record the event pool state prior to adding a new event + rco, so := ep.State() + + // encode and add event to event pool + e = oe.encodeEvent(r) + ep.Events = append(ep.Events, e) + for _, r := range r.Ctx.GetRules() { + ep.RuleTypes.Add(r.Name) + ep.TopSeverity = Severity(utils.Max(int(ep.TopSeverity), int(r.Priority))) + } + + // check if a semantically equivalent record has been seen before + h := oe.semanticHash(r) + if !ep.Filter.Contains(h) { + ep.Filter.Add(h) + alert = true + } + + // check for state changes in the pool after adding the event + rc, s := ep.State() + if rco != rc || so != s { + alert = true + } + + // write events out if + // (1) an occurrence is generated for the current event, or + // (2) the event pool has reached its configured capacity, or + // (3) the event pool has aged. + full := ep.ReachedCapacity(oe.config.FindingsPoolCapacity) + aged := ep.Aged(oe.config.FindingsPoolMaxAge) + if alert || full || aged { + if err := ep.Flush(oe.config.FindingsPath, oe.config.FindingsS3Prefix, oe.config.ClusterID); err != nil { + logger.Error.Println(err) + } + if aged { + ep.Reset() + } + } + + return +} + +// getEventPool retrieves container event pool from cache, or create one if absent. +func (oe *OccurrenceEncoder) getEventPool(cid string) *EventPool { + m := oe.exportCache + var ep *EventPool + if v, ok := m.Get(cid); ok { + ep = v.(*EventPool) + } else { + ep, _ = NewEventPool(cid, oe.ts) + m.Set(cid, ep) + } + return ep +} + +// createOccurrence creates a new Occurence object. +func (oe *OccurrenceEncoder) createOccurrence(e *Event, ep *EventPool) *Occurrence { + oc := new(Occurrence) + oc.Certainty = CertaintyMedium + oc.ID = fmt.Sprintf(noteIDStrFmt, ep.CID, time.Now().UTC().UnixNano()/1000) + envStr := e.getEnvDescription(oe.config.FindingsS3Prefix, oe.config.ClusterID) + if ep.CID != sfgo.Zeros.String { + oc.ResName = fmt.Sprintf("%s:%s [%s]", ep.CID, flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_NAME)(e.Record), envStr) + oc.ResType = flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_TYPE)(e.Record) + } else { + oc.ResName = fmt.Sprintf("%s [%s]", hostType, envStr) + oc.ResType = hostType + } + rnames, tags, severity := oe.summarizePolicy(e.Record) + oc.Severity = severity + polStr := fmt.Sprintf(policiesStrFmt, strings.Join(rnames, listSep)) + tagsStr := fmt.Sprintf(tagsStrFmt, strings.Join(tags, listSep)) + var detStr string + switch e.Record.GetInt(sfgo.SF_REC_TYPE, sfgo.SYSFLOW_SRC) { + case sfgo.PROC_EVT: + proc := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(e.Record) + pproc := flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_CMDLINE)(e.Record) + detStr = fmt.Sprintf(peStrFmt, pproc, proc) + case sfgo.FILE_EVT: + proc := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(e.Record) + path := oe.formatResource(e.Record) + detStr = fmt.Sprintf(feStrFmt, proc, path) + case sfgo.FILE_FLOW: + proc := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(e.Record) + path := oe.formatResource(e.Record) + detStr = fmt.Sprintf(ffStrFmt, proc, path) + case sfgo.NET_FLOW: + proc := flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(e.Record) + conn := oe.formatResource(e.Record) + detStr = fmt.Sprintf(nfStrFmt, proc, conn) + } + // sanitizes details string to avoid being flagged by tools like CloudFlare + encDetStr := strings.ReplaceAll(detStr, "/", fwdSlash) 
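+ // Use the first matched rule name (with sanitized slashes) as the short description,
+ // appending "(+)" when more than one rule matched; otherwise keep the generic default.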
+ shortDescr := defaultShortDescr + if len(rnames) > 0 { + shortDescr = strings.ReplaceAll(rnames[0], "/", fwdSlash) + if len(rnames) > 1 { + shortDescr = shortDescr + " (+)" + } + } + oc.ShortDescr = shortDescr + oc.LongDescr = fmt.Sprintf(detailsStrFmt, encDetStr, polStr, tagsStr) + oc.AlertQuery = fmt.Sprintf(sqlQueryStrFmt, oe.config.FindingsS3Region, oe.config.FindingsS3Bucket, + e.getExportFilePath(oe.config.FindingsS3Prefix, oe.config.ClusterID, ep.encTs), oe.config.FindingsS3Region, oe.config.FindingsS3Bucket) + return oc +} + +// summarizePolicy extracts a summary of rules applied to a record. +func (oe *OccurrenceEncoder) summarizePolicy(r *flatrecord.Record) (rnames []string, tags []string, severity Severity) { + tags = append(tags, r.Ctx.GetTags()...) + for _, r := range r.Ctx.GetRules() { + rnames = append(rnames, r.Name) + severity = Severity(utils.Max(int(severity), int(r.Priority))) + for _, tag := range r.Tags { + switch tag := tag.(type) { + case []string: + tags = append(tags, tag...) + default: + tags = append(tags, tag.(string)) + } + } + } + return +} + +// encodeEvent maps a record into an event that can be associated with an occurrence. +func (oe *OccurrenceEncoder) encodeEvent(r *flatrecord.Record) *Event { + rnames, tags, severity := oe.summarizePolicy(r) + e := &Event{Record: r, Event: event.NewEvent()} + e.Ts = flatrecord.Mapper.MapInt(flatrecord.SF_TS)(r) + e.Description = strings.Join(rnames, listSep) + e.Severity = severity.String() + e.ClusterID = oe.config.ClusterID + e.NodeID = flatrecord.Mapper.MapStr(flatrecord.SF_NODE_ID)(r) + e.NodeIP = flatrecord.Mapper.MapStr(flatrecord.SF_NODE_IP)(r) + e.ContainerID = flatrecord.Mapper.MapStr(flatrecord.SF_CONTAINER_ID)(r) + e.RecordType = flatrecord.Mapper.MapStr(flatrecord.SF_TYPE)(r) + e.OpFlags = flatrecord.Mapper.MapStr(flatrecord.SF_OPFLAGS)(r) + e.PProcCmd = flatrecord.Mapper.MapStr(flatrecord.SF_PPROC_CMDLINE)(r) + e.PProcPID = flatrecord.Mapper.MapInt(flatrecord.SF_PPROC_PID)(r) + e.ProcCmd = flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(r) + e.ProcPID = flatrecord.Mapper.MapInt(flatrecord.SF_PROC_PID)(r) + e.Resource = oe.formatResource(r) + e.Tags = strings.Join(tags, listSep) + e.Trace = flatrecord.Mapper.MapStr(flatrecord.SF_TRACENAME)(r) + return e +} + +// formatResource formats a file or network resource. +func (oe *OccurrenceEncoder) formatResource(r *flatrecord.Record) (res string) { + switch r.GetInt(sfgo.SF_REC_TYPE, sfgo.SYSFLOW_SRC) { + case sfgo.FILE_EVT, sfgo.FILE_FLOW: + return flatrecord.Mapper.MapStr(flatrecord.SF_FILE_PATH)(r) + case sfgo.NET_FLOW: + sip := flatrecord.Mapper.MapStr(flatrecord.SF_NET_SIP)(r) + sport := flatrecord.Mapper.MapInt(flatrecord.SF_NET_SPORT)(r) + dip := flatrecord.Mapper.MapStr(flatrecord.SF_NET_DIP)(r) + dport := flatrecord.Mapper.MapInt(flatrecord.SF_NET_DPORT)(r) + return fmt.Sprintf(connStrFmt, sip, sport, dip, dport) + } + return +} + +// semanticHash computes a hash value over record attributes denoting the semantics of the record (used in the bloom filter). 
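+// Records that agree on process command line, user ID, file OID, operation flags, and
+// TTY flag hash to the same value and are therefore treated as duplicates by the
+// pool's bloom filter.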
+func (oe *OccurrenceEncoder) semanticHash(r *flatrecord.Record) hash.Hash64 { + h := xxhash.New() + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_PROC_CMDLINE)(r))) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_PROC_UID)(r))) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_FILE_OID)(r))) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_OPFLAGS)(r))) + h.Write([]byte(flatrecord.Mapper.MapStr(flatrecord.SF_PROC_TTY)(r))) + return h +} + +// Cleanup cleans up resources. +func (oe *OccurrenceEncoder) Cleanup() { + for _, v := range oe.exportCache.Items() { + ep := v.(*EventPool) + ep.epw.Cleanup() + } +} diff --git a/core/exporter/encoders/occurrence_test.go b/core/exporter/encoders/occurrence_test.go new file mode 100644 index 00000000..022625e4 --- /dev/null +++ b/core/exporter/encoders/occurrence_test.go @@ -0,0 +1,150 @@ +package encoders_test + +import ( + "encoding/json" + "fmt" + "os" + "testing" + "time" + + "github.com/actgardner/gogen-avro/v7/container" + "github.com/linkedin/goavro" + "github.com/stretchr/testify/assert" + "github.com/sysflow-telemetry/sf-processor/core/exporter/encoders" + "github.com/sysflow-telemetry/sf-processor/core/exporter/encoders/avro/occurrence/event" +) + +func TestEventSerialization(t *testing.T) { + path := "/tmp/events.avro" + count := 25 + fw, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + assert.NoError(t, err) + for i := 0; i < count; i++ { + e := &encoders.Event{Event: event.NewEvent()} + e.Ts = time.Now().Unix() + e.Description = fmt.Sprintf("event %d", i) + err := e.Serialize(fw) + assert.NoError(t, err) + } + fw.Close() + fr, err := os.OpenFile(path, os.O_RDONLY, 0644) + assert.NoError(t, err) + var events []*event.Event + for { + if e, err := event.DeserializeEvent(fr); err == nil { + events = append(events, e) + } else { + break + } + } + assert.Equal(t, count, len(events)) + fr.Close() + os.Remove(path) +} + +func TestEventContainerSerialization(t *testing.T) { + path := "/tmp/events_schema.avro" + count := 25 + fw, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + assert.NoError(t, err) + cw, err := event.NewEventWriter(fw, container.Snappy, 512) + assert.NoError(t, err) + for i := 0; i < count; i++ { + e := &encoders.Event{Event: event.NewEvent()} + e.Ts = time.Now().Unix() + e.Description = fmt.Sprintf("event %d", i) + err := cw.WriteRecord(e) + assert.NoError(t, err) + } + cw.Flush() + fw.Close() + fr, err := os.OpenFile(path, os.O_RDONLY, 0644) + assert.NoError(t, err) + cr, err := event.NewEventReader(fr) + assert.NoError(t, err) + var events []*event.Event + for { + if e, err := cr.Read(); err == nil { + events = append(events, e) + } else { + break + } + } + assert.Equal(t, count, len(events)) + fr.Close() + os.Remove(path) +} + +func TestGoavroEventSerialization(t *testing.T) { + path := "/tmp/events_goavro.avro" + count := 25 + + fw, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + assert.NoError(t, err) + + buf, err := os.ReadFile("./avro/occurrence/avsc/Event.avsc") + assert.NoError(t, err) + codec, err := goavro.NewCodec(string(buf)) + assert.NoError(t, err) + + ocfw, err := goavro.NewOCFWriter(goavro.OCFConfig{ + W: fw, + Codec: codec, + CompressionName: "snappy", + }) + assert.NoError(t, err) + + var values []map[string]interface{} + for i := 0; i < count; i++ { + e := &encoders.Event{Event: event.NewEvent()} + e.Ts = time.Now().Unix() + e.Description = fmt.Sprintf("event %d", i) + var m map[string]interface{} + s, _ := 
json.Marshal(e) + json.Unmarshal(s, &m) + values = append(values, m) + } + err = ocfw.Append(values) + assert.NoError(t, err) + + fr, err := os.OpenFile(path, os.O_RDONLY, 0644) + assert.NoError(t, err) + ocfr, err := goavro.NewOCFReader(fr) + assert.NoError(t, err) + var events []interface{} + for ocfr.Scan() { + d, err := ocfr.Read() + assert.NoError(t, err) + events = append(events, d) + } + assert.Equal(t, count, len(events)) + + values = nil + for i := 0; i < count; i++ { + e := &encoders.Event{Event: event.NewEvent()} + e.Ts = time.Now().Unix() + e.Description = fmt.Sprintf("event (2) %d", i) + var m map[string]interface{} + s, _ := json.Marshal(e) + json.Unmarshal(s, &m) + values = append(values, m) + } + err = ocfw.Append(values) + assert.NoError(t, err) + + fr, err = os.OpenFile(path, os.O_RDONLY, 0644) + assert.NoError(t, err) + ocfr, err = goavro.NewOCFReader(fr) + assert.NoError(t, err) + events = nil + for ocfr.Scan() { + d, err := ocfr.Read() + assert.NoError(t, err) + events = append(events, d) + } + assert.Equal(t, 2*count, len(events)) + + fw.Close() + fr.Close() + os.Remove(path) +} diff --git a/core/exporter/encoders/occurrenceconstants.go b/core/exporter/encoders/occurrenceconstants.go new file mode 100644 index 00000000..4f0a4193 --- /dev/null +++ b/core/exporter/encoders/occurrenceconstants.go @@ -0,0 +1,102 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoders implements codecs for exporting records and events in different data formats. +package encoders + +import "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" + +// Formatted string constants. +const ( + rightArrow = "\u2192" + fwdSlash = "\u2215" + vLine = "\u007c" + + peStrFmt = "Process execution %s " + rightArrow + " %s" + feStrFmt = "Filesystem manipulation %s " + rightArrow + " %s" + ffStrFmt = "File access %s " + rightArrow + " %s" + nfStrFmt = "Network traffic %s " + rightArrow + " %s" + + defaultShortDescr = "telemetry event" + + policiesStrFmt = "Policies
%s" + tagsStrFmt = "Tags
%s" + detailsStrFmt = "%s

%s

%s" + noteIDStrFmt = "%s-%d" + connStrFmt = "%s:%d-%s:%d" + + sqlQueryStrFmt = "SELECT * FROM cos://%s/%s/%s STORED AS AVRO LIMIT 500 INTO cos://%s/%s/sql-query" + + listSep = "," + + hostFileName = "host" + hostType = "host" + + NA = "NA" +) + +// Occurence NoteIDs. +const ( + NOTIFICATION = "notification" + OFFENSE = "actionable-offense" +) + +// NoteIDs returns the set of note IDs. +func NoteIDs() *utils.Set { + s := utils.NewSet() + s.Add(NOTIFICATION) + s.Add(OFFENSE) + return s +} + +// Bloom filter settings. +const ( + maxElements = 100000 + probCollide = 0.0000001 +) + +// Severity type for enumeration. +type Severity int + +// Severity enumeration. +const ( + SeverityLow Severity = iota + SeverityMedium + SeverityHigh +) + +// String returns the string representation of a severity instance. +func (s Severity) String() string { + return [...]string{"LOW", "MEDIUM", "HIGH"}[s] +} + +// Certainty type for enumeration. +type Certainty int + +// Certainty enumeration. +const ( + CertaintyLow Certainty = iota + CertaintyMedium + CertaintyHigh +) + +// String returns the string representation of a severity instance. +func (s Certainty) String() string { + return [...]string{"LOW", "MEDIUM", "HIGH"}[s] +} diff --git a/core/exporter/exporter.go b/core/exporter/exporter.go index 36f4c692..7c755aff 100644 --- a/core/exporter/exporter.go +++ b/core/exporter/exporter.go @@ -16,7 +16,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package exporter implements a module plugin for encoding and exporting telemetry records and events. package exporter import ( @@ -24,31 +25,33 @@ import ( "sync" "time" - syslog "github.com/RackSec/srslog" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/encoders" + "github.com/sysflow-telemetry/sf-processor/core/exporter/transports" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" ) const ( pluginName string = "exporter" ) -// Exporter defines a syslogger plugin. +var codecs = make(map[commons.Format]encoders.EncoderFactory) +var protocols = make(map[commons.Transport]transports.TransportProtocolFactory) + +// Exporter defines a telemetry export plugin. type Exporter struct { - recs []*engine.Record - counter int - sysl *syslog.Writer - config Config - exporter *JSONExporter - exportProto ExportProtocol - exportProtoCache map[string]interface{} + config commons.Config + encoder encoders.Encoder + transport transports.TransportProtocol + recs []*flatrecord.Record + counter int } // NewExporter creates a new plugin instance. func NewExporter() plugins.SFProcessor { - e := &Exporter{exportProtoCache: make(map[string]interface{})} - return e + return &Exporter{} } // GetName returns the plugin name. @@ -61,42 +64,75 @@ func (s *Exporter) Register(pc plugins.SFPluginCache) { pc.AddProcessor(pluginName, NewExporter) } -// AddExportProtocol registers an export protocol object with the Exporter -func (s *Exporter) AddExportProtocol(protoName string, ep interface{}) { - s.exportProtoCache[protoName] = ep +// registerCodecs register encoders for exporting processor data. 
+func (s *Exporter) registerCodecs() { + (&encoders.JSONEncoder{}).Register(codecs) + (&encoders.ECSEncoder{}).Register(codecs) + (&encoders.OccurrenceEncoder{}).Register(codecs) } -func (s *Exporter) initProtos() { - (&SyslogProto{}).Register(s) - (&TerminalProto{}).Register(s) - (&TextFileProto{}).Register(s) - (&NullProto{}).Register(s) - +// registerExportProtocols register transport protocols for exporting processor data. +func (s *Exporter) registerExportProtocols() { + (&transports.SyslogProto{}).Register(protocols) + (&transports.TerminalProto{}).Register(protocols) + (&transports.TextFileProto{}).Register(protocols) + (&transports.NullProto{}).Register(protocols) + (&transports.FindingsAPIProto{}).Register(protocols) + (&transports.ElasticProto{}).Register(protocols) } // Init initializes the plugin with a configuration map and cache. -func (s *Exporter) Init(conf map[string]string) error { +func (s *Exporter) Init(conf map[string]interface{}) error { var err error - s.config = CreateConfig(conf) - s.initProtos() - if val, ok := s.exportProtoCache[s.config.Export.String()]; ok { - funct := val.(func() ExportProtocol) - s.exportProto = funct() - err = s.exportProto.Init(conf) + + // register encoders + s.registerCodecs() + + // register export protocols + s.registerExportProtocols() + + // create and read config object + s.config, err = commons.CreateConfig(conf) + if err != nil { + return err + } + + // initialize encoder + if createCodec, ok := codecs[s.config.Format]; ok { + s.encoder = createCodec(s.config) + } else { + return errors.New("Unable to find encoder for " + s.config.Format.String()) + } + + // initiliaze transport protocol + if createTransport, ok := protocols[s.config.Transport]; ok { + s.transport = createTransport(s.config) + err = s.transport.Init() if err != nil { return err } } else { - return errors.New("Unable to find export protocol: " + s.config.Export.String()) + return errors.New("Unable to find transport protocol for " + s.config.Transport.String()) } - s.exporter = NewJSONExporter(s.exportProto, s.config) return err } +// Test implements health checks for the plugin. +func (s *Exporter) Test() (bool, error) { + if t, ok := s.transport.(transports.TestableTransportProtocol); ok { + return t.Test() + } + return true, nil +} + // Process implements the main interface of the plugin. -func (s *Exporter) Process(ch interface{}, wg *sync.WaitGroup) { - cha := ch.(*engine.RecordChannel) +func (s *Exporter) Process(ch []interface{}, wg *sync.WaitGroup) { + if len(ch) != 1 { + logger.Error.Println("Exporter only supports a single input channel at this time") + return + } + cha := ch[0].(*plugins.Channel[*flatrecord.Record]) record := cha.In defer wg.Done() @@ -105,7 +141,8 @@ func (s *Exporter) Process(ch interface{}, wg *sync.WaitGroup) { defer ticker.Stop() lastFlush := time.Now() - logger.Trace.Printf("Starting Exporter in mode %s with channel capacity %d", s.config.Export.String(), cap(record)) + logger.Trace.Printf("Starting exporter in mode %s with channel capacity %d", s.config.Transport.String(), cap(record)) + RecLoop: for { select { @@ -113,20 +150,23 @@ RecLoop: if ok { s.counter++ s.recs = append(s.recs, fc) - if s.counter > s.config.EventBuffer { + if s.counter >= s.config.EventBuffer { s.process() s.recs = s.recs[:0] s.counter = 0 lastFlush = time.Now() } } else { - s.process() + ticker.Stop() + if s.counter > 0 { + s.process() + } logger.Trace.Println("Channel closed. 
Shutting down.") break RecLoop } case <-ticker.C: // force flush records after 1sec idle - if time.Now().Sub(lastFlush) > maxIdle && s.counter > 0 { + if time.Since(lastFlush) > maxIdle && s.counter > 0 { s.process() s.recs = s.recs[:0] s.counter = 0 @@ -136,28 +176,28 @@ RecLoop: } } -func (s *Exporter) process() { - if s.config.ExpType == BatchType { - err := s.exporter.ExportOffenses(s.recs) - if err != nil { - logger.Error.Println("Error exporting events: " + err.Error()) - } - } else { - err := s.exporter.ExportTelemetryRecords(s.recs) +func (s *Exporter) process() error { + data, err := s.encoder.Encode(s.recs) + if err != nil { + logger.Error.Println(err) + return err + } + if len(data) > 0 { + err = s.transport.Export(data) if err != nil { - logger.Error.Println("Error exporting events: " + err.Error()) + logger.Error.Println(err) + return err } } + return nil } // SetOutChan sets the output channel of the plugin. -func (s *Exporter) SetOutChan(ch interface{}) {} +func (s *Exporter) SetOutChan(ch []interface{}) {} // Cleanup tears down plugin resources. func (s *Exporter) Cleanup() { logger.Trace.Println("Exiting ", pluginName) - s.exportProto.Cleanup() + s.encoder.Cleanup() + s.transport.Cleanup() } - -// This function is not run when module is used as a plugin. -func main() {} diff --git a/core/exporter/exportprotocol.go b/core/exporter/exportprotocol.go deleted file mode 100644 index b9995f5b..00000000 --- a/core/exporter/exportprotocol.go +++ /dev/null @@ -1,189 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -package exporter - -import ( - "crypto/tls" - "fmt" - syslog "github.com/RackSec/srslog" - "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "os" -) - -const ( - SYSLOG = "syslog" - FILE = "file" - TERM = "terminal" - NULL = "null" -) - -// ExportProtocol is an interface to support a transport protocol -type ExportProtocol interface { - Init(conf map[string]string) error - Export(buf []byte) error - Register(e *Exporter) - Cleanup() -} - -// NullProto implements the ExportProtocol interface with not output -// for performance testing -type NullProto struct { -} - -// NewNullProto creates a new null protocol object -func NewNullProto() ExportProtocol { - return &NullProto{} -} - -// Init intializes a new null protocol object -func (s *NullProto) Init(conf map[string]string) error { - return nil -} - -// Export does nothing -func (s *NullProto) Export(buf []byte) error { - return nil -} - -// Register registers the null protocol object with the exporter -func (s *NullProto) Register(e *Exporter) { - e.AddExportProtocol(NULL, NewNullProto) -} - -//Cleanup cleans up the null protocol object -func (s *NullProto) Cleanup() { -} - -// SyslogProto implements the ExportProtocol interface for syslog -type SyslogProto struct { - sysl *syslog.Writer - config Config -} - -// NewSyslogProto creates a new syslog protocol object -func NewSyslogProto() ExportProtocol { - return &SyslogProto{} -} - -// Init initializes the syslog daemon connection -func (s *SyslogProto) Init(conf map[string]string) error { - s.config = CreateConfig(conf) - raddr := fmt.Sprintf("%s:%d", s.config.Host, s.config.Port) - var err error - if s.config.Proto == TCPTLSProto { - // TODO: verify connection with given trust certifications - nopTLSConfig := &tls.Config{InsecureSkipVerify: true} - s.sysl, err = syslog.DialWithTLSConfig("tcp+tls", raddr, syslog.LOG_ALERT|syslog.LOG_DAEMON, s.config.Tag, nopTLSConfig) - } else { - s.sysl, err = syslog.Dial(s.config.Proto.String(), raddr, syslog.LOG_ALERT|syslog.LOG_DAEMON, s.config.Tag) - } - if err == nil { - s.sysl.SetFormatter(syslog.RFC5424Formatter) - if s.config.LogSource != sfgo.Zeros.String { - s.sysl.SetHostname(s.config.LogSource) - } - } - return err -} - -// Export sends buffer to syslog daemon as an alert. -func (s *SyslogProto) Export(buf []byte) error { - err := s.sysl.Alert(UnsafeBytesToString(buf)) - return err -} - -// Register registers the syslog proto object with the exporter -func (s *SyslogProto) Register(e *Exporter) { - e.AddExportProtocol(SYSLOG, NewSyslogProto) -} - -// Cleanup closes the syslog connection. -func (s *SyslogProto) Cleanup() { - if s.sysl != nil { - s.sysl.Close() - } -} - -// TextFileProto implements the ExportProtocol interface for a text file. -type TextFileProto struct { - config Config - fhandle *os.File -} - -// NewTextFileProto creates a new text file protcol object -func NewTextFileProto() ExportProtocol { - return &TextFileProto{} -} - -// Init initializes the text file. -func (s *TextFileProto) Init(conf map[string]string) error { - s.config = CreateConfig(conf) - os.Remove(s.config.Path) - f, err := os.OpenFile(s.config.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - s.fhandle = f - return err -} - -// Export writes the buffer to the open file. 
-func (s *TextFileProto) Export(buf []byte) error { - _, err := s.fhandle.Write(buf) - s.fhandle.WriteString("\n") - return err -} - -// Register registers the text file proto object with the exporter -func (s *TextFileProto) Register(e *Exporter) { - e.AddExportProtocol(FILE, NewTextFileProto) -} - -// Cleanup closes the text file. -func (s *TextFileProto) Cleanup() { - if s.fhandle != nil { - s.fhandle.Close() - } -} - -// TerminalProto implements the ExportProtocol interface of a terminal output. -type TerminalProto struct { -} - -// NewTerminalProto creates a new terminal protcol object -func NewTerminalProto() ExportProtocol { - return &TerminalProto{} -} - -//Init initializes the terminal output object -func (s *TerminalProto) Init(conf map[string]string) error { - return nil -} - -// Export exports the contets of buffer for the terminal. -func (s *TerminalProto) Export(buf []byte) error { - fmt.Println(UnsafeBytesToString(buf)) - return nil -} - -// Register registers the terminal proto object with the exporter -func (s *TerminalProto) Register(e *Exporter) { - e.AddExportProtocol(TERM, NewTerminalProto) -} - -// Cleanup cleans up the terminal output object. -func (s *TerminalProto) Cleanup() {} diff --git a/core/exporter/jsonexporter.go b/core/exporter/jsonexporter.go deleted file mode 100644 index 899ff4c0..00000000 --- a/core/exporter/jsonexporter.go +++ /dev/null @@ -1,400 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -package exporter - -import ( - "fmt" - "reflect" - - "github.com/mailru/easyjson/jwriter" - "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" -) - -// SysFlow record components -const ( - PROC = "proc" - PPROC = "pproc" - NET = "net" - FILEF = "file" - FLOW = "flow" - CONTAINER = "container" - NODE = "node" - - BEGIN_STATE = iota - PROC_STATE - PPROC_STATE - NET_STATE - FILE_STATE - FLOW_STATE - CONT_STATE - NODE_STATE - - BUFFER_SIZE = 10240 -) - -// JSONExporter implements a JSON serializer and exporter -type JSONExporter struct { - fieldCache []*engine.FieldValue - proto ExportProtocol - config Config - buf []byte - writer *jwriter.Writer -} - -// NewJSONExporter instantiates a JSON exporter -func NewJSONExporter(p ExportProtocol, config Config) *JSONExporter { - t := &JSONExporter{} - t.fieldCache = engine.FieldValues - t.proto = p - t.config = config - t.writer = &jwriter.Writer{} - t.buf = make([]byte, 0, BUFFER_SIZE) - t.writer.Buffer.Buf = t.buf - return t -} - -const ( - VERSION_STR = "{\"version\":" - GROUP_ID = "{\"groupId\":\"" - FORWARD_SLASH = '/' - QUOTE_COMMA = "\"," - OBSERVATIONS = "\"observations\":[" - COMMA = ',' - END_SQ_SQUIGGLE = "]}" - DOUBLE_QUOTE = '"' - QUOTE_COLON = "\":" - QUOTE_COLON_OSUIG = "\":{" - END_SQUIGGLE_COMMA = "}," - END_SQUIGGLE = '}' - END_SQUARE = ']' - BEGIN_SQUARE = '[' - POLICIES = ",\"policies\":[" - ID_TAG = "{\"id\":" - DESC = ",\"desc\":" - PRIORITY = ",\"priority\":" - TAGS = ",\"tags\":[" - PERIOD = '.' -) - -func (t *JSONExporter) exportOffense(recs []*engine.Record, groupID string, contID string) error { - t.writer.RawString(GROUP_ID) - t.writer.RawString(groupID) - if contID != sfgo.Zeros.String { - t.writer.RawByte(FORWARD_SLASH) - t.writer.RawString(contID) - } - t.writer.RawString(QUOTE_COMMA) - t.writer.RawString(OBSERVATIONS) - numRecs := len(recs) - for idx, rec := range recs { - t.encodeTelemetry(rec) - if idx < (numRecs - 1) { - t.writer.RawByte(COMMA) - } - } - t.writer.RawString(END_SQ_SQUIGGLE) - if t.writer.Size() <= BUFFER_SIZE { - return t.proto.Export(t.writer.Buffer.Buf) - } else { - b, err := t.writer.BuildBytes() - if err != nil { - return err - } - return t.proto.Export(b) - } - -} - -// ExportOffenses exports a set of offesnes as JSON objects -func (t *JSONExporter) ExportOffenses(recs []*engine.Record) error { - if len(recs) == 1 { - groupID := engine.Mapper.MapStr(engine.SF_NODE_ID)(recs[0]) - contID := engine.Mapper.MapStr(engine.SF_CONTAINER_ID)(recs[0]) - t.buf = t.buf[:0] - t.writer.Buffer.Buf = t.buf - err := t.exportOffense(recs, groupID, contID) - return err - } else { - var cobs = make(map[string][]*engine.Record) - for _, rec := range recs { - groupID := engine.Mapper.MapStr(engine.SF_NODE_ID)(rec) - contID := engine.Mapper.MapStr(engine.SF_CONTAINER_ID)(rec) - if contID != sfgo.Zeros.String { - groupID = fmt.Sprintf("%s/%s", groupID, contID) - } - if m, ok := cobs[contID]; ok { - cobs[groupID] = append(m, rec) - } else { - cobs[groupID] = append(make([]*engine.Record, 0), rec) - } - } - for k, v := range cobs { - t.buf = t.buf[:0] - t.writer.Buffer.Buf = t.buf - err := t.exportOffense(v, k, sfgo.Zeros.String) - if err != nil { - return err - } - - } - - } - return nil - -} - -// ExportTelemetryRecords exports a set of telemetry records as JSON objects. 
-func (t *JSONExporter) ExportTelemetryRecords(recs []*engine.Record) error { - var b []byte - var err error - for _, rec := range recs { - t.buf = t.buf[:0] - t.writer.Buffer.Buf = t.buf - t.encodeTelemetry(rec) - - if t.writer.Size() <= BUFFER_SIZE { - b = t.writer.Buffer.Buf - } else { - b, err = t.writer.BuildBytes() - if err != nil { - return err - } - } - err = t.proto.Export(b) - if err != nil { - return err - } - } - return nil - -} - -func (t *JSONExporter) writeAttribute(fv *engine.FieldValue, fieldId int, rec *engine.Record) { - t.writer.RawByte(DOUBLE_QUOTE) - t.writer.RawString(fv.FieldSects[fieldId]) - t.writer.RawString(QUOTE_COLON) - MapJSON(fv, t.writer, rec) -} - -func (t *JSONExporter) writeSectionBegin(section string) { - t.writer.RawByte(DOUBLE_QUOTE) - t.writer.RawString(section) - t.writer.RawString(QUOTE_COLON_OSUIG) -} - -func (t *JSONExporter) encodeTelemetry(rec *engine.Record) { - t.writer.RawString(VERSION_STR) - t.writer.RawString(t.config.JSONSchemaVersion) - t.writer.RawByte(COMMA) - state := BEGIN_STATE - pprocID := engine.Mapper.MapInt(engine.SF_PPROC_PID)(rec) - sftype := engine.Mapper.MapStr(engine.SF_TYPE)(rec) - pprocExists := !reflect.ValueOf(pprocID).IsZero() - ct := engine.Mapper.MapStr(engine.SF_CONTAINER_ID)(rec) - ctExists := !reflect.ValueOf(ct).IsZero() - existed := true - /* //Need to add flat support - if config.Flat { - r.FlatRecord = new(FlatRecord) - r.FlatRecord.Data = make(map[string]interface{}) - for _, k := range engine.Fields { - r.Data[k] = engine.Mapper.Mappers[k](rec) - }*/ - - for _, fv := range t.fieldCache { - numFields := len(fv.FieldSects) - if numFields == 2 { - t.writeAttribute(fv, 1, rec) - t.writer.RawByte(COMMA) - } else if numFields == 3 { - switch fv.Entry.Section { - case engine.SectProc: - if state != PROC_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - existed = true - t.writeSectionBegin(PROC) - t.writeAttribute(fv, 2, rec) - state = PROC_STATE - } else { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectPProc: - if state != PPROC_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - if pprocExists { - existed = true - t.writeSectionBegin(PPROC) - t.writeAttribute(fv, 2, rec) - } else { - existed = false - } - state = PPROC_STATE - } else if pprocExists { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectNet: - if state != NET_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - if sftype == engine.TyNF { - t.writeSectionBegin(NET) - t.writeAttribute(fv, 2, rec) - existed = true - } else { - existed = false - } - state = NET_STATE - } else if sftype == engine.TyNF { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectFile: - if state != FILE_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - if sftype == engine.TyFF || sftype == engine.TyFE { - t.writeSectionBegin(FILEF) - t.writeAttribute(fv, 2, rec) - existed = true - } else { - existed = false - } - state = FILE_STATE - } else if sftype == engine.TyFF || sftype == engine.TyFE { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectFlow: - if state != FLOW_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - if sftype == engine.TyFF || sftype == engine.TyNF { - t.writeSectionBegin(FLOW) - t.writeAttribute(fv, 2, rec) - existed = true - } else { - 
existed = false - } - state = FLOW_STATE - } else if sftype == engine.TyFF || sftype == engine.TyNF { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectCont: - if state != CONT_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - if ctExists { - t.writeSectionBegin(CONTAINER) - t.writeAttribute(fv, 2, rec) - existed = true - } else { - existed = false - } - state = CONT_STATE - } else if ctExists { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - case engine.SectNode: - if state != NODE_STATE { - if state != BEGIN_STATE && existed { - t.writer.RawString(END_SQUIGGLE_COMMA) - } - existed = true - t.writeSectionBegin(NODE) - t.writeAttribute(fv, 2, rec) - state = NODE_STATE - } else { - t.writer.RawByte(COMMA) - t.writeAttribute(fv, 2, rec) - } - } - } - - } - t.writer.RawByte(END_SQUIGGLE) - /* // Need to add hash support - hashset := rec.Ctx.GetHashes() - if !reflect.ValueOf(hashset.MD5).IsZero() { - r.Hashes = &hashset - } */ - rules := rec.Ctx.GetRules() - numRules := len(rules) - if numRules > 0 { - t.writer.RawString(POLICIES) - - for id, r := range rules { - t.writer.RawString(ID_TAG) - t.writer.String(r.Name) - t.writer.RawString(DESC) - t.writer.String(r.Desc) - t.writer.RawString(PRIORITY) - t.writer.Int64(int64(r.Priority)) - numTags := len(r.Tags) - currentTag := 0 - if numTags > 0 { - t.writer.RawString(TAGS) - for _, tag := range r.Tags { - switch tag.(type) { - case []string: - tags := tag.([]string) - numTags := numTags + len(tags) - 1 - for _, s := range tags { - t.writer.String(s) - if currentTag < (numTags - 1) { - t.writer.RawByte(COMMA) - } - currentTag += 1 - } - default: - //t.writer.RawByte(DOUBLE_QUOTE) - t.writer.String(tag.(string)) //fmt.Sprintf("%v", tag)) - //t.writer.RawByte(DOUBLE_QUOTE) - if currentTag < (numTags - 1) { - t.writer.RawByte(COMMA) - } - currentTag += 1 - } - } - t.writer.RawByte(END_SQUARE) - } - t.writer.RawByte(END_SQUIGGLE) - if id < (numRules - 1) { - t.writer.RawByte(COMMA) - } - - } - t.writer.RawByte(END_SQUARE) - } - t.writer.RawByte(END_SQUIGGLE) - -} diff --git a/core/exporter/jsonwriter.go b/core/exporter/jsonwriter.go deleted file mode 100644 index 7dbedc16..00000000 --- a/core/exporter/jsonwriter.go +++ /dev/null @@ -1,111 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -package exporter - -import ( - "github.com/mailru/easyjson/jwriter" - "unicode/utf8" -) - -const chars = "0123456789abcdef" - -// code taken from github.com/mailru/easyjson/jwriter to support string encoding. -// original version prepends quotes around string this doesn't. 
-func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - for i := 0; i < 128; i++ { - table[i] = true - } - for _, v := range falseValues { - table[v] = false - } - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -// StringNoQuotes writes an escaped string with a JSON writer. Adapted from github.com/mailru/easyjson/jwriter -func StringNoQuotes(s string, w *jwriter.Writer) { - p := 0 // last non-escape symbol - - escapeTable := &htmlEscapeTable - if w.NoEscapeHTML { - escapeTable = &htmlNoEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) -} diff --git a/core/exporter/mapperutils.go b/core/exporter/mapperutils.go deleted file mode 100644 index ac0257f5..00000000 --- a/core/exporter/mapperutils.go +++ /dev/null @@ -1,296 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -package exporter - -import ( - "path/filepath" - "reflect" - "unsafe" - - "github.com/mailru/easyjson/jwriter" - "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" -) - -func mapOpFlags(fv *engine.FieldValue, writer *jwriter.Writer, r *engine.Record) { - opflags := r.GetInt(fv.Entry.ID, fv.Entry.Source) - rtype := engine.GetRecType(r, fv.Entry.Source) - flags := sfgo.GetOpFlags(int32(opflags), rtype) - mapStrArray(writer, flags) -} - -func mapStrArray(writer *jwriter.Writer, ss []string) { - l := len(ss) - writer.RawByte(BEGIN_SQUARE) - for idx, s := range ss { - writer.RawByte(DOUBLE_QUOTE) - writer.RawString(s) - writer.RawByte(DOUBLE_QUOTE) - if idx < (l - 1) { - writer.RawByte(COMMA) - } - } - writer.RawByte(END_SQUARE) - -} - -func mapIPStr(ip int64, w *jwriter.Writer) { - w.Int64(ip >> 0 & 0xFF) - w.RawByte(PERIOD) - w.Int64(ip >> 8 & 0xFF) - w.RawByte(PERIOD) - w.Int64(ip >> 16 & 0xFF) - w.RawByte(PERIOD) - w.Int64(ip >> 24 & 0xFF) -} -func mapIPs(fv *engine.FieldValue, writer *jwriter.Writer, r *engine.Record) { - srcIP := r.GetInt(sfgo.FL_NETW_SIP_INT, fv.Entry.Source) - dstIP := r.GetInt(sfgo.FL_NETW_DIP_INT, fv.Entry.Source) - writer.RawByte(BEGIN_SQUARE) - writer.RawByte(DOUBLE_QUOTE) - mapIPStr(srcIP, writer) - writer.RawByte(DOUBLE_QUOTE) - writer.RawByte(COMMA) - writer.RawByte(DOUBLE_QUOTE) - mapIPStr(dstIP, writer) - writer.RawByte(DOUBLE_QUOTE) - writer.RawByte(END_SQUARE) -} - -func mapOpenFlags(fv *engine.FieldValue, writer *jwriter.Writer, r *engine.Record) { - flags := sfgo.GetOpenFlags(r.GetInt(fv.Entry.ID, fv.Entry.Source)) - mapStrArray(writer, flags) -} - -func mapPorts(fv *engine.FieldValue, writer *jwriter.Writer, r *engine.Record) { - srcPort := r.GetInt(sfgo.FL_NETW_SPORT_INT, fv.Entry.Source) - dstPort := r.GetInt(sfgo.FL_NETW_DPORT_INT, fv.Entry.Source) - writer.RawByte(BEGIN_SQUARE) - writer.Int64(srcPort) - writer.RawByte(COMMA) - writer.Int64(dstPort) - writer.RawByte(END_SQUARE) -} - -// MapJSON writes a SysFlow attribute to a JSON stream. 
-func MapJSON(fv *engine.FieldValue, writer *jwriter.Writer, r *engine.Record) { - switch fv.Entry.ID { - case engine.A_IDS, engine.PARENT_IDS: - oid := sfgo.OID{CreateTS: r.GetInt(sfgo.PROC_OID_CREATETS_INT, fv.Entry.Source), Hpid: r.GetInt(sfgo.PROC_OID_HPID_INT, fv.Entry.Source)} - SetCachedValueJSON(r, oid, fv.Entry.AuxAttr, writer) - return - } - - switch fv.Entry.Type { - case engine.MapStrVal: - v := r.GetStr(fv.Entry.ID, fv.Entry.Source) - l := len(v) - if l > 0 && (v[0] == '"' || v[0] == '\'') { - boundingQuotes := trimBoundingQuotes(v) - writer.String(boundingQuotes) - } else { - writer.String(v) - } - case engine.MapIntVal: - writer.Int64(r.GetInt(fv.Entry.ID, fv.Entry.Source)) - case engine.MapBoolVal: - writer.Bool(r.GetInt(fv.Entry.ID, fv.Entry.Source) == 1) - case engine.MapSpecialStr: - v := fv.Entry.Map(r).(string) - l := len(v) - if l > 0 && (v[0] == '"' || v[0] == '\'') { - boundingQuotes := trimBoundingQuotes(v) - writer.String(boundingQuotes) - } else { - writer.String(v) - } - case engine.MapSpecialInt: - writer.Int64(fv.Entry.Map(r).(int64)) - case engine.MapSpecialBool: - writer.Bool(fv.Entry.Map(r).(bool)) - case engine.MapArrayStr, engine.MapArrayInt: - if fv.Entry.Source == sfgo.SYSFLOW_SRC { - switch fv.Entry.ID { - case sfgo.EV_PROC_OPFLAGS_INT: - mapOpFlags(fv, writer, r) - return - case sfgo.FL_FILE_OPENFLAGS_INT: - recType := r.GetInt(sfgo.SF_REC_TYPE, fv.Entry.Source) - if recType == sfgo.NET_FLOW { - mapIPs(fv, writer, r) - return - } - mapOpenFlags(fv, writer, r) - return - case sfgo.FL_NETW_SPORT_INT: - mapPorts(fv, writer, r) - return - } - } - - v := fv.Entry.Map(r).(string) - writer.RawByte('[') - writer.String(v) - writer.RawByte(']') - } -} - -func trimBoundingQuotes(s string) string { - if len(s) > 0 && (s[0] == '"' || s[0] == '\'') { - s = s[1:] - } - if len(s) > 0 && (s[len(s)-1] == '"' || s[len(s)-1] == '\'') { - s = s[:len(s)-1] - } - return s -} - -// CheckForQuotes removes unnecessary quotes from a string. -func CheckForQuotes(v string, writer *jwriter.Writer) { - l := len(v) - if l > 0 && (v[0] == '"' || v[0] == '\'') { - boundingQuotes := trimBoundingQuotes(v) - writer.String(boundingQuotes) - } else { - writer.String(v) - } -} - -// UnsafeBytesToString creates a string based on a by array without copying. -func UnsafeBytesToString(b []byte) string { - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - sh := reflect.StringHeader{bh.Data, bh.Len} - return *(*string)(unsafe.Pointer(&sh)) -} - -// SetCachedValueJSON sets the value of attr from cache for process ID to a JSON writer. 
-func SetCachedValueJSON(r *engine.Record, ID sfgo.OID, attr engine.RecAttribute, writer *jwriter.Writer) { - if ptree := r.MemoizePtree(ID); ptree != nil { - switch attr { - case engine.PProcName: - if len(ptree) > 1 { - CheckForQuotes(filepath.Base(ptree[1].Exe), writer) - } - break - case engine.PProcExe: - if len(ptree) > 1 { - CheckForQuotes(ptree[1].Exe, writer) - } - break - case engine.PProcArgs: - if len(ptree) > 1 { - CheckForQuotes(ptree[1].ExeArgs, writer) - } - break - case engine.PProcUID: - if len(ptree) > 1 { - writer.Int64(int64(ptree[1].Uid)) - } - break - case engine.PProcUser: - if len(ptree) > 1 { - CheckForQuotes(ptree[1].UserName, writer) - } - break - case engine.PProcGID: - if len(ptree) > 1 { - writer.Int64(int64(ptree[1].Gid)) - } - break - case engine.PProcGroup: - if len(ptree) > 1 { - CheckForQuotes(ptree[1].GroupName, writer) - } - break - case engine.PProcTTY: - if len(ptree) > 1 { - writer.Bool(ptree[1].Tty) - } - break - case engine.PProcEntry: - if len(ptree) > 1 { - writer.Bool(ptree[1].Entry) - } - break - case engine.PProcCmdLine: - if len(ptree) > 1 { - exe := trimBoundingQuotes(ptree[1].Exe) - exeArgs := trimBoundingQuotes(ptree[1].ExeArgs) - writer.RawByte('"') - StringNoQuotes(exe, writer) - if len(exeArgs) > 0 { - writer.RawByte(' ') - StringNoQuotes(exeArgs, writer) - } - writer.RawByte('"') - } - break - case engine.ProcAName: - //var s []string - l := len(ptree) - writer.RawByte('[') - for i, p := range ptree { - CheckForQuotes(filepath.Base(p.Exe), writer) - if i < (l - 1) { - writer.RawByte(',') - } - } - writer.RawByte(']') - case engine.ProcAExe: - l := len(ptree) - writer.RawByte('[') - for i, p := range ptree { - CheckForQuotes(p.Exe, writer) - if i < (l - 1) { - writer.RawByte(',') - } - } - writer.RawByte(']') - case engine.ProcACmdLine: - l := len(ptree) - writer.RawByte('[') - for i, p := range ptree { - exe := trimBoundingQuotes(p.Exe) - exeArgs := trimBoundingQuotes(p.ExeArgs) - writer.RawByte('"') - StringNoQuotes(exe, writer) - if len(exeArgs) > 0 { - writer.RawByte(' ') - StringNoQuotes(exeArgs, writer) - } - writer.RawByte('"') - if i < (l - 1) { - writer.RawByte(',') - } - } - writer.RawByte(']') - case engine.ProcAPID: - l := len(ptree) - writer.RawByte('[') - for i, p := range ptree { - writer.Int64(p.Oid.Hpid) - if i < (l - 1) { - writer.RawByte(',') - } - } - writer.RawByte(']') - } - } -} diff --git a/core/exporter/offense.go b/core/exporter/offense.go deleted file mode 100644 index 41127299..00000000 --- a/core/exporter/offense.go +++ /dev/null @@ -1,97 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -package exporter - -import ( - "encoding/json" - "fmt" - - "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" -) - -// Offense type -type Offense struct { - GroupID string `json:"groupId"` - Observations []TelemetryRecord `json:"observations"` -} - -// Policy type -type Policy struct { - ID string `json:"id"` - Desc string `json:"desc"` - Priority int `json:"priority"` - Tags []string `json:"tags"` -} - -// CreateOffenses creates offense instances based on a list of records -func CreateOffenses(recs []*engine.Record, config Config) []Event { - var offenses = make([]Event, 0) - var cobs = make(map[string][]TelemetryRecord) - for i, o := range extractObservations(recs, config) { - groupID := engine.Mapper.MapStr(engine.SF_NODE_ID)(recs[i]) - contID := engine.Mapper.MapStr(engine.SF_CONTAINER_ID)(recs[i]) - if contID != sfgo.Zeros.String { - groupID = fmt.Sprintf("%s/%s", groupID, contID) - } - if m, ok := cobs[contID]; ok { - cobs[groupID] = append(m, o) - } else { - cobs[groupID] = append(make([]TelemetryRecord, 0), o) - } - } - for k, v := range cobs { - o := Offense{ - GroupID: k, - Observations: v, - } - offenses = append(offenses, o) - } - return offenses -} - -// ToJSONStr returns a JSON string representation of an offense -func (s Offense) ToJSONStr() string { - return string(s.ToJSON()) -} - -// ToJSON returns a JSON bytearray representation of an offense -func (s Offense) ToJSON() []byte { - o, _ := json.Marshal(s) - return o -} - -// CreateObservations creates offense instances based on a list of records -func CreateObservations(recs []*engine.Record, config Config) []Event { - var observations = make([]Event, 0) - for _, o := range extractObservations(recs, config) { - observations = append(observations, o) - } - return observations -} - -func extractObservations(recs []*engine.Record, config Config) []TelemetryRecord { - var observations = make([]TelemetryRecord, 0) - for _, r := range recs { - o := extractTelemetryRecord(r, config) - observations = append(observations, o) - } - return observations -} diff --git a/core/exporter/telemetry.go b/core/exporter/telemetry.go deleted file mode 100644 index 23f4c040..00000000 --- a/core/exporter/telemetry.go +++ /dev/null @@ -1,266 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -package exporter - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" -) - -// SysFlow record components -const ( - proc = "proc" - pproc = "pproc" - net = "net" - file = "file" - flow = "flow" - container = "container" - node = "node" -) - -// TelemetryRecord type -type TelemetryRecord struct { - Version string `json:"version,omitempty"` - *FlatRecord `json:",omitempty"` - *DataRecord `json:",omitempty"` - Hashes *engine.HashSet `json:"hashes,omitempty"` - Policies []Policy `json:"policies,omitempty"` -} - -// FlatRecord type -type FlatRecord struct { - Data map[string]interface{} `json:"record"` -} - -// DataRecord type (warning: make sure field names have only first letter capitalized) -type DataRecord struct { - Type string `json:"type"` - Opflags []string `json:"opflags"` - Ret int64 `json:"ret"` - Ts int64 `json:"ts"` - Endts int64 `json:"endts,omitempty"` - Schema int64 `json:"schema,omitempty"` - *ProcData `json:",omitempty"` - *PprocData `json:",omitempty"` - *NetData `json:",omitempty"` - *FileData `json:",omitempty"` - *FlowData `json:",omitempty"` - *ContData `json:",omitempty"` - *NodeData `json:",omitempty"` -} - -// ProcData type -type ProcData struct { - Proc map[string]interface{} `json:"proc"` -} - -// PprocData type -type PprocData struct { - Pproc map[string]interface{} `json:"pproc"` -} - -// NetData type -type NetData struct { - Net map[string]interface{} `json:"net"` -} - -// FileData type -type FileData struct { - File map[string]interface{} `json:"file"` -} - -// FlowData type -type FlowData struct { - Flow map[string]interface{} `json:"flow"` -} - -// ContData type -type ContData struct { - Container map[string]interface{} `json:"container"` -} - -// NodeData type -type NodeData struct { - Node map[string]interface{} `json:"node"` -} - -// CreateTelemetryRecords creates offense instances based on a list of records -func CreateTelemetryRecords(occs []*engine.Record, config Config) []Event { - var recs = make([]Event, 0) - for _, o := range occs { - recs = append(recs, extractTelemetryRecord(o, config)) - } - return recs -} - -// ToJSONStr returns a JSON string representation of an observation -func (s TelemetryRecord) ToJSONStr() string { - return string(s.ToJSON()) -} - -// ToJSON returns a JSON bytearray representation of an observation -func (s TelemetryRecord) ToJSON() []byte { - o, _ := json.Marshal(s) - return o -} - -func extractTelemetryRecord(rec *engine.Record, config Config) TelemetryRecord { - r := TelemetryRecord{} - r.Version = config.JSONSchemaVersion - if config.Flat { - r.FlatRecord = new(FlatRecord) - r.FlatRecord.Data = make(map[string]interface{}) - for _, k := range engine.Fields { - r.Data[k] = engine.Mapper.Mappers[k].Map(rec) - } - } else { - r.DataRecord = new(DataRecord) - pprocID := engine.Mapper.MapInt(engine.SF_PPROC_PID)(rec) - pprocExists := !reflect.ValueOf(pprocID).IsZero() - ct := engine.Mapper.MapStr(engine.SF_CONTAINER_ID)(rec) - ctExists := !reflect.ValueOf(ct).IsZero() - for _, k := range engine.Fields { - kc := strings.Split(k, ".") - value := extractValue(k, engine.Mapper.Mappers[k].Map(rec)) - if len(kc) == 2 { - switch value.(type) { - case string: - reflect.ValueOf(r.DataRecord).Elem().FieldByName(strings.Title(kc[1])).SetString(value.(string)) - case int64: - reflect.ValueOf(r.DataRecord).Elem().FieldByName(strings.Title(kc[1])).SetInt(value.(int64)) - case []string: - 
reflect.ValueOf(r.DataRecord).Elem().FieldByName(strings.Title(kc[1])).Set(reflect.ValueOf(value)) - } - } else if len(kc) == 3 { - switch kc[1] { - case proc: - if r.ProcData == nil { - r.ProcData = new(ProcData) - r.ProcData.Proc = make(map[string]interface{}) - } - r.Proc[kc[2]] = value - case pproc: - if pprocExists { - if r.PprocData == nil { - r.PprocData = new(PprocData) - r.PprocData.Pproc = make(map[string]interface{}) - } - r.Pproc[kc[2]] = value - } - case net: - if r.Type == engine.TyNF { - if r.NetData == nil { - r.NetData = new(NetData) - r.NetData.Net = make(map[string]interface{}) - } - r.Net[kc[2]] = value - } - case file: - if r.Type == engine.TyFF || r.Type == engine.TyFE { - if r.FileData == nil { - r.FileData = new(FileData) - r.FileData.File = make(map[string]interface{}) - } - r.File[kc[2]] = value - } - case flow: - if r.Type == engine.TyFF || r.Type == engine.TyNF { - if r.FlowData == nil { - r.FlowData = new(FlowData) - r.FlowData.Flow = make(map[string]interface{}) - } - r.Flow[kc[2]] = value - } - case container: - if ctExists { - if r.ContData == nil { - r.ContData = new(ContData) - r.ContData.Container = make(map[string]interface{}) - } - r.Container[kc[2]] = value - } - case node: - if r.NodeData == nil { - r.NodeData = new(NodeData) - r.NodeData.Node = make(map[string]interface{}) - } - r.Node[kc[2]] = value - } - } - } - } - hashset := rec.Ctx.GetHashes() - if !reflect.ValueOf(hashset.MD5).IsZero() { - r.Hashes = &hashset - } - r.Policies = extractPolicySet(rec.Ctx.GetRules()) - return r -} - -func extractPolicySet(rules []engine.Rule) []Policy { - var pols = make([]Policy, 0) - for _, r := range rules { - p := Policy{ - ID: r.Name, - Desc: r.Desc, - Priority: int(r.Priority), - Tags: extracTags(r.Tags), - } - pols = append(pols, p) - } - return pols -} - -func extracTags(tags []engine.EnrichmentTag) []string { - s := make([]string, 0) - for _, v := range tags { - switch v.(type) { - case []string: - s = append(s, v.([]string)...) - break - default: - s = append(s, string(fmt.Sprintf("%v", v))) - break - } - } - return s -} - -func extractValue(k string, v interface{}) interface{} { - switch v.(type) { - case string: - if array(k) { - return strings.Split(v.(string), engine.LISTSEP) - } - return v - default: - return v - } -} - -func array(k string) bool { - return k == engine.SF_OPFLAGS || k == engine.SF_PROC_APID || k == engine.SF_PROC_ANAME || - k == engine.SF_PROC_AEXE || k == engine.SF_PROC_ACMDLINE || k == engine.SF_FILE_OPENFLAGS || - k == engine.SF_NET_IP || k == engine.SF_NET_PORT -} diff --git a/core/exporter/transports/elastic.go b/core/exporter/transports/elastic.go new file mode 100644 index 00000000..14ac9d32 --- /dev/null +++ b/core/exporter/transports/elastic.go @@ -0,0 +1,144 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Andreas Schade +// Frederico Araujo +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. 
+package transports + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + netmod "net" + "net/http" + "os" + "time" + + elasticsearch "github.com/elastic/go-elasticsearch/v8" + estransport "github.com/elastic/go-elasticsearch/v8/estransport" + "github.com/elastic/go-elasticsearch/v8/esutil" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/encoders" +) + +// ElasticProto implements the TransportProtocol interface for Elastic. +type ElasticProto struct { + es *elasticsearch.Client + config commons.Config + bi esutil.BulkIndexer + ctx context.Context + start time.Time +} + +// NewElasticProto creates a new Elastic protocol object. +func NewElasticProto(conf commons.Config) TransportProtocol { + return &ElasticProto{config: conf} +} + +// Init initializes the Elastic client. +func (s *ElasticProto) Init() (err error) { + cfg := elasticsearch.Config{ + Addresses: s.config.ESAddresses, + Username: s.config.ESUsername, + Password: s.config.ESPassword, + Transport: &http.Transport{ + DialContext: (&netmod.Dialer{Timeout: time.Second}).DialContext, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + //Certificates: []tls.Certificate{cert}, + //RootCAs: caCertPool, + }, + }, + //CACert: ioutil.ReadFile("path/to/ca.crt"), + Logger: &estransport.JSONLogger{Output: os.Stdout}, + } + s.es, err = elasticsearch.NewClient(cfg) + return err +} + +// Export creates the batch, adds the ecs data and executes it +func (s *ElasticProto) Export(data []commons.EncodedData) (err error) { + s.bi, err = esutil.NewBulkIndexer(esutil.BulkIndexerConfig{ + Index: s.config.ESIndex, + Client: s.es, + NumWorkers: s.config.ESNumWorkers, // default: 0 (= number of CPUs) + FlushBytes: s.config.ESFlushBuffer, // default: 5M + FlushInterval: s.config.ESFlushTimeout, // default: 30s + }) + if err != nil { + logger.Error.Println("Failed to create bulk indexer") + return err + } + + s.ctx = context.Background() + s.start = time.Now().UTC() + + for _, d := range data { + if r, ok := d.(*encoders.ECSRecord); ok { + body, err := json.Marshal(r) + if err != nil { + logger.Error.Println("Failed to create json") + return err + } + + err = s.bi.Add(s.ctx, esutil.BulkIndexerItem{ + Action: "create", + DocumentID: r.ID, + Body: bytes.NewReader(body), + OnFailure: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem, err error) { + if err != nil { + logger.Error.Print(err) + } else { + logger.Error.Printf("%s: %s", res.Error.Type, res.Error.Reason) + } + }, + }) + if err != nil { + logger.Error.Println("Failed to add document") + return err + } + } else { + return errors.New("expected ECSRecord as exported data") + } + } + + if err = s.bi.Close(s.ctx); err != nil { + logger.Error.Println("Failed to close bulk indexer") + return err + } + + duration := time.Since(s.start) + biStats := s.bi.Stats() + v := 1000.0 * float64(biStats.NumAdded) / float64(duration/time.Millisecond) + logger.Info.Printf("add=%d\tflush=%d\tfail=%d\treqs=%d\tdur=%-6s\t%6d recs/s", + biStats.NumAdded, biStats.NumFlushed, biStats.NumFailed, biStats.NumRequests, + duration.Truncate(time.Millisecond), int64(v)) + + return +} + +// Register registers the Elastic proto object with the exporter. +func (s *ElasticProto) Register(eps map[commons.Transport]TransportProtocolFactory) { + eps[commons.ESTransport] = NewElasticProto +} + +// Cleanup closes the Elastic connection. 
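// A minimal, hypothetical wiring sketch for the Elastic transport defined above. It only
// uses identifiers visible in this change (NewElasticProto and the commons.Config fields
// read by Init and Export); the address, index, and credentials are placeholder values.
package main

import (
	"time"

	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
	"github.com/sysflow-telemetry/sf-processor/core/exporter/transports"
)

func main() {
	var conf commons.Config
	conf.ESAddresses = []string{"https://localhost:9200"}
	conf.ESIndex = "sysflow"
	conf.ESUsername = "elastic"
	conf.ESPassword = "changeme"
	conf.ESNumWorkers = 0                // 0 = number of CPUs (the esutil default)
	conf.ESFlushBuffer = 5 * 1024 * 1024 // ~5MB, the default noted in Export
	conf.ESFlushTimeout = 30 * time.Second

	proto := transports.NewElasticProto(conf)
	if err := proto.Init(); err != nil {
		panic(err)
	}
	defer proto.Cleanup()

	var batch []commons.EncodedData // normally *encoders.ECSRecord values produced by the ECS encoder
	_ = proto.Export(batch)
}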
+func (s *ElasticProto) Cleanup() {} diff --git a/core/exporter/transports/file.go b/core/exporter/transports/file.go new file mode 100644 index 00000000..2318c6bb --- /dev/null +++ b/core/exporter/transports/file.go @@ -0,0 +1,82 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. +package transports + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" +) + +// TextFileProto implements the TransportProtocol interface for a text file. +type TextFileProto struct { + config commons.Config + fhandle *os.File +} + +// NewTextFileProto creates a new text file protcol object. +func NewTextFileProto(conf commons.Config) TransportProtocol { + return &TextFileProto{config: conf} +} + +// Init initializes the text file. +func (s *TextFileProto) Init() error { + os.Remove(s.config.Path) + f, err := os.OpenFile(s.config.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + s.fhandle = f + return err +} + +// Export writes the buffer to the open file. +func (s *TextFileProto) Export(data []commons.EncodedData) (err error) { + for _, d := range data { + if buf, ok := d.([]byte); ok { + if _, err = s.fhandle.Write(buf); err != nil { + return err + } + s.fhandle.WriteString("\n") + } else if buf, err := json.Marshal(d); err == nil { + if _, err = s.fhandle.Write(buf); err != nil { + return err + } + s.fhandle.WriteString("\n") + } else { + if _, err = s.fhandle.WriteString(fmt.Sprintf("%v\n", d)); err != nil { + return err + } + } + } + return +} + +// Register registers the text file proto object with the exporter. +func (s *TextFileProto) Register(eps map[commons.Transport]TransportProtocolFactory) { + eps[commons.FileTransport] = NewTextFileProto +} + +// Cleanup closes the text file. +func (s *TextFileProto) Cleanup() { + if s.fhandle != nil { + s.fhandle.Close() + } +} diff --git a/core/exporter/transports/findings.go b/core/exporter/transports/findings.go new file mode 100644 index 00000000..99a48dd5 --- /dev/null +++ b/core/exporter/transports/findings.go @@ -0,0 +1,303 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. 
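// A short, hypothetical sketch of the text-file transport above, showing the paths taken by
// Export: raw byte buffers are written verbatim, other values are JSON-marshalled, and anything
// that fails to marshal is written with fmt. The output path is a placeholder, and the batch
// assumes commons.EncodedData admits arbitrary values, as the type switch in Export suggests.
package main

import (
	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
	"github.com/sysflow-telemetry/sf-processor/core/exporter/transports"
)

func main() {
	var conf commons.Config
	conf.Path = "/tmp/sf-export.out" // removed and recreated by Init
	proto := transports.NewTextFileProto(conf)
	if err := proto.Init(); err != nil {
		panic(err)
	}
	defer proto.Cleanup()

	batch := []commons.EncodedData{
		[]byte(`{"already":"encoded"}`),              // written as-is
		map[string]string{"kind": "json-marshalled"}, // serialized with encoding/json
	}
	_ = proto.Export(batch)
}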
+package transports
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/IBM/go-sdk-core/v5/core"
+	"github.com/IBM/scc-go-sdk/v3/common"
+	"github.com/IBM/scc-go-sdk/v3/findingsv1"
+	"github.com/pkg/errors"
+	"github.com/sysflow-telemetry/sf-apis/go/logger"
+	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
+	"github.com/sysflow-telemetry/sf-processor/core/exporter/encoders"
+	"github.com/sysflow-telemetry/sf-processor/core/exporter/utils"
+)
+
+const (
+	details     = "Finding Context"
+	queryURLFmt = "%s/?instance_crn=%s&statement=%s"
+)
+
+// FindingsAPIProto implements a custom client for IBM Cloud Security and Compliance Insights.
+type FindingsAPIProto struct {
+	AccountID   string
+	ProviderID  string
+	APIKey      string
+	FindingsURL string
+	SQLQueryURL string
+	SQLQueryCrn string
+	Region      string
+}
+
+// NewFindingsAPIProto is a constructor for FindingsAPIProto.
+func NewFindingsAPIProto(conf commons.Config) TransportProtocol {
+	return &FindingsAPIProto{AccountID: conf.FindingsAccountID,
+		ProviderID:  conf.FindingsProviderID,
+		APIKey:      conf.FindingsAPIKey,
+		FindingsURL: conf.FindingsURL,
+		SQLQueryURL: conf.FindingsSQLQueryURL,
+		SQLQueryCrn: conf.FindingsSQLQueryCrn,
+		Region:      conf.FindingsRegion}
+}
+
+// Init initializes a new protocol object.
+func (s *FindingsAPIProto) Init() error {
+	return nil
+}
+
+// Test tests the transport protocol.
+func (s *FindingsAPIProto) Test() (bool, error) {
+	service, err := NewFindingsAPI(s.AccountID, s.APIKey, s.FindingsURL)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to instantiate Findings API")
+	}
+	return service.CheckAPIConfiguration(s.ProviderID)
+}
+
+// Export creates a Findings API occurrence for each exported record.
+func (s *FindingsAPIProto) Export(data []commons.EncodedData) (err error) {
+	for _, d := range data {
+		if occ, ok := d.(*encoders.Occurrence); ok {
+			if err = s.CreateOccurrence(occ); err != nil {
+				return
+			}
+		} else {
+			return errors.New("Expected Occurrence object as exported data")
+		}
+	}
+	return
+}
+
+// Register registers the protocol object with the exporter.
+func (s *FindingsAPIProto) Register(eps map[commons.Transport]TransportProtocolFactory) {
+	eps[commons.FindingsTransport] = NewFindingsAPIProto
+}
+
+// Cleanup cleans up the protocol object.
+func (s *FindingsAPIProto) Cleanup() {}
+
+// CreateOccurrence creates a new occurrence of type finding.
+func (s *FindingsAPIProto) CreateOccurrence(occ *encoders.Occurrence) error { + service, err := NewFindingsAPI(s.AccountID, s.APIKey, s.FindingsURL) + if err != nil { + return err + } + + noteName := fmt.Sprintf("%s/providers/%s/notes/%s", s.AccountID, s.ProviderID, occ.NoteID()) + var nextStep []findingsv1.RemediationStep + if occ.AlertQuery != "" { + nextStep = []findingsv1.RemediationStep{{ + Title: core.StringPtr(details), + URL: core.StringPtr(fmt.Sprintf(queryURLFmt, s.SQLQueryURL, s.SQLQueryCrn, occ.AlertQuery))}, + } + } + finding := findingsv1.Finding{Severity: core.StringPtr(occ.Severity.String()), Certainty: core.StringPtr(occ.Certainty.String()), NextSteps: nextStep} + context := findingsv1.Context{Region: core.StringPtr(s.Region), ResourceType: core.StringPtr(occ.ResType), ResourceName: core.StringPtr(occ.ResName)} + + var options = service.Service.NewCreateOccurrenceOptions(s.ProviderID, noteName, findingsv1.CreateOccurrenceOptionsKindFindingConst, occ.ID) + options.SetFinding(&finding) + options.SetContext(&context) + options.SetLongDescription(occ.LongDescr) + options.SetShortDescription(occ.ShortDescr) + + result, response, err := service.CreateOccurrence(options) + if err != nil { + if response != nil { + logger.Error.Println(response.Result) + } + return errors.Wrap(err, "error while creating occurrence") + } + + logger.Trace.Println(response.StatusCode) + logger.Trace.Println(*result.ID) + + return nil +} + +// FindingsAPI implements an API for IBM Findings. +type FindingsAPI struct { + Service *findingsv1.FindingsV1 +} + +// NewFindingsAPI constructs an instance of FindingsAPI with passed in options. +func NewFindingsAPI(accountID string, apiKey string, url string) (service *FindingsAPI, err error) { + authenticator := &core.IamAuthenticator{ + ApiKey: apiKey, + } + serviceOptions := &findingsv1.FindingsV1Options{ + URL: findingsv1.DefaultServiceURL, + Authenticator: authenticator, + AccountID: core.StringPtr(accountID), + } + var baseService *findingsv1.FindingsV1 + baseService, err = findingsv1.NewFindingsV1(serviceOptions) + if err != nil { + return service, errors.Wrap(err, "couldn't instantiate base service for Findings API") + } + service = &FindingsAPI{ + Service: baseService, + } + return +} + +// CheckAPIConfiguration checks Findings API connectivity and access. +func (s *FindingsAPI) CheckAPIConfiguration(providerID string) (pass bool, err error) { + listNotesOptions := s.Service.NewListNotesOptions(providerID) + listNotesResult, listNotesResponse, err := s.Service.ListNotes(listNotesOptions) + if err != nil { + return false, errors.Wrap(err, "couldn't list notes using Findings API") + } + + if listNotesResponse.StatusCode != 200 { + return false, errors.Wrapf(err, "bad response code while checking Findings API: %d", listNotesResponse.StatusCode) + } + + ids := utils.NewSet() + for _, n := range listNotesResult.Notes { + id, err := json.Marshal(n.ID) + if err != nil { + return false, errors.Wrap(err, "can't decode note ID") + } + ids.Add(string(id[1 : len(id)-1])) + } + req := encoders.NoteIDs() + if !req.IsSubset(ids) { + return false, errors.Errorf("Provider doesn't contain required note IDs: %v", req) + } + return true, nil +} + +// CreateOccurrence : Create an occurrence +// Create an occurrence to denote the existence of a particular type of finding. +// +// An occurrence describes provider-specific details of a note and contains vulnerability details, remediation steps, +// and other general information. 
+// Extends https://github.com/IBM/scc-go-sdk/blob/41f22b39e9ceea47d5c0c0a5515d9eaf5fee23d0/v3/findingsv1/findings_v1.go#L806 +// to include short and long descriptions. +func (findings *FindingsAPI) CreateOccurrence(createOccurrenceOptions *findingsv1.CreateOccurrenceOptions) (result *findingsv1.APIOccurrence, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createOccurrenceOptions, "createOccurrenceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createOccurrenceOptions, "createOccurrenceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_id": *findings.Service.AccountID, + "provider_id": *createOccurrenceOptions.ProviderID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(context.Background()) + builder.EnableGzipCompression = findings.Service.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(findings.Service.Service.Options.URL, `/v1/{account_id}/providers/{provider_id}/occurrences`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createOccurrenceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("findings", "V1", "CreateOccurrence") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createOccurrenceOptions.TransactionID != nil { + builder.AddHeader("Transaction-Id", fmt.Sprint(*createOccurrenceOptions.TransactionID)) + } + if createOccurrenceOptions.ReplaceIfExists != nil { + builder.AddHeader("Replace-If-Exists", fmt.Sprint(*createOccurrenceOptions.ReplaceIfExists)) + } + + body := make(map[string]interface{}) + if createOccurrenceOptions.NoteName != nil { + body["note_name"] = createOccurrenceOptions.NoteName + } + if createOccurrenceOptions.Kind != nil { + body["kind"] = createOccurrenceOptions.Kind + } + if createOccurrenceOptions.ID != nil { + body["id"] = createOccurrenceOptions.ID + } + if createOccurrenceOptions.ResourceURL != nil { + body["resource_url"] = createOccurrenceOptions.ResourceURL + } + if createOccurrenceOptions.Remediation != nil { + body["remediation"] = createOccurrenceOptions.Remediation + } + if createOccurrenceOptions.CreateTime != nil { + body["create_time"] = createOccurrenceOptions.CreateTime + } + if createOccurrenceOptions.UpdateTime != nil { + body["update_time"] = createOccurrenceOptions.UpdateTime + } + if createOccurrenceOptions.Context != nil { + body["context"] = createOccurrenceOptions.Context + } + if createOccurrenceOptions.Finding != nil { + body["finding"] = createOccurrenceOptions.Finding + } + if createOccurrenceOptions.Kpi != nil { + body["kpi"] = createOccurrenceOptions.Kpi + } + if createOccurrenceOptions.ReferenceData != nil { + body["reference_data"] = createOccurrenceOptions.ReferenceData + } + if createOccurrenceOptions.LongDescription != nil { + body["long_description"] = createOccurrenceOptions.LongDescription + } + if createOccurrenceOptions.ShortDescription != nil { + body["short_description"] = createOccurrenceOptions.ShortDescription + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = findings.Service.Service.Request(request, &rawResponse) + if err != nil { + return + } + if 
rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, findingsv1.UnmarshalAPIOccurrence) + if err != nil { + return + } + response.Result = result + } + + return +} diff --git a/core/exporter/transports/findings_test.go b/core/exporter/transports/findings_test.go new file mode 100644 index 00000000..f51bd336 --- /dev/null +++ b/core/exporter/transports/findings_test.go @@ -0,0 +1,25 @@ +package transports_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/transports" +) + +func TestFindings(t *testing.T) { + config := commons.Config{ + FindingsConfig: commons.FindingsConfig{ + FindingsAccountID: "", + FindingsAPIKey: "", + FindingsProviderID: "system-analytics-pipeline", + FindingsRegion: "us-south", + }, + } + proto := transports.NewFindingsAPIProto(config) + if p, ok := proto.(transports.TestableTransportProtocol); ok { + _, err := p.Test() + assert.NoError(t, err) + } +} diff --git a/core/exporter/transports/null.go b/core/exporter/transports/null.go new file mode 100644 index 00000000..592e126e --- /dev/null +++ b/core/exporter/transports/null.go @@ -0,0 +1,51 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. +package transports + +import "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + +// NullProto implements the TransportProtocol interface with not output +// for performance testing. +type NullProto struct { +} + +// NewNullProto creates a new null protocol object. +func NewNullProto(conf commons.Config) TransportProtocol { + return &NullProto{} +} + +// Init intializes a new null protocol object. +func (s *NullProto) Init() error { + return nil +} + +// Export does nothing. +func (s *NullProto) Export(data []commons.EncodedData) error { + return nil +} + +// Register registers the null protocol object with the exporter. +func (s *NullProto) Register(eps map[commons.Transport]TransportProtocolFactory) { + eps[commons.NullTransport] = NewNullProto +} + +// Cleanup cleans up the null protocol object. +func (s *NullProto) Cleanup() {} diff --git a/core/exporter/transports/protocol.go b/core/exporter/transports/protocol.go new file mode 100644 index 00000000..d0287f2f --- /dev/null +++ b/core/exporter/transports/protocol.go @@ -0,0 +1,40 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. +package transports + +import "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + +// TransportProtocol is an interface to support a transport protocol. +type TransportProtocol interface { + Register(eps map[commons.Transport]TransportProtocolFactory) + Init() error + Export(data []commons.EncodedData) error + Cleanup() +} + +// TestableTransportProtocol is a testable transport protocol. +type TestableTransportProtocol interface { + TransportProtocol + Test() (bool, error) +} + +// TransportProtocolFactory defines a factory type for transport protocols. +type TransportProtocolFactory func(commons.Config) TransportProtocol diff --git a/core/exporter/transports/syslog.go b/core/exporter/transports/syslog.go new file mode 100644 index 00000000..cc85e7ba --- /dev/null +++ b/core/exporter/transports/syslog.go @@ -0,0 +1,94 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. +package transports + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + + syslog "github.com/RackSec/srslog" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" +) + +// SyslogProto implements the TransportProtocol interface for syslog. +type SyslogProto struct { + sysl *syslog.Writer + config commons.Config +} + +// NewSyslogProto creates a new syslog protocol object. +func NewSyslogProto(conf commons.Config) TransportProtocol { + return &SyslogProto{config: conf} +} + +// Init initializes the syslog daemon connection. +func (s *SyslogProto) Init() error { + var err error + raddr := fmt.Sprintf("%s:%d", s.config.Host, s.config.Port) + if s.config.Proto == commons.TCPTLSProto { + // TODO: verify connection with given trust certifications + nopTLSConfig := &tls.Config{InsecureSkipVerify: true} + s.sysl, err = syslog.DialWithTLSConfig("tcp+tls", raddr, syslog.LOG_ALERT|syslog.LOG_DAEMON, s.config.Tag, nopTLSConfig) + } else { + s.sysl, err = syslog.Dial(s.config.Proto.String(), raddr, syslog.LOG_ALERT|syslog.LOG_DAEMON, s.config.Tag) + } + if err == nil { + s.sysl.SetFormatter(syslog.RFC5424Formatter) + if s.config.LogSource != sfgo.Zeros.String { + s.sysl.SetHostname(s.config.LogSource) + } + } + return err +} + +// Export sends buffer to syslog daemon as an alert. 
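// A hedged sketch of how the Register/factory contract defined in protocol.go above is
// typically consumed: each transport registers a TransportProtocolFactory under its
// commons.Transport key, and the exporter instantiates the protocol for the configured
// transport. The registry variable and the chosen key are illustrative; the real exporter
// wiring may differ.
package main

import (
	"github.com/sysflow-telemetry/sf-processor/core/exporter/commons"
	"github.com/sysflow-telemetry/sf-processor/core/exporter/transports"
)

func main() {
	registry := make(map[commons.Transport]transports.TransportProtocolFactory)
	transports.NewNullProto(commons.Config{}).Register(registry)   // registers commons.NullTransport
	transports.NewSyslogProto(commons.Config{}).Register(registry) // registers commons.SyslogTransport

	var conf commons.Config // normally built from the exporter configuration
	if factory, ok := registry[commons.NullTransport]; ok {
		proto := factory(conf)
		if err := proto.Init(); err != nil {
			panic(err)
		}
		defer proto.Cleanup()
		_ = proto.Export(nil)
	}
}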
+func (s *SyslogProto) Export(data []commons.EncodedData) (err error) { + for _, d := range data { + if buf, ok := d.([]byte); ok { + if err = s.sysl.Alert(utils.UnsafeBytesToString(buf)); err != nil { + return err + } + } else if buf, err := json.Marshal(d); err == nil { + if err = s.sysl.Alert(utils.UnsafeBytesToString(buf)); err != nil { + return err + } + } else { + return errors.New("expected byte array or serializable object as export data") + } + } + return +} + +// Register registers the syslog proto object with the exporter. +func (s *SyslogProto) Register(eps map[commons.Transport]TransportProtocolFactory) { + eps[commons.SyslogTransport] = NewSyslogProto +} + +// Cleanup closes the syslog connection. +func (s *SyslogProto) Cleanup() { + if s.sysl != nil { + s.sysl.Close() + } +} diff --git a/core/exporter/transports/terminal.go b/core/exporter/transports/terminal.go new file mode 100644 index 00000000..19f066ee --- /dev/null +++ b/core/exporter/transports/terminal.go @@ -0,0 +1,64 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transports implements transports for telemetry data. +package transports + +import ( + "encoding/json" + "fmt" + + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/core/exporter/utils" +) + +// TerminalProto implements the TransportProtocol interface of a terminal output. +type TerminalProto struct{} + +// NewTerminalProto creates a new terminal protcol object. +func NewTerminalProto(conf commons.Config) TransportProtocol { + return &TerminalProto{} +} + +// Init initializes the terminal output object. +func (s *TerminalProto) Init() error { + return nil +} + +// Export exports the contents of buffer for the terminal. +func (s *TerminalProto) Export(data []commons.EncodedData) error { + for _, d := range data { + if buf, ok := d.([]byte); ok { + fmt.Println(utils.UnsafeBytesToString(buf)) + } else if buf, err := json.Marshal(d); err == nil { + fmt.Println(utils.UnsafeBytesToString(buf)) + } else { + fmt.Printf("%v\n", d) + } + } + return nil +} + +// Register registers the terminal proto object with the exporter. +func (s *TerminalProto) Register(eps map[commons.Transport]TransportProtocolFactory) { + eps[commons.StdOutTransport] = NewTerminalProto +} + +// Cleanup cleans up the terminal output object. +func (s *TerminalProto) Cleanup() {} diff --git a/core/exporter/utils/set.go b/core/exporter/utils/set.go new file mode 100644 index 00000000..a71292a5 --- /dev/null +++ b/core/exporter/utils/set.go @@ -0,0 +1,66 @@ +// +// Copyright (C) 2021 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package utils implements common helpers for the exporter.
+package utils
+
+var exists = struct{}{}
+
+// Set defines a set data structure.
+type Set struct {
+	m map[string]struct{}
+}
+
+// NewSet creates a new set.
+func NewSet() *Set {
+	s := &Set{}
+	s.m = make(map[string]struct{})
+	return s
+}
+
+// Add adds an element to the set.
+func (s *Set) Add(value string) {
+	s.m[value] = exists
+}
+
+// Remove removes an element from the set.
+func (s *Set) Remove(value string) {
+	delete(s.m, value)
+}
+
+// Contains checks if value is in the set.
+func (s *Set) Contains(value string) bool {
+	_, c := s.m[value]
+	return c
+}
+
+// Len returns the number of elements in the set.
+func (s *Set) Len() int {
+	return len(s.m)
+}
+
+// IsSubset checks if set s is a subset of l.
+func (s *Set) IsSubset(l *Set) bool {
+	for k := range s.m {
+		if !l.Contains(k) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/core/exporter/utils/utils.go b/core/exporter/utils/utils.go
new file mode 100644
index 00000000..c97fe293
--- /dev/null
+++ b/core/exporter/utils/utils.go
@@ -0,0 +1,80 @@
+//
+// Copyright (C) 2021 IBM Corporation.
+//
+// Authors:
+// Frederico Araujo
+// Teryl Taylor
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package utils implements common helpers for the exporter.
+package utils
+
+import (
+	"reflect"
+	"time"
+	"unsafe"
+
+	"github.com/sysflow-telemetry/sf-apis/go/sfgo"
+)
+
+// TrimBoundingQuotes removes bounding quotes from a string.
+func TrimBoundingQuotes(s string) string {
+	if len(s) > 0 && (s[0] == '"' || s[0] == '\'') {
+		s = s[1:]
+	}
+	if len(s) > 0 && (s[len(s)-1] == '"' || s[len(s)-1] == '\'') {
+		s = s[:len(s)-1]
+	}
+	return s
+}
+
+// Max returns the larger of two integers, x or y.
+func Max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// Max64 returns the larger of two 64-bit integers, x or y.
+func Max64(x, y int64) int64 {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// ToIsoTimeStr converts a unix time value in ns to UTC time and returns an RFC3339 string
+func ToIsoTimeStr(ts int64) string {
+	tsSec := int64(ts / 1e+9)
+	tsNs := int64(ts % 1e+9)
+	t := time.Unix(tsSec, tsNs).In(time.UTC)
+	return t.Format(time.RFC3339Nano)
+}
+
+// ToIPStrArray converts an array of int IP values to an IP string array
+func ToIPStrArray(ips *[]int64) []string {
+	ipstrs := make([]string, 0)
+	for _, ip := range *ips {
+		ipstrs = append(ipstrs, sfgo.GetIPStr(int32(ip)))
+	}
+	return ipstrs
+}
+
+// UnsafeBytesToString creates a string based on a byte slice without copying.
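// A brief usage sketch for the Set helper above, mirroring how CheckAPIConfiguration uses
// it to verify that required note IDs are present. The IDs below are placeholders.
package main

import (
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/exporter/utils"
)

func main() {
	required := utils.NewSet()
	required.Add("notification-1") // placeholder note IDs
	required.Add("notification-2")

	available := utils.NewSet()
	available.Add("notification-1")
	available.Add("notification-2")
	available.Add("extra-note")

	fmt.Println(available.Contains("extra-note")) // true
	fmt.Println(available.Len())                  // 3
	fmt.Println(required.IsSubset(available))     // true: all required IDs are available
	fmt.Println(available.IsSubset(required))     // false: "extra-note" is not required
}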
+func UnsafeBytesToString(b []byte) string { + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := reflect.StringHeader{Data: bh.Data, Len: bh.Len} + return *(*string)(unsafe.Pointer(&sh)) //nolint:govet +} diff --git a/core/flattener/config.go b/core/flattener/config.go new file mode 100644 index 00000000..dc2f6484 --- /dev/null +++ b/core/flattener/config.go @@ -0,0 +1,82 @@ +// +// Copyright (C) 2022 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flattener flattens input telemetry in a flattened representation. +package flattener + +import ( + "strconv" + "time" +) + +// Configuration keys. +const ( + FilterOnOffKey string = "filter.enabled" + FilterMaxAgeKey string = "filter.maxage" +) + +// Config defines a configuration object for the engine. +type Config struct { + FilterOnOff OnOff + FilterMaxAge time.Duration +} + +// CreateConfig creates a new config object from config dictionary. +func CreateConfig(conf map[string]interface{}) (Config, error) { + var c Config = Config{FilterOnOff: Off, FilterMaxAge: 24 * time.Hour} // default values + var err error + if v, ok := conf[FilterOnOffKey].(string); ok { + c.FilterOnOff = parseOnOffType(v) + } + if v, ok := conf[FilterMaxAgeKey].(string); ok { + var duration int + duration, err = strconv.Atoi(v) + if err == nil { + c.FilterMaxAge = time.Duration(duration) * time.Second + } + } + return c, err +} + +// OnOff defines an On-Off state type. +type OnOff int32 + +// OnOff types. +const ( + Off OnOff = iota + On +) + +func (s OnOff) String() string { + return [...]string{"off", "on"}[s] +} + +func (s OnOff) Enabled() bool { + return s == On +} + +func parseOnOffType(s string) OnOff { + if Off.String() == s { + return Off + } + if On.String() == s { + return On + } + return Off +} diff --git a/core/flattener/filter.go b/core/flattener/filter.go new file mode 100644 index 00000000..674153e3 --- /dev/null +++ b/core/flattener/filter.go @@ -0,0 +1,126 @@ +// +// Copyright (C) 2022 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flattener flattens input telemetry in a flattened representation. +package flattener + +import ( + "container/list" + "encoding/binary" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" +) + +var byteInt64 []byte = make([]byte, 8) + +// Filter is a time decaying filter with a TTL per entry. 
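// A small sketch of how CreateConfig above interprets the handler configuration:
// filter.enabled takes "on"/"off" and filter.maxage is a TTL given in seconds. The values
// and the import path (derived from the module path used elsewhere in this change) are
// illustrative.
package main

import (
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/flattener"
)

func main() {
	conf, err := flattener.CreateConfig(map[string]interface{}{
		flattener.FilterOnOffKey:  "on",
		flattener.FilterMaxAgeKey: "3600", // seconds, parsed with strconv.Atoi
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.FilterOnOff.Enabled()) // true
	fmt.Println(conf.FilterMaxAge)          // 1h0m0s
}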
+type Filter struct { + m map[uint64]int64 + q *list.List + ttl time.Duration +} + +// Entry encodes a hash value with the time it was first added to the filter. +type Entry struct { + h uint64 + firstSeen time.Time +} + +// NewFilter creates a new time decaying filter that evicts entries that have been seen longer than t duration. +func NewFilter(t time.Duration) *Filter { + return &Filter{m: make(map[uint64]int64), q: list.New(), ttl: t} +} + +// Test tests if hash h has been seen since maximum ttl. +func (f *Filter) Test(h uint64) bool { + f.evictAgedEntries() + _, ok := f.m[h] + return ok +} + +// TestAndAdd tests if hash h has been seen since maximum ttl and adds or increments the element in the filter cache. +func (f *Filter) TestAndAdd(h uint64) bool { + f.evictAgedEntries() + _, ok := f.m[h] + f.Add(h) + return ok +} + +// Contains returns how many times hash h has been seen during its ttl time. +func (f *Filter) Count(h uint64) int64 { + f.evictAgedEntries() + if count, ok := f.m[h]; ok { + return count + } + return 0 +} + +// Add adds hash h to the filter. +func (f *Filter) Add(h uint64) { + if v, ok := f.m[h]; !ok { + f.m[h] = 1 + f.q.PushBack(Entry{h: h, firstSeen: time.Now()}) + } else { + f.m[h] = v + 1 + } +} + +func (f *Filter) evictAgedEntries() { + for f.q.Len() > 0 { + e := f.q.Front() + entry := e.Value.(Entry) + if time.Since(entry.firstSeen) < f.ttl { + break + } + f.q.Remove(e) + delete(f.m, entry.h) + } +} + +// semanticHash computes a hash value over record attributes denoting the semantics of the record (used in the time decay filter). +func semanticHash(fr *sfgo.FlatRecord) uint64 { + h := xxhash.New() + h.Write([]byte(fr.Strs[sfgo.SYSFLOW_SRC][sfgo.PROC_EXE_STR])) + h.Write([]byte(fr.Strs[sfgo.SYSFLOW_SRC][sfgo.PROC_EXEARGS_STR])) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.PROC_UID_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.PROC_GID_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.OPFLAGS_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.PROC_TTY_INT])) + h.Write(byteInt64) + sfType := fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] + if sfType == sfgo.NET_FLOW { + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.FL_NETW_SIP_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.FL_NETW_DIP_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.FL_NETW_DPORT_INT])) + h.Write(byteInt64) + binary.LittleEndian.PutUint64(byteInt64, uint64(fr.Ints[sfgo.SYSFLOW_SRC][sfgo.FL_NETW_PROTO_INT])) + h.Write(byteInt64) + } + if sfType == sfgo.FILE_FLOW || sfType == sfgo.FILE_EVT { + h.Write([]byte(fr.Strs[sfgo.SYSFLOW_SRC][sfgo.FILE_PATH_STR])) + } + return h.Sum64() +} diff --git a/core/flattener/flattener.go b/core/flattener/flattener.go index f4119412..b7bdb83b 100644 --- a/core/flattener/flattener.go +++ b/core/flattener/flattener.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +17,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/core/flattener/flattener.go b/core/flattener/flattener.go
index f4119412..b7bdb83b 100644
--- a/core/flattener/flattener.go
+++ b/core/flattener/flattener.go
@@ -4,6 +4,7 @@
 // Authors:
 // Frederico Araujo
 // Teryl Taylor
+// Andreas Schade
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +17,13 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
+
+// Package flattener flattens input telemetry into a flat record representation.
 package flattener
 
 import (
+    "encoding/hex"
+    "encoding/json"
     "strings"
 
     "github.com/sysflow-telemetry/sf-apis/go/logger"
@@ -32,19 +36,16 @@ const (
     channelName string = "flattenerchan"
 )
 
-// FlatChannel defines a multi-source flat channel
-type FlatChannel struct {
-    In chan *sfgo.FlatRecord
-}
-
 // NewFlattenerChan creates a new channel with given capacity.
 func NewFlattenerChan(size int) interface{} {
-    return &FlatChannel{In: make(chan *sfgo.FlatRecord, size)}
+    return &plugins.Channel[*sfgo.FlatRecord]{In: make(chan *sfgo.FlatRecord, size)}
 }
 
 // Flattener defines the main class for the flatterner plugin.
 type Flattener struct {
-    outCh chan *sfgo.FlatRecord
+    config Config
+    filter *Filter
+    outCh  []chan *sfgo.FlatRecord
 }
 
 // NewFlattener creates a new Flattener instance.
@@ -52,14 +53,23 @@ func NewFlattener() plugins.SFHandler {
     return new(Flattener)
 }
 
-// Register registers plugin to plugin cache.
-func (s *Flattener) Register(pc plugins.SFPluginCache) {
-    pc.AddHandler(handlerName, NewFlattener)
+// RegisterChannel registers channels to plugin cache.
+func (s *Flattener) RegisterChannel(pc plugins.SFPluginCache) {
     pc.AddChannel(channelName, NewFlattenerChan)
 }
 
+// RegisterHandler registers handler to handler cache.
+func (s *Flattener) RegisterHandler(hc plugins.SFHandlerCache) {
+    hc.AddHandler(handlerName, NewFlattener)
+}
+
 // Init initializes the handler with a configuration map.
-func (s *Flattener) Init(conf map[string]string) error {
+func (s *Flattener) Init(conf map[string]interface{}) error {
+    s.config, _ = CreateConfig(conf) // no err check, assuming defaults
+    if s.config.FilterOnOff.Enabled() {
+        s.filter = NewFilter(s.config.FilterMaxAge)
+        logger.Info.Printf("Initialized rate limiter with %s time decay", s.config.FilterMaxAge)
+    }
     return nil
 }
 
@@ -69,43 +79,77 @@ func (s *Flattener) IsEntityEnabled() bool {
 }
 
 // SetOutChan sets the plugin output channel.
-func (s *Flattener) SetOutChan(chObj interface{}) {
-    s.outCh = chObj.(*FlatChannel).In
+func (s *Flattener) SetOutChan(chObj []interface{}) {
+    for _, ch := range chObj {
+        s.outCh = append(s.outCh, ch.(*plugins.Channel[*sfgo.FlatRecord]).In)
+    }
+}
+
+// out applies the optional rate-limiting filter and sends the record to every output channel in the plugin.
+func (s *Flattener) out(fr *sfgo.FlatRecord) {
+    if s.config.FilterOnOff.Enabled() && s.filter != nil && s.filter.TestAndAdd(semanticHash(fr)) {
+        return
+    }
+    for _, c := range s.outCh {
+        c <- fr
+    }
 }
 
 // Cleanup tears down resources.
 func (s *Flattener) Cleanup() {
     logger.Trace.Println("Calling Cleanup on Flattener channel")
     if s.outCh != nil {
-        close(s.outCh)
+        for _, ch := range s.outCh {
+            close(ch)
+        }
     }
 }
 
 // HandleHeader processes Header entities.
-func (s *Flattener) HandleHeader(hdr *sfgo.SFHeader) error {
+func (s *Flattener) HandleHeader(sf *plugins.CtxSysFlow, hdr *sfgo.SFHeader) error {
     return nil
 }
 
 // HandleContainer processes Container entities.
-func (s *Flattener) HandleContainer(hdr *sfgo.SFHeader, cont *sfgo.Container) error {
+func (s *Flattener) HandleContainer(sf *plugins.CtxSysFlow, cont *sfgo.Container) error {
+    return nil
+}
+
+// HandlePod processes Pod entities.
+func (s *Flattener) HandlePod(sf *plugins.CtxSysFlow, cont *sfgo.Pod) error {
+    return nil
+}
+
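Reviewer note: the flow and event handlers further below route records through the new out method rather than writing to a single channel (the K8s event handler writes to the output channels directly). The toy program here, which is not part of the patch and uses simplified stand-in types instead of the sf-apis ones, captures that behavior: a record whose semantic key was already seen is dropped, otherwise it is fanned out to every registered output channel.

package main

import "fmt"

type record struct{ key uint64 }

type fanout struct {
    outCh []chan *record
    seen  map[uint64]bool // stand-in for the time-decaying Filter keyed by semanticHash
}

// out mirrors the filter-then-broadcast shape of Flattener.out (without the TTL).
func (f *fanout) out(r *record) {
    if f.seen[r.key] {
        return // rate-limited duplicate
    }
    f.seen[r.key] = true
    for _, c := range f.outCh {
        c <- r
    }
}

func main() {
    a, b := make(chan *record, 2), make(chan *record, 2)
    f := &fanout{outCh: []chan *record{a, b}, seen: map[uint64]bool{}}
    f.out(&record{key: 42})
    f.out(&record{key: 42})     // suppressed duplicate
    fmt.Println(len(a), len(b)) // 1 1
}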
+// HandleK8sEvt processes K8s Events.
+func (s *Flattener) HandleK8sEvt(sf *plugins.CtxSysFlow, ke *sfgo.K8sEvent) error {
+    fr := newFlatRecord()
+    fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] = sfgo.K8S_EVT
+    s.fillHeader(sf.Header, fr)
+    fr.Ints[sfgo.SYSFLOW_IDX][sfgo.TS_INT] = ke.Ts
+    fr.Strs[sfgo.SYSFLOW_IDX][sfgo.K8SE_MESSAGE_STR] = ke.Message
+    fr.Ints[sfgo.SYSFLOW_IDX][sfgo.K8SE_KIND_INT] = int64(ke.Kind)
+    fr.Ints[sfgo.SYSFLOW_IDX][sfgo.K8SE_ACTION_INT] = int64(ke.Action)
+    for _, ch := range s.outCh {
+        ch <- fr
+    }
     return nil
 }
 
 // HandleProcess processes Process entities.
-func (s *Flattener) HandleProcess(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process) error {
+func (s *Flattener) HandleProcess(sf *plugins.CtxSysFlow, proc *sfgo.Process) error {
     return nil
 }
 
 // HandleFile processes File entities.
-func (s *Flattener) HandleFile(hdr *sfgo.SFHeader, cont *sfgo.Container, file *sfgo.File) error {
+func (s *Flattener) HandleFile(sf *plugins.CtxSysFlow, file *sfgo.File) error {
     return nil
 }
 
 // HandleNetFlow processes Network Flows.
-func (s *Flattener) HandleNetFlow(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process, nf *sfgo.NetworkFlow) error {
+func (s *Flattener) HandleNetFlow(sf *plugins.CtxSysFlow, nf *sfgo.NetworkFlow) error {
     fr := newFlatRecord()
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] = sfgo.NET_FLOW
-    s.fillEntities(hdr, cont, proc, nil, fr)
+    s.fillEntities(sf.Header, sf.Pod, sf.Container, sf.Process, nil, fr)
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_TS_INT] = nf.Ts
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_TID_INT] = nf.Tid
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_OPFLAGS_INT] = int64(nf.OpFlags)
@@ -120,15 +164,17 @@ func (s *Flattener) HandleNetFlow(hdr *sfgo.SFHeader, cont *sfgo.Container, proc
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_NUMWSENDOPS_INT] = nf.NumWSendOps
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_NUMRRECVBYTES_INT] = nf.NumRRecvBytes
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_NETW_NUMWSENDBYTES_INT] = nf.NumWSendBytes
-    s.outCh <- fr
+    fr.Ptree = sf.PTree
+    fr.GraphletID = sf.GraphletID
+    s.out(fr)
     return nil
 }
 
 // HandleFileFlow processes File Flows.
-func (s *Flattener) HandleFileFlow(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process, file *sfgo.File, ff *sfgo.FileFlow) error {
+func (s *Flattener) HandleFileFlow(sf *plugins.CtxSysFlow, ff *sfgo.FileFlow) error {
     fr := newFlatRecord()
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] = sfgo.FILE_FLOW
-    s.fillEntities(hdr, cont, proc, file, fr)
+    s.fillEntities(sf.Header, sf.Pod, sf.Container, sf.Process, sf.File, fr)
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_TS_INT] = ff.Ts
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_TID_INT] = ff.Tid
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_OPFLAGS_INT] = int64(ff.OpFlags)
@@ -139,58 +185,87 @@ func (s *Flattener) HandleFileFlow(hdr *sfgo.SFHeader, cont *sfgo.Container, pro
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_NUMWSENDOPS_INT] = ff.NumWSendOps
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_NUMRRECVBYTES_INT] = ff.NumRRecvBytes
     fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FL_FILE_NUMWSENDBYTES_INT] = ff.NumWSendBytes
-    s.outCh <- fr
+    fr.Ptree = sf.PTree
+    fr.GraphletID = sf.GraphletID
+    s.out(fr)
     return nil
 }
 
 // HandleFileEvt processes File Events.
-func (s *Flattener) HandleFileEvt(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process, file1 *sfgo.File, file2 *sfgo.File, fe *sfgo.FileEvent) error { +func (s *Flattener) HandleFileEvt(sf *plugins.CtxSysFlow, fe *sfgo.FileEvent) error { fr := newFlatRecord() - if file2 != nil { - fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_STATE_INT] = int64(file2.State) - fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_TS_INT] = file2.Ts - fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_RESTYPE_INT] = int64(file2.Restype) - fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_PATH_STR] = strings.TrimSpace(file2.Path) - if file2.ContainerId != nil && file2.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { - fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_CONTAINERID_STRING_STR] = file2.ContainerId.String + if sf.NewFile != nil { + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_STATE_INT] = int64(sf.NewFile.State) + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_TS_INT] = sf.NewFile.Ts + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_RESTYPE_INT] = int64(sf.NewFile.Restype) + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_OID_STR] = getOIDStr(sf.NewFile.Oid[:]) + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_PATH_STR] = strings.TrimSpace(sf.NewFile.Path) + if sf.NewFile.ContainerId != nil && sf.NewFile.ContainerId.UnionType == sfgo.ContainerIdUnionTypeEnumString { + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_CONTAINERID_STRING_STR] = sf.NewFile.ContainerId.String } else { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_CONTAINERID_STRING_STR] = sfgo.Zeros.String } + } else { + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_STATE_INT] = sfgo.Zeros.Int64 + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_TS_INT] = sfgo.Zeros.Int64 + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_RESTYPE_INT] = sfgo.Zeros.Int64 + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_PATH_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_CONTAINERID_STRING_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SEC_FILE_OID_STR] = sfgo.Zeros.String } fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] = sfgo.FILE_EVT - s.fillEntities(hdr, cont, proc, file1, fr) + s.fillEntities(sf.Header, sf.Pod, sf.Container, sf.Process, sf.File, fr) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_FILE_TS_INT] = fe.Ts fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_FILE_TID_INT] = fe.Tid fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_FILE_OPFLAGS_INT] = int64(fe.OpFlags) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_FILE_RET_INT] = int64(fe.Ret) - s.outCh <- fr + fr.Ptree = sf.PTree + fr.GraphletID = sf.GraphletID + s.out(fr) + return nil +} + +// HandleNetEvt processes Network Events. +func (s *Flattener) HandleNetEvt(sf *plugins.CtxSysFlow, ne *sfgo.NetworkEvent) error { + return nil +} + +// HandleProcFlow processes Process Flows. +func (s *Flattener) HandleProcFlow(sf *plugins.CtxSysFlow, pf *sfgo.ProcessFlow) error { return nil } // HandleProcEvt processes Process Events. 
-func (s *Flattener) HandleProcEvt(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process, pe *sfgo.ProcessEvent) error { +func (s *Flattener) HandleProcEvt(sf *plugins.CtxSysFlow, pe *sfgo.ProcessEvent) error { fr := newFlatRecord() fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] = sfgo.PROC_EVT - s.fillEntities(hdr, cont, proc, nil, fr) + s.fillEntities(sf.Header, sf.Pod, sf.Container, sf.Process, nil, fr) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_TS_INT] = pe.Ts fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_TID_INT] = pe.Tid fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_OPFLAGS_INT] = int64(pe.OpFlags) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_RET_INT] = int64(pe.Ret) - s.outCh <- fr + fr.Ptree = sf.PTree + fr.GraphletID = sf.GraphletID + s.out(fr) return nil } -func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, cont *sfgo.Container, proc *sfgo.Process, file *sfgo.File, fr *sfgo.FlatRecord) { +func (s *Flattener) fillHeader(hdr *sfgo.SFHeader, fr *sfgo.FlatRecord) { if hdr != nil { fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SFHE_VERSION_INT] = hdr.Version fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SFHE_EXPORTER_STR] = hdr.Exporter fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SFHE_IP_STR] = hdr.Ip + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SFHE_FILENAME_STR] = hdr.Filename } else { logger.Warn.Println("Event does not have a related header. This should not happen.") fr.Ints[sfgo.SYSFLOW_IDX][sfgo.SFHE_VERSION_INT] = sfgo.Zeros.Int64 fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SFHE_EXPORTER_STR] = sfgo.Zeros.String fr.Strs[sfgo.SYSFLOW_IDX][sfgo.SFHE_IP_STR] = sfgo.Zeros.String } +} + +func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, pod *sfgo.Pod, cont *sfgo.Container, proc *sfgo.Process, file *sfgo.File, fr *sfgo.FlatRecord) { + s.fillHeader(hdr, fr) if cont != nil { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.CONT_ID_STR] = cont.Id fr.Strs[sfgo.SYSFLOW_IDX][sfgo.CONT_NAME_STR] = strings.TrimSpace(cont.Name) @@ -209,13 +284,39 @@ func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, cont *sfgo.Container, proc fr.Strs[sfgo.SYSFLOW_IDX][sfgo.CONT_IMAGEID_STR] = sfgo.Zeros.String fr.Ints[sfgo.SYSFLOW_IDX][sfgo.CONT_TYPE_INT] = sfgo.Zeros.Int64 fr.Ints[sfgo.SYSFLOW_IDX][sfgo.CONT_PRIVILEGED_INT] = sfgo.Zeros.Int64 - + } + if pod != nil { + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.POD_TS_INT] = pod.Ts + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_ID_STR] = pod.Id + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NAME_STR] = strings.TrimSpace(pod.Name) + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NODENAME_STR] = strings.TrimSpace(pod.NodeName) + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NAMESPACE_STR] = strings.TrimSpace(pod.Namespace) + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.POD_RESTARTCOUNT_INT] = pod.RestartCount + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_HOSTIP_STR] = getIPStr(&pod.HostIP) + fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_HOSTIP_ANY] = &pod.HostIP + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_INTERNALIP_STR] = getIPStr(&pod.InternalIP) + fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_INTERNALIP_ANY] = &pod.InternalIP + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_SERVICES_STR] = getJSONStr(&pod.Services) + fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_SERVICES_ANY] = &pod.Services + } else { + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.POD_TS_INT] = sfgo.Zeros.Int64 + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_ID_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NAME_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NODENAME_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_NAMESPACE_STR] = sfgo.Zeros.String + fr.Ints[sfgo.SYSFLOW_IDX][sfgo.POD_RESTARTCOUNT_INT] = sfgo.Zeros.Int64 + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_HOSTIP_STR] = sfgo.Zeros.String + 
fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_HOSTIP_ANY] = sfgo.Zeros.Any + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_INTERNALIP_STR] = sfgo.Zeros.String + fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_INTERNALIP_ANY] = sfgo.Zeros.Any + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.POD_SERVICES_STR] = sfgo.Zeros.String + fr.Anys[sfgo.SYSFLOW_IDX][sfgo.POD_SERVICES_ANY] = sfgo.Zeros.Any } if proc != nil { fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_STATE_INT] = int64(proc.State) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_OID_CREATETS_INT] = int64(proc.Oid.CreateTS) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_OID_HPID_INT] = int64(proc.Oid.Hpid) - if proc.Poid != nil && proc.Poid.UnionType == sfgo.UnionNullOIDTypeEnumOID { + if proc.Poid != nil && proc.Poid.UnionType == sfgo.PoidUnionTypeEnumOID { fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_POID_CREATETS_INT] = proc.Poid.OID.CreateTS fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_POID_HPID_INT] = proc.Poid.OID.Hpid } else { @@ -239,7 +340,7 @@ func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, cont *sfgo.Container, proc } else { fr.Ints[sfgo.SYSFLOW_IDX][sfgo.PROC_ENTRY_INT] = 0 } - if proc.ContainerId != nil && proc.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { + if proc.ContainerId != nil && proc.ContainerId.UnionType == sfgo.ContainerIdUnionTypeEnumString { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.PROC_CONTAINERID_STRING_STR] = proc.ContainerId.String } else { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.PROC_CONTAINERID_STRING_STR] = sfgo.Zeros.String @@ -266,8 +367,9 @@ func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, cont *sfgo.Container, proc fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FILE_STATE_INT] = int64(file.State) fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FILE_TS_INT] = file.Ts fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FILE_RESTYPE_INT] = int64(file.Restype) + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_OID_STR] = getOIDStr(file.Oid[:]) fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_PATH_STR] = strings.TrimSpace(file.Path) - if file.ContainerId != nil && file.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { + if file.ContainerId != nil && file.ContainerId.UnionType == sfgo.ContainerIdUnionTypeEnumString { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_CONTAINERID_STRING_STR] = file.ContainerId.String } else { fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_CONTAINERID_STRING_STR] = sfgo.Zeros.String @@ -278,7 +380,29 @@ func (s *Flattener) fillEntities(hdr *sfgo.SFHeader, cont *sfgo.Container, proc fr.Ints[sfgo.SYSFLOW_IDX][sfgo.FILE_RESTYPE_INT] = sfgo.Zeros.Int64 fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_PATH_STR] = sfgo.Zeros.String fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_CONTAINERID_STRING_STR] = sfgo.Zeros.String + fr.Strs[sfgo.SYSFLOW_IDX][sfgo.FILE_OID_STR] = sfgo.Zeros.String + } +} + +func getIPStr(ips *[]int64) string { + var sb strings.Builder + sb.WriteByte('[') + for _, ip := range *ips { + sb.WriteByte('"') + sb.WriteString(sfgo.GetIPStr(int32(ip))) + sb.WriteByte('"') } + sb.WriteByte(']') + return sb.String() +} + +func getJSONStr(s *[]*sfgo.Service) string { + b, _ := json.Marshal(s) + return string(b) +} + +func getOIDStr(bs []byte) string { + return hex.EncodeToString(bs) } func newFlatRecord() *sfgo.FlatRecord { @@ -286,9 +410,10 @@ func newFlatRecord() *sfgo.FlatRecord { fr.Sources = make([]sfgo.Source, 1) fr.Ints = make([][]int64, 1) fr.Strs = make([][]string, 1) + fr.Anys = make([][]interface{}, 1) fr.Sources[sfgo.SYSFLOW_IDX] = sfgo.SYSFLOW_SRC - fr.Ints[sfgo.SYSFLOW_IDX] = make([]int64, sfgo.INT_ARRAY_SIZE) fr.Strs[sfgo.SYSFLOW_IDX] = make([]string, sfgo.STR_ARRAY_SIZE) + fr.Anys[sfgo.SYSFLOW_IDX] = make([]interface{}, sfgo.ANY_ARRAY_SIZE) return fr } diff 
--git a/core/go.mod b/core/go.mod index abaae614..415a6306 100644 --- a/core/go.mod +++ b/core/go.mod @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,18 +18,57 @@ // See the License for the specific language governing permissions and // limitations under the License. // -module github.ibm.com/sysflow/sf-processor/core +module github.com/sysflow-telemetry/sf-processor/core -go 1.14 +go 1.19 require ( + github.com/IBM/go-sdk-core/v5 v5.9.2 + github.com/IBM/scc-go-sdk/v3 v3.1.5 github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 + github.com/actgardner/gogen-avro/v7 v7.3.1 github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0 - github.com/cespare/xxhash v1.1.0 - github.com/cespare/xxhash/v2 v2.1.1 - github.com/enriquebris/goconcurrentqueue v0.6.0 + github.com/bradleyjkemp/sigma-go v0.5.1 + github.com/cespare/xxhash/v2 v2.1.2 + github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae + github.com/fsnotify/fsnotify v1.5.1 + github.com/linkedin/goavro v2.1.0+incompatible github.com/mailru/easyjson v0.7.6 github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 - github.com/stretchr/testify v1.6.1 - github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0 + github.com/paulbellamy/ratecounter v0.2.0 + github.com/pkg/errors v0.9.1 + github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408 + github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 + github.com/stretchr/testify v1.7.0 + github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 + github.com/tidwall/gjson v1.14.1 + golang.org/x/exp v0.0.0-20230206171751-46f607a40771 +) + +require ( + github.com/alecthomas/participle v0.7.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/errors v0.19.8 // indirect + github.com/go-openapi/strfmt v0.21.1 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.0 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + go.mongodb.org/mongo-driver v1.7.5 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + gopkg.in/go-playground/validator.v9 v9.31.0 // indirect + gopkg.in/linkedin/goavro.v1 v1.0.5 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/core/go.sum b/core/go.sum index 73b7fa58..94979bd7 100644 --- a/core/go.sum +++ b/core/go.sum @@ -1,241 +1,328 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod 
h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/IBM/go-sdk-core/v5 v5.7.0/go.mod h1:+YbdhrjCHC84ls4MeBp+Hj4NZCni+tDAc0XQUqRO9Jc= +github.com/IBM/go-sdk-core/v5 v5.9.2 h1:QKB5JwhlZfRvFHqcOwMeu/Dis/Q7qCBxrQLhx04onMc= +github.com/IBM/go-sdk-core/v5 v5.9.2/go.mod h1:YlOwV9LeuclmT/qi/LAK2AsobbAP42veV0j68/rlZsE= +github.com/IBM/scc-go-sdk/v3 v3.1.5 h1:6JUivsaIb32NorA0+Fb/KsI6MSZaoDS8BFgtl/wyjh4= +github.com/IBM/scc-go-sdk/v3 v3.1.5/go.mod h1:cBxkth9AIOcKQx4Gy9bWgyGYa7vYwHAalUBvY+O8xAE= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE= github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ= -github.com/actgardner/gogen-avro v6.5.0+incompatible h1:P73NiZR/S0lBWQDkK6mbvdgBXRc6e0/AaaSTqu/AvLI= -github.com/actgardner/gogen-avro v6.5.0+incompatible/go.mod h1:N2PzqZtS+5w9xxGp2daeykhWdTL0lBiRhbbvkVj4Yd8= -github.com/actgardner/gogen-avro/v7 v7.1.1 h1:fAKfqQNIDIXq4Pwop3Fqu+0Tym5PuAX/cMVbdEIuVdM= -github.com/actgardner/gogen-avro/v7 v7.1.1/go.mod h1:DALbHv5zAeoz7KJ/fPAvl+d8Ixcy6x8Fjo+PO0YM8mU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/actgardner/gogen-avro/v7 v7.3.1 h1:6JJU3o7168lcyIB6uXYyYdflCsJT3aMFKZPSpSc4toI= +github.com/actgardner/gogen-avro/v7 v7.3.1/go.mod h1:1d45RpDvI29sU7l9wUxlRTEglZSdQSbd6bDbWJaEMgo= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= +github.com/alecthomas/participle v0.7.1/go.mod h1:HfdmEuwvr12HXQN44HPWXR0lHmVolVYe4dyL6lQ3duY= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0 h1:j7MyDjg6pb7A2ziow17FDZ2Oj5vGnJsLyDmjpN4Jkcg= github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= 
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/bradleyjkemp/cupaloy/v2 v2.6.0 h1:knToPYa2xtfg42U3I6punFEjaGFKWQRXJwj0JTv4mTs= +github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= +github.com/bradleyjkemp/sigma-go v0.5.1 h1:2a747+swYse4KfIvLRCg49q118MSONk5+W/JeGM40cc= +github.com/bradleyjkemp/sigma-go v0.5.1/go.mod h1:ZiTmCLylS8LOQPm1/2FuNDlSteiWwuHWScE69vOhh8c= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/enriquebris/goconcurrentqueue v0.6.0 h1:DJ97cgoPVoqlC4tTGBokn/omaB3o16yIs5QdAm6YEjc= 
-github.com/enriquebris/goconcurrentqueue v0.6.0/go.mod h1:wGJhQNFI4wLNHleZLo5ehk1puj8M6OIl0tOjs3kwJus= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae h1:sZOzFMm2XxvAO0hwo0k1XUyKusaUedme7rnUMXF96zs= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA= +github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.21.1 h1:G6s2t5V5kGCHLVbSdZ/6lI8Wm4OzoPFkc3/cjAsKQrM= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp 
v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/johnstarich/go/gopages v0.1.8/go.mod h1:OaSRjfHdFfN+LS7u6xqgNO7C2Uxjlvpm17DcKcvLBhY= +github.com/johnstarich/go/pipe v0.2.0/go.mod h1:3X9IdVJJnI7pkpzEH6np98wqHl55zFmbilKG+9+koMo= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/linkedin/goavro v2.1.0+incompatible h1:DV2aUlj2xZiuxQyvag8Dy7zjY69ENjS66bWkSfdpddY= github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= -github.com/linkedin/goavro/v2 v2.9.7/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/linkedin/goavro/v2 v2.10.0/go.mod 
h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408 h1:l1nqzjjPpj99dxtQizYjbzvIf2RBHneeuOoka3G7Lu4= +github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408/go.mod h1:dz6UCF9ERHtGjdv5LwOTgZxng/7IZm2spR/mXtTpLjc= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= 
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96 h1:XbL0XPH5OZMVH5d0RhM0g9VXOKclsy9hVUh6+cem73c= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96/go.mod h1:rpTKky267xtopNUCoInTEZiaFkOrawhzE0HaZMEvIAI= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0 h1:ezn2cJhqCZTilHOX8S5botGSLf54V+PboDFsgXNr32M= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0/go.mod h1:rpTKky267xtopNUCoInTEZiaFkOrawhzE0HaZMEvIAI= -github.com/tchap/go-patricia v2.3.0+incompatible/go.mod 
h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c h1:5BCJMIuiysHlYJe+nr/203cqIS6cpTIssbUD8v88VOU= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c/go.mod h1:eo1ATE056Rqb9LhE4LA/0Y2AHfV//1zdCw0py4/S5HM= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 h1:ZxzwimQe2R4kYorqS33/l+m/+SXWMzPn1cLtpA1ExA0= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300/go.mod h1:rvE0WXuIQmACykrVpAKxP5Crf/7KvZplUTULATmAuf4= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210315170653-34ac3e1c2000/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20201021000207-d49c4edd7d96/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/linkedin/goavro.v1 v1.0.5 h1:BJa69CDh0awSsLUmZ9+BowBdokpduDZSM9Zk8oKHfN4= +gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/core/policyengine/common/constants.go b/core/policyengine/common/constants.go
new file mode 100644
index 00000000..8d13e9c2
--- /dev/null
+++ b/core/policyengine/common/constants.go
@@ -0,0 +1,9 @@
+package common
+
+// Parsing constants.
+const (
+	LISTSEP string = ","
+	EMPTY string = ""
+	QUOTE string = "\""
+	SPACE string = " "
+)
diff --git a/core/policyengine/common/utils.go b/core/policyengine/common/utils.go
new file mode 100644
index 00000000..21c3b5a8
--- /dev/null
+++ b/core/policyengine/common/utils.go
@@ -0,0 +1,11 @@
+package common
+
+func TrimBoundingQuotes(s string) string {
+	if len(s) > 0 && (s[0] == '"' || s[0] == '\'') {
+		s = s[1:]
+	}
+	if len(s) > 0 && (s[len(s)-1] == '"' || s[len(s)-1] == '\'') {
+		s = s[:len(s)-1]
+	}
+	return s
+}
diff --git a/core/policyengine/engine/actionhandler.go b/core/policyengine/engine/actionhandler.go
index 7223b7f2..058d68fc 100644
--- a/core/policyengine/engine/actionhandler.go
+++ b/core/policyengine/engine/actionhandler.go
@@ -1,9 +1,8 @@
//
-// Copyright (C) 2020 IBM Corporation.
+// Copyright (C) 2021 IBM Corporation.
//
// Authors:
-// Frederico Araujo
-// Teryl Taylor
+// Andreas Schade
// Teryl Taylor
+// Andreas Schade
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,50 +17,98 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
+
+// Package engine implements a rules engine for telemetry records.
package engine
-import "errors"
+import (
+	"strconv"
+	"time"
+)
// Configuration keys.
const ( PoliciesConfigKey string = "policies" + ConfigKey string = "config" + LanguageKey string = "language" ModeConfigKey string = "mode" VersionKey string = "version" JSONSchemaVersionKey string = "jsonschemaversion" BuildNumberKey string = "buildnumber" + MonitorKey string = "monitor" + MonitorIntervalKey string = "monitor.interval" + ConcurrencyKey string = "concurrency" + ActionDirKey string = "actiondir" + BenchRulesetSizeKey string = "bench.rulesetsize" + BenchRuleIndexKey string = "bench.ruleindex" ) // Config defines a configuration object for the engine. type Config struct { PoliciesPath string + ConfigPath string + Language Language Mode Mode Version string JSONSchemaVersion string BuildNumber string + Monitor MonitorType + MonitorInterval time.Duration + Concurrency int + ActionDir string + BenchRulesetSize int + BenchRuleIndex int } // CreateConfig creates a new config object from config dictionary. -func CreateConfig(conf map[string]string) (Config, error) { - var c Config = Config{Mode: AlertMode} // default values - if v, ok := conf[PoliciesConfigKey]; ok { +func CreateConfig(conf map[string]interface{}) (Config, error) { + var c Config = Config{Mode: AlertMode, Concurrency: 5, Monitor: NoneType, MonitorInterval: 30 * time.Second, ActionDir: "../resources/actions", Language: Falco, BenchRulesetSize: -1, BenchRuleIndex: -1} // default values + var err error + + if v, ok := conf[PoliciesConfigKey].(string); ok { c.PoliciesPath = v - } else { - return c, errors.New("Configuration tag 'policies' missing from policy engine plugin settings") } - if v, ok := conf[ModeConfigKey]; ok { + if v, ok := conf[ConfigKey].(string); ok { + c.ConfigPath = v + } + if v, ok := conf[LanguageKey].(string); ok { + c.Language = parseLanguage(v) + } + if v, ok := conf[ModeConfigKey].(string); ok { c.Mode = parseModeConfig(v) } - if v, ok := conf[VersionKey]; ok { + if v, ok := conf[VersionKey].(string); ok { c.Version = v } - if v, ok := conf[JSONSchemaVersionKey]; ok { + if v, ok := conf[JSONSchemaVersionKey].(string); ok { c.JSONSchemaVersion = v } - if v, ok := conf[BuildNumberKey]; ok { + if v, ok := conf[BuildNumberKey].(string); ok { c.BuildNumber = v } - return c, nil + if v, ok := conf[MonitorKey].(string); ok { + c.Monitor = parseMonitorType(v) + } + if v, ok := conf[MonitorIntervalKey].(string); ok { + var duration int + duration, err = strconv.Atoi(v) + if err == nil { + c.MonitorInterval = time.Duration(duration) * time.Second + } + } + if v, ok := conf[ConcurrencyKey].(string); ok { + c.Concurrency, err = strconv.Atoi(v) + } + if v, ok := conf[ActionDirKey].(string); ok { + c.ActionDir = v + } + if v, ok := conf[BenchRulesetSizeKey].(string); ok { + c.BenchRulesetSize, err = strconv.Atoi(v) + } + if v, ok := conf[BenchRuleIndexKey].(string); ok { + c.BenchRuleIndex, err = strconv.Atoi(v) + } + return c, err } // Mode type. @@ -67,24 +116,66 @@ type Mode int // Mode config options. const ( - AlertMode Mode = iota - FilterMode - BypassMode + EnrichMode Mode = iota + AlertMode ) func (s Mode) String() string { - return [...]string{"alert", "filter", "bypass"}[s] + return [...]string{"enrich", "alert"}[s] } func parseModeConfig(s string) Mode { + if EnrichMode.String() == s { + return EnrichMode + } if AlertMode.String() == s { return AlertMode } - if FilterMode.String() == s { - return FilterMode + return EnrichMode +} + +// MonitorType defines a policy monitor type. +type MonitorType uint32 + +// Monitor types. 
+const ( + NoneType MonitorType = iota + LocalType +) + +func (s MonitorType) String() string { + return [...]string{"none", "local"}[s] +} + +func parseMonitorType(s string) MonitorType { + if NoneType.String() == s { + return NoneType + } + if LocalType.String() == s { + return LocalType + } + return NoneType +} + +// Language defines a policy language. +type Language uint32 + +// Language types. +const ( + Falco Language = iota + Sigma +) + +func (s Language) String() string { + return [...]string{"falco", "sigma"}[s] +} + +func parseLanguage(s string) Language { + if Falco.String() == s { + return Falco } - if BypassMode.String() == s { - return BypassMode + if Sigma.String() == s { + return Sigma } - return AlertMode + return Falco } diff --git a/core/policyengine/engine/interpreter.go b/core/policyengine/engine/interpreter.go index 592c3351..cc1a8d64 100644 --- a/core/policyengine/engine/interpreter.go +++ b/core/policyengine/engine/interpreter.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,359 +17,189 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package engine implements a rules engine for telemetry records. package engine import ( - "regexp" - "strconv" - "strings" + "sync" + "time" - "github.com/antlr/antlr4/runtime/Go/antlr" + "github.com/paulbellamy/ratecounter" "github.com/sysflow-telemetry/sf-apis/go/logger" - "github.ibm.com/sysflow/sf-processor/core/policyengine/lang/parser" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" ) -// Parsed rule and filter object maps. -var rules = make([]Rule, 0) -var filters = make([]Filter, 0) - -// Accessory parsing maps. -var lists = make(map[string][]string) -var macroCtxs = make(map[string]parser.IExpressionContext) - -// Regular expression for pasting lists. -var itemsre = regexp.MustCompile(`(^\[)(.*)(\]$?)`) - // PolicyInterpreter defines a rules engine for SysFlow data streams. -type PolicyInterpreter struct { - ahdl ActionHandler -} - -// NewPolicyInterpreter constructs a new interpreter instance. -func NewPolicyInterpreter(conf Config) PolicyInterpreter { - ah := NewActionHandler(conf) - return PolicyInterpreter{ah} -} +type PolicyInterpreter[R any] struct { -// Compile parses and interprets an input policy defined in path. -func (pi PolicyInterpreter) compile(path string) error { - // Setup the input - is, err := antlr.NewFileStream(path) - if err != nil { - logger.Error.Println("Error reading policy from path", path) - return err - } + // Input policy language compiler + pc policy.PolicyCompiler[R] - // Create the Lexer - lexer := parser.NewSfplLexer(is) - stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel) + // Configuration + config Config - // Create the Parser - p := parser.NewSfplParser(stream) + // Prefilter + prefilter source.Prefilter[R] - // Parse the policy - antlr.ParseTreeWalkerDefault.Walk(&sfplListener{}, p.Policy()) - return nil -} + // Record contextualizer + ctx source.Contextualizer[R] -// Compile parses and interprets a set of input policies defined in paths. 
-func (pi PolicyInterpreter) Compile(paths ...string) error { - for _, path := range paths { - logger.Trace.Println("Parsing policy file ", path) - if err := pi.compile(path); err != nil { - return err - } - } - return nil -} + // Parsed rule and filter object maps + rules []policy.Rule[R] + filters []policy.Filter[R] -// ProcessAsync executes all compiled policies against record r. -func (pi PolicyInterpreter) ProcessAsync(applyFilters bool, filterOnly bool, r *Record, out func(r *Record)) { - if applyFilters && pi.EvalFilters(r) { - return - } - if filterOnly { - out(r) - } - for _, rule := range rules { - //fmt.Println("Rule: ", rule.Name, rule.Enabled, rule.isApplicable(r)) - if rule.Enabled && rule.isApplicable(r) && rule.condition.Eval(r) { - pi.ahdl.HandleActionAsync(rule, r, out) - } - } -} + // Worker channel and waitgroup + workerCh chan R + wg *sync.WaitGroup -// Process executes all compiled policies against record r. -func (pi PolicyInterpreter) Process(applyFilters bool, filterOnly bool, r *Record) (bool, *Record) { - match := false - if applyFilters && pi.EvalFilters(r) { - return match, nil - } - if filterOnly { - return true, r - } - for _, rule := range rules { - if rule.Enabled && rule.isApplicable(r) && rule.condition.Eval(r) { - pi.ahdl.HandleAction(rule, r) - match = true - } - } - return match, r -} + // Callback for sending records downstream + out func(R) -// EvalFilters executes compiled policy filters against record r. -func (pi PolicyInterpreter) EvalFilters(r *Record) bool { - for _, f := range filters { - if f.Enabled && f.condition.Eval(r) { - return true - } - } - return false -} + // Worker pool size + concurrency int -type sfplListener struct { - *parser.BaseSfplListener -} + // Action Handler + ah *ActionHandler[R] -// ExitList is called when production list is exited. -func (listener *sfplListener) ExitPlist(ctx *parser.PlistContext) { - logger.Trace.Println("Parsing list ", ctx.GetText()) - lists[ctx.ID().GetText()] = listener.extractListFromItems(ctx.Items()) + // Rate counter + rc *ratecounter.RateCounter + lastRcTs time.Time } -// ExitMacro is called when production macro is exited. -func (listener *sfplListener) ExitPmacro(ctx *parser.PmacroContext) { - logger.Trace.Println("Parsing macro ", ctx.GetText()) - macroCtxs[ctx.ID().GetText()] = ctx.Expression() -} - -// ExitFilter is called when production filter is exited. -func (listener *sfplListener) ExitPfilter(ctx *parser.PfilterContext) { - logger.Trace.Println("Parsing filter ", ctx.GetText()) - f := Filter{ - Name: ctx.ID().GetText(), - condition: listener.visitExpression(ctx.Expression()), - Enabled: ctx.ENABLED() == nil || listener.getEnabledFlag(ctx.Enabled()), +// NewPolicyInterpreter constructs a new interpreter instance. +func NewPolicyInterpreter[R any](conf Config, pc policy.PolicyCompiler[R], pf source.Prefilter[R], ctx source.Contextualizer[R], out func(R)) *PolicyInterpreter[R] { + pi := new(PolicyInterpreter[R]) + pi.pc = pc + if pi.prefilter = pf; pf == nil { + pi.prefilter = source.NewDefaultPrefilter[R]() } - filters = append(filters, f) -} - -// ExitFilter is called when production filter is exited. 
-func (listener *sfplListener) ExitPrule(ctx *parser.PruleContext) { - logger.Trace.Println("Parsing rule ", ctx.GetText()) - r := Rule{ - Name: listener.getOffChannelText(ctx.Text(0)), - Desc: listener.getOffChannelText(ctx.Text(1)), - condition: listener.visitExpression(ctx.Expression()), - Actions: listener.getActions(ctx), - Tags: listener.getTags(ctx), - Priority: listener.getPriority(ctx.Severity().GetText()), - Prefilter: listener.getPrefilter(ctx), - Enabled: ctx.ENABLED(0) == nil || listener.getEnabledFlag(ctx.Enabled(0)), + if pi.ctx = ctx; ctx == nil { + pi.ctx = source.NewDefaultContextualizer[R]() } - rules = append(rules, r) -} + pi.config = conf + pi.concurrency = conf.Concurrency + pi.rules = make([]policy.Rule[R], 0) + pi.filters = make([]policy.Filter[R], 0) + pi.out = out + pi.ah = NewActionHandler[R](conf) -func (listener *sfplListener) getEnabledFlag(ctx parser.IEnabledContext) bool { - flag := trimBoundingQuotes(ctx.GetText()) - if b, err := strconv.ParseBool(flag); err == nil { - return b + // This should only be used for benchmarking the engine + if logger.IsEnabled(logger.Perf) { + pi.rc = ratecounter.NewRateCounter(1 * time.Second) + pi.lastRcTs = time.Now() } - logger.Warn.Println("Unrecognized enabled flag: ", flag) - return true + return pi } -func (listener *sfplListener) getOffChannelText(ctx parser.ITextContext) string { - a := ctx.GetStart().GetStart() - b := ctx.GetStop().GetStop() - interval := antlr.Interval{Start: a, Stop: b} - return ctx.GetStart().GetInputStream().GetTextFromInterval(&interval) -} - -func (listener *sfplListener) getTags(ctx *parser.PruleContext) []EnrichmentTag { - var tags = make([]EnrichmentTag, 0) - ictx := ctx.Tags(0) - if ictx != nil { - return append(tags, listener.extractTags(ictx)) +// StartWorkers creates the worker pool. +func (pi *PolicyInterpreter[R]) StartWorkers() { + logger.Trace.Printf("Starting policy engine's thread pool with %d workers", pi.concurrency) + pi.workerCh = make(chan R, pi.concurrency) + pi.wg = new(sync.WaitGroup) + pi.wg.Add(pi.concurrency) + for i := 0; i < pi.concurrency; i++ { + go pi.worker() } - return tags } -func (listener *sfplListener) getPrefilter(ctx *parser.PruleContext) []string { - var pfs = make([]string, 0) - ictx := ctx.Prefilter(0) - if ictx != nil { - return append(pfs, listener.extractList(ictx.GetText())...) - } - return pfs +// StopWorkers stops the worker pool and waits for all tasks to finish. +func (pi *PolicyInterpreter[R]) StopWorkers() { + logger.Trace.Println("Stopping policy engine's thread pool") + close(pi.workerCh) + pi.wg.Wait() } -func (listener *sfplListener) getPriority(p string) Priority { - switch strings.ToLower(p) { - case Low.String(): - return Low - case Medium.String(): - return Medium - case High.String(): - return High - case FPriorityDebug: - return Low - case FPriorityInfo: - return Low - case FPriorityNotice: - return Low - case FPriorityWarning: - return Medium - case FPriorityError: - return High - case FPriorityCritical: - return High - case FPriorityEmergency: - return High - default: - logger.Warn.Printf("Unrecognized priority value %s. Deferring to %s\n", p, Low.String()) - break +// Compile parses and interprets a set of input policies defined in paths. 
+func (pi *PolicyInterpreter[R]) Compile(paths ...string) (err error) { + if pi.rules, pi.filters, err = pi.pc.Compile(paths...); err != nil { + return err } - return Low -} - -func (listener *sfplListener) getActions(ctx *parser.PruleContext) []Action { - var actions []Action - if ctx.OUTPUT() != nil { - actions = append(actions, Alert) - } else if ctx.ACTION() != nil { - astr := ctx.Text(2).GetText() - l := listener.extractList(astr) - for _, v := range l { - switch strings.ToLower(v) { - case Alert.String(): - actions = append(actions, Alert) - case Tag.String(): - actions = append(actions, Tag) - case Hash.String(): - actions = append(actions, Hash) - default: - logger.Warn.Println("Unrecognized action value ", v) - break - } + if logger.IsEnabled(logger.Perf) { + if pi.config.BenchRuleIndex >= 0 && pi.config.BenchRuleIndex < len(pi.rules) { + pi.rules = append(make([]policy.Rule[R], 0), pi.rules[pi.config.BenchRuleIndex]) + } else if pi.config.BenchRulesetSize >= 0 && pi.config.BenchRulesetSize <= len(pi.rules) { + pi.rules = append(make([]policy.Rule[R], 0), pi.rules[:pi.config.BenchRulesetSize]...) + } + logger.Perf.Printf("Benchmarking %d rule(s)", len(pi.rules)) + for _, r := range pi.rules { + logger.Perf.Printf("Rule Name: %s, Description: %-50s", r.Name, r.Desc) } } - return actions -} - -func (listener *sfplListener) extractList(str string) []string { - s := []string{} - ls := strings.Split(itemsre.ReplaceAllString(str, "$2"), LISTSEP) - for _, v := range ls { - s = append(s, v) - } - return s + logger.Info.Printf("Policy engine loaded %d rules and %d prefilters", len(pi.rules), len(pi.filters)) + pi.ah.CheckActions(pi.rules) + return nil } -func (listener *sfplListener) extractListFromItems(ctx parser.IItemsContext) []string { - if ctx != nil { - return listener.extractList(ctx.GetText()) +// ProcessAsync queues the record for processing in the worker pool. +func (pi *PolicyInterpreter[R]) ProcessAsync(r R) { + pi.workerCh <- r + if logger.IsEnabled(logger.Perf) && time.Since(pi.lastRcTs) > (15*time.Second) { + logger.Perf.Println("Policy engine rate (events/sec): ", pi.rc.Rate()) + pi.lastRcTs = time.Now() } - return []string{} } -func (listener *sfplListener) extractTags(ctx parser.ITagsContext) []string { - if ctx != nil { - return listener.extractList(ctx.GetText()) - } - return []string{} -} +// Asynchronous worker thread: apply all compiled policies, enrich matching records, and send records downstream. +func (pi *PolicyInterpreter[R]) worker() { + for { + // Fetch record + r, ok := <-pi.workerCh + if !ok { + logger.Trace.Println("Worker channel closed. Shutting down.") + break + } -func (listener *sfplListener) extractListFromAtoms(ctxs []parser.IAtomContext) []string { - s := []string{} - for _, v := range ctxs { - s = append(s, listener.reduceList(v.GetText())...) - } - return s -} + // Increment rate counter + if logger.IsEnabled(logger.Perf) { + pi.rc.Incr(1) + } -func (listener *sfplListener) reduceList(sl string) []string { - s := []string{} - if l, ok := lists[sl]; ok { - for _, v := range l { - s = append(s, listener.reduceList(v)...) 
+ // Drop record if any drop rule applied + if pi.evalFilters(r) { + continue } - } else { - s = append(s, sl) - } - return s -} -func (listener *sfplListener) visitExpression(ctx parser.IExpressionContext) Criterion { - orCtx := ctx.GetChild(0).(parser.IOr_expressionContext) - orPreds := make([]Criterion, 0) - for _, andCtx := range orCtx.GetChildren() { - if andCtx.GetChildCount() > 0 { - andPreds := make([]Criterion, 0) - for _, termCtx := range andCtx.GetChildren() { - t, isTermCtx := termCtx.(parser.ITermContext) - if isTermCtx { - c := listener.visitTerm(t) - andPreds = append(andPreds, c) + // Enrich mode is non-blocking: Push record even if no rule matches + match := (pi.config.Mode == EnrichMode) + + // Apply rules + for _, rule := range pi.rules { + if rule.Enabled && pi.prefilter.IsApplicable(r, rule) && rule.Condition.Eval(r) { + if pi.ctx != nil { + pi.ctx.AddRules(r, rule) } + pi.ah.HandleActions(rule, r) + match = true } - orPreds = append(orPreds, All(andPreds)) + } + + // Push record if a rule matches (or if mode is enrich) + if match && pi.out != nil { + pi.out(r) } } - return Any(orPreds) + pi.wg.Done() } -func (listener *sfplListener) visitTerm(ctx parser.ITermContext) Criterion { - termCtx := ctx.(*parser.TermContext) - if termCtx.Variable() != nil { - if m, ok := macroCtxs[termCtx.GetText()]; ok { - return listener.visitExpression(m) - } - logger.Error.Println("Unrecognized reference ", termCtx.GetText()) - } else if termCtx.NOT() != nil { - return listener.visitTerm(termCtx.GetChild(1).(parser.ITermContext)).Not() - } else if opCtx, ok := termCtx.Unary_operator().(*parser.Unary_operatorContext); ok { - lop := termCtx.Atom(0).(*parser.AtomContext).GetText() - if opCtx.EXISTS() != nil { - return Exists(lop) - } - logger.Error.Println("Unrecognized unary operator ", opCtx.GetText()) - } else if opCtx, ok := termCtx.Binary_operator().(*parser.Binary_operatorContext); ok { - lop := termCtx.Atom(0).(*parser.AtomContext).GetText() - rop := termCtx.Atom(1).(*parser.AtomContext).GetText() - if opCtx.CONTAINS() != nil { - return Contains(lop, rop) - } else if opCtx.ICONTAINS() != nil { - return IContains(lop, rop) - } else if opCtx.STARTSWITH() != nil { - return StartsWith(lop, rop) - } else if opCtx.EQ() != nil { - return Eq(lop, rop) - } else if opCtx.NEQ() != nil { - return NEq(lop, rop) - } else if opCtx.GT() != nil { - return Gt(lop, rop) - } else if opCtx.GE() != nil { - return Ge(lop, rop) - } else if opCtx.LT() != nil { - return Lt(lop, rop) - } else if opCtx.LE() != nil { - return Le(lop, rop) +// EvalFilters executes compiled policy filters against record r. +func (pi *PolicyInterpreter[R]) evalFilters(r R) bool { + for _, f := range pi.filters { + if f.Enabled && f.Condition.Eval(r) { + return true } - logger.Error.Println("Unrecognized binary operator ", opCtx.GetText()) - } else if termCtx.Expression() != nil { - return listener.visitExpression(termCtx.Expression()) - } else if termCtx.IN() != nil { - lop := termCtx.Atom(0).(*parser.AtomContext).GetText() - rop := termCtx.AllAtom()[1:] - return In(lop, listener.extractListFromAtoms(rop)) - } else if termCtx.PMATCH() != nil { - lop := termCtx.Atom(0).(*parser.AtomContext).GetText() - rop := termCtx.AllAtom()[1:] - return PMatch(lop, listener.extractListFromAtoms(rop)) - } else { - logger.Warn.Println("Unrecognized term ", termCtx.GetText()) } - return False + return false } + +// sampleRules is used in performance benchmarks to randomly sample a subset of rules. 
+// func (pi *PolicyInterpreter[R]) sampleRules(n int) []policy.Rule[R] { +// rand.Seed(time.Now().Unix()) +// permutation := rand.Perm(len(pi.rules)) +// rules := make([]policy.Rule[R], 0) +// for i := 0; i < n && i < len(pi.rules); i++ { +// rules = append(rules, pi.rules[permutation[i]]) +// } +// return rules +// } diff --git a/core/policyengine/engine/interpreter_test.go b/core/policyengine/engine/interpreter_test.go index 1dc8125b..0302adb9 100644 --- a/core/policyengine/engine/interpreter_test.go +++ b/core/policyengine/engine/interpreter_test.go @@ -1,4 +1,3 @@ -// // Copyright (C) 2020 IBM Corporation. // // Authors: @@ -9,15 +8,14 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine_test +package engine import ( "os" @@ -26,19 +24,39 @@ import ( "github.com/stretchr/testify/assert" "github.com/sysflow-telemetry/sf-apis/go/ioutils" "github.com/sysflow-telemetry/sf-apis/go/logger" - . "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/sigma" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" ) -var pi PolicyInterpreter +var pi *PolicyInterpreter[*flatrecord.Record] func SetupInterpreter(m *testing.M) { - pi = PolicyInterpreter{} + pc := falco.NewPolicyCompiler(flatrecord.NewOperations()) + pi = NewPolicyInterpreter(Config{}, pc, nil, nil, nil) os.Exit(m.Run()) } func TestCompile(t *testing.T) { logger.Trace.Println("Running test compile") - paths, err := ioutils.ListFilePaths("../../../resources/policies/tests/ma.yaml", ".yaml") + paths, err := ioutils.ListFilePaths("../../../resources/policies/tests", ".yaml") + assert.NoError(t, err) + assert.NoError(t, pi.Compile(paths...)) +} + +func TestCompileDist(t *testing.T) { + logger.Trace.Println("Running test compile") + paths, err := ioutils.ListFilePaths("../../../resources/policies/distribution/filter.yaml", ".yaml") + assert.NoError(t, err) + assert.NoError(t, pi.Compile(paths...)) +} + +func TestCompileSigma(t *testing.T) { + logger.Trace.Println("Running test compile") + pc := sigma.NewPolicyCompiler(flatrecord.NewOperations(), "../../../resources/policies/sigma/config/sysflow.yml") + pi = NewPolicyInterpreter(Config{}, pc, nil, nil, nil) + paths, err := ioutils.ListFilePaths("../../../resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_webshell_detection.yml", ".yml") assert.NoError(t, err) assert.NoError(t, pi.Compile(paths...)) + t.Logf("Rules: %d\n", len(pi.rules)) } diff --git a/core/policyengine/engine/predicates.go b/core/policyengine/engine/predicates.go deleted file mode 100644 index 21d934c2..00000000 --- a/core/policyengine/engine/predicates.go +++ /dev/null @@ -1,212 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -package engine - -import ( - "fmt" - "reflect" - "strings" -) - -// Predicate defines the type of a functional predicate. -type Predicate func(*Record) bool - -// True defines a functional predicate that always returns true. -var True = Criterion{func(r *Record) bool { return true }} - -// False defines a functional predicate that always returns false. -var False = Criterion{func(r *Record) bool { return false }} - -// Criterion defines an interface for functional predicate operations. -type Criterion struct { - Pred Predicate -} - -// Eval evaluates a functional predicate. -func (c Criterion) Eval(r *Record) bool { - return c.Pred(r) -} - -// And computes the conjunction of two functional predicates. -func (c Criterion) And(cr Criterion) Criterion { - var p Predicate = func(r *Record) bool { return c.Eval(r) && cr.Eval(r) } - return Criterion{p} -} - -// Or computes the conjunction of two functional predicates. -func (c Criterion) Or(cr Criterion) Criterion { - var p Predicate = func(r *Record) bool { return c.Eval(r) || cr.Eval(r) } - return Criterion{p} -} - -// Not computes the negation of the function predicate. -func (c Criterion) Not() Criterion { - var p Predicate = func(r *Record) bool { return !c.Eval(r) } - return Criterion{p} -} - -// All derives the conjuctive clause of all predicates in a slice of predicates. -func All(criteria []Criterion) Criterion { - all := True - for _, c := range criteria { - all = all.And(c) - } - return all -} - -// Any derives the disjuntive clause of all predicates in a slice of predicates. -func Any(criteria []Criterion) Criterion { - any := False - for _, c := range criteria { - any = any.Or(c) - } - return any -} - -// Exists creates a criterion for an existential predicate. -func Exists(attr string) Criterion { - m := Mapper.Map(attr) - p := func(r *Record) bool { return reflect.ValueOf(m(r)).IsZero() } - return Criterion{p} -} - -// Eq creates a criterion for an equality predicate. -func Eq(lattr string, rattr string) Criterion { - ml := Mapper.MapStr(lattr) - mr := Mapper.MapStr(rattr) - p := func(r *Record) bool { return eval(ml(r), mr(r), ops.eq) } - return Criterion{p} -} - -// NEq creates a criterion for an inequality predicate. -func NEq(lattr string, rattr string) Criterion { - return Eq(lattr, rattr).Not() -} - -// Ge creates a criterion for a greater-or-equal predicate. -func Ge(lattr string, rattr string) Criterion { - ml := Mapper.MapInt(lattr) - mr := Mapper.MapInt(rattr) - p := func(r *Record) bool { return ml(r) >= mr(r) } - return Criterion{p} -} - -// Gt creates a criterion for a greater-than predicate. -func Gt(lattr string, rattr string) Criterion { - ml := Mapper.MapInt(lattr) - mr := Mapper.MapInt(rattr) - p := func(r *Record) bool { return ml(r) > mr(r) } - return Criterion{p} -} - -// Le creates a criterion for a lower-or-equal predicate. -func Le(lattr string, rattr string) Criterion { - return Gt(lattr, rattr).Not() -} - -// Lt creates a criterion for a lower-than predicate. 
-func Lt(lattr string, rattr string) Criterion { - return Ge(lattr, rattr).Not() -} - -// StartsWith creates a criterion for a starts-with predicate. -func StartsWith(lattr string, rattr string) Criterion { - ml := Mapper.MapStr(lattr) - mr := Mapper.MapStr(rattr) - p := func(r *Record) bool { return eval(ml(r), mr(r), ops.startswith) } - return Criterion{p} -} - -// Contains creates a criterion for a contains predicate. -func Contains(lattr string, rattr string) Criterion { - ml := Mapper.MapStr(lattr) - mr := Mapper.MapStr(rattr) - p := func(r *Record) bool { return eval(ml(r), mr(r), ops.contains) } - return Criterion{p} -} - -// IContains creates a criterion for a case-insensitive contains predicate. -func IContains(lattr string, rattr string) Criterion { - ml := Mapper.MapStr(lattr) - mr := Mapper.MapStr(rattr) - p := func(r *Record) bool { return eval(ml(r), mr(r), ops.icontains) } - return Criterion{p} -} - -// In creates a criterion for a list-inclusion predicate. -func In(attr string, list []string) Criterion { - m := Mapper.MapStr(attr) - p := func(r *Record) bool { - for _, v := range list { - if eval(m(r), v, ops.eq) { - return true - } - } - return false - } - return Criterion{p} -} - -// PMatch creates a criterion for a list-pattern-matching predicate. -func PMatch(attr string, list []string) Criterion { - m := Mapper.MapStr(attr) - p := func(r *Record) bool { - for _, v := range list { - if eval(m(r), v, ops.contains) { - return true - } - } - return false - } - return Criterion{p} -} - -// operator type. -type operator func(string, string) bool - -// operators struct. -type operators struct { - eq operator - contains operator - icontains operator - startswith operator -} - -// ops defines boolean comparison operators over strings. -var ops = operators{ - eq: func(l string, r string) bool { return l == r }, - contains: func(l string, r string) bool { return strings.Contains(l, r) }, - icontains: func(l string, r string) bool { return strings.Contains(strings.ToLower(l), strings.ToLower(r)) }, - startswith: func(l string, r string) bool { return strings.HasPrefix(l, r) }, -} - -// Eval evaluates a boolean operator over two predicates. -func eval(l interface{}, r interface{}, op operator) bool { - lattrs := strings.Split(fmt.Sprintf("%v", l), LISTSEP) - rattrs := strings.Split(fmt.Sprintf("%v", r), LISTSEP) - for _, lattr := range lattrs { - for _, rattr := range rattrs { - if op(lattr, rattr) { - return true - } - } - } - return false -} diff --git a/core/policyengine/engine/predicates_test.go b/core/policyengine/engine/predicates_test.go deleted file mode 100644 index 506e8ea6..00000000 --- a/core/policyengine/engine/predicates_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// -// Copyright (C) 2020 IBM Corporation. -// -// Authors: -// Frederico Araujo -// Teryl Taylor -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -package engine_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - . 
"github.ibm.com/sysflow/sf-processor/core/policyengine/engine" -) - -func TestNot(t *testing.T) { - c := False - var r *Record - assert.Equal(t, true, c.Not().Eval(r)) -} - -func TestAnd(t *testing.T) { - c := False - var r *Record - assert.Equal(t, false, c.And(c).Eval(r)) - assert.Equal(t, false, c.And(c.Not()).Eval(r)) - assert.Equal(t, false, c.Not().And(c).Eval(r)) - assert.Equal(t, true, c.Not().And(c.Not()).Eval(r)) -} - -func TestOr(t *testing.T) { - c := False - var r *Record - assert.Equal(t, false, c.Or(c).Eval(r)) - assert.Equal(t, true, c.Or(c.Not()).Eval(r)) - assert.Equal(t, true, c.Not().Or(c).Eval(r)) - assert.Equal(t, true, c.Not().Or(c.Not()).Eval(r)) -} - -func TestAll(t *testing.T) { - var r *Record - assert.Equal(t, true, All([]Criterion{True, True}).Eval(r)) - assert.Equal(t, false, All([]Criterion{True, False}).Eval(r)) - assert.Equal(t, false, All([]Criterion{False, True}).Eval(r)) - assert.Equal(t, false, All([]Criterion{False, False}).Eval(r)) -} - -func TestAny(t *testing.T) { - var r *Record - assert.Equal(t, true, Any([]Criterion{True, True}).Eval(r)) - assert.Equal(t, true, Any([]Criterion{True, False}).Eval(r)) - assert.Equal(t, true, Any([]Criterion{False, True}).Eval(r)) - assert.Equal(t, false, Any([]Criterion{False, False}).Eval(r)) -} diff --git a/core/policyengine/engine/setup_test.go b/core/policyengine/engine/setup_test.go index 30e4be19..b1cb1fea 100644 --- a/core/policyengine/engine/setup_test.go +++ b/core/policyengine/engine/setup_test.go @@ -1,4 +1,3 @@ -// // Copyright (C) 2020 IBM Corporation. // // Authors: @@ -9,15 +8,14 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -package engine_test +package engine import ( "os" @@ -28,5 +26,6 @@ import ( func TestMain(m *testing.M) { logger.InitLoggers(logger.TRACE) + SetupInterpreter(m) os.Exit(m.Run()) } diff --git a/core/policyengine/lang/generate.sh b/core/policyengine/lang/generate.sh deleted file mode 100755 index 65986aed..00000000 --- a/core/policyengine/lang/generate.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -antlr4='java -Xmx500M -cp ".:/usr/local/lib/antlr-4.8-complete.jar:$CLASSPATH" org.antlr.v4.Tool' -grun='java -Xmx500M -cp ".:/usr/local/lib/antlr-4.8-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' - -$antlr4 -Dlanguage=Go -o parser -package parser -visitor Sfpl.g4 diff --git a/core/policyengine/lang/parser/Sfpl.interp b/core/policyengine/lang/parser/Sfpl.interp deleted file mode 100644 index b5490e72..00000000 --- a/core/policyengine/lang/parser/Sfpl.interp +++ /dev/null @@ -1,132 +0,0 @@ -token literal names: -null -'rule' -'filter' -'macro' -'list' -'name' -'items' -'condition' -'desc' -'action' -'output' -'priority' -'tags' -'prefilter' -'enabled' -'warn_evttypes' -'skip-if-unknown-filter' -'and' -'or' -'not' -'<' -'<=' -'>' -'>=' -'=' -'!=' -'in' -'contains' -'icontains' -'startswith' -'pmatch' -'exists' -'[' -']' -'(' -')' -',' -'-' -null -null -null -null -null -null -null -null -null -null -null -null -null - -token symbolic names: -null -RULE -FILTER -MACRO -LIST -NAME -ITEMS -COND -DESC -ACTION -OUTPUT -PRIORITY -TAGS -PREFILTER -ENABLED -WARNEVTTYPE -SKIPUNKNOWN -AND -OR -NOT -LT -LE -GT -GE -EQ -NEQ -IN -CONTAINS -ICONTAINS -STARTSWITH -PMATCH -EXISTS -LBRACK -RBRACK -LPAREN -RPAREN -LISTSEP -DECL -DEF -SEVERITY -SFSEVERITY -FSEVERITY -ID -NUMBER -PATH -STRING -TAG -WS -NL -COMMENT -ANY - -rule names: -policy -prule -pfilter -pmacro -plist -expression -or_expression -and_expression -term -items -tags -prefilter -severity -enabled -warnevttype -skipunknown -variable -atom -text -binary_operator -unary_operator - - -atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 52, 222, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 3, 2, 3, 2, 3, 2, 3, 2, 6, 2, 49, 10, 2, 13, 2, 14, 2, 50, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 86, 10, 3, 12, 3, 14, 3, 89, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 101, 10, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 7, 8, 124, 10, 8, 12, 8, 14, 8, 127, 11, 8, 3, 9, 3, 9, 3, 9, 7, 9, 132, 10, 9, 12, 9, 14, 9, 135, 11, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 152, 10, 10, 3, 10, 3, 10, 3, 10, 5, 10, 157, 10, 10, 7, 10, 159, 10, 10, 12, 10, 14, 10, 162, 11, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 170, 10, 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 176, 10, 11, 12, 11, 14, 11, 179, 11, 11, 5, 11, 181, 10, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 7, 12, 189, 10, 12, 12, 12, 14, 12, 192, 11, 12, 5, 12, 194, 10, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 6, 20, 214, 
10, 20, 13, 20, 14, 20, 215, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 2, 2, 23, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 2, 6, 3, 2, 11, 12, 4, 2, 28, 28, 32, 32, 5, 2, 22, 22, 24, 24, 44, 48, 4, 2, 22, 27, 29, 31, 2, 225, 2, 48, 3, 2, 2, 2, 4, 54, 3, 2, 2, 2, 6, 90, 3, 2, 2, 2, 8, 102, 3, 2, 2, 2, 10, 110, 3, 2, 2, 2, 12, 118, 3, 2, 2, 2, 14, 120, 3, 2, 2, 2, 16, 128, 3, 2, 2, 2, 18, 169, 3, 2, 2, 2, 20, 171, 3, 2, 2, 2, 22, 184, 3, 2, 2, 2, 24, 197, 3, 2, 2, 2, 26, 199, 3, 2, 2, 2, 28, 201, 3, 2, 2, 2, 30, 203, 3, 2, 2, 2, 32, 205, 3, 2, 2, 2, 34, 207, 3, 2, 2, 2, 36, 209, 3, 2, 2, 2, 38, 213, 3, 2, 2, 2, 40, 217, 3, 2, 2, 2, 42, 219, 3, 2, 2, 2, 44, 49, 5, 4, 3, 2, 45, 49, 5, 6, 4, 2, 46, 49, 5, 8, 5, 2, 47, 49, 5, 10, 6, 2, 48, 44, 3, 2, 2, 2, 48, 45, 3, 2, 2, 2, 48, 46, 3, 2, 2, 2, 48, 47, 3, 2, 2, 2, 49, 50, 3, 2, 2, 2, 50, 48, 3, 2, 2, 2, 50, 51, 3, 2, 2, 2, 51, 52, 3, 2, 2, 2, 52, 53, 7, 2, 2, 3, 53, 3, 3, 2, 2, 2, 54, 55, 7, 39, 2, 2, 55, 56, 7, 3, 2, 2, 56, 57, 7, 40, 2, 2, 57, 58, 5, 38, 20, 2, 58, 59, 7, 10, 2, 2, 59, 60, 7, 40, 2, 2, 60, 61, 5, 38, 20, 2, 61, 62, 7, 9, 2, 2, 62, 63, 7, 40, 2, 2, 63, 64, 5, 12, 7, 2, 64, 65, 9, 2, 2, 2, 65, 66, 7, 40, 2, 2, 66, 67, 5, 38, 20, 2, 67, 68, 7, 13, 2, 2, 68, 69, 7, 40, 2, 2, 69, 87, 5, 26, 14, 2, 70, 71, 7, 14, 2, 2, 71, 72, 7, 40, 2, 2, 72, 86, 5, 22, 12, 2, 73, 74, 7, 15, 2, 2, 74, 75, 7, 40, 2, 2, 75, 86, 5, 24, 13, 2, 76, 77, 7, 16, 2, 2, 77, 78, 7, 40, 2, 2, 78, 86, 5, 28, 15, 2, 79, 80, 7, 17, 2, 2, 80, 81, 7, 40, 2, 2, 81, 86, 5, 30, 16, 2, 82, 83, 7, 18, 2, 2, 83, 84, 7, 40, 2, 2, 84, 86, 5, 32, 17, 2, 85, 70, 3, 2, 2, 2, 85, 73, 3, 2, 2, 2, 85, 76, 3, 2, 2, 2, 85, 79, 3, 2, 2, 2, 85, 82, 3, 2, 2, 2, 86, 89, 3, 2, 2, 2, 87, 85, 3, 2, 2, 2, 87, 88, 3, 2, 2, 2, 88, 5, 3, 2, 2, 2, 89, 87, 3, 2, 2, 2, 90, 91, 7, 39, 2, 2, 91, 92, 7, 4, 2, 2, 92, 93, 7, 40, 2, 2, 93, 94, 7, 44, 2, 2, 94, 95, 7, 9, 2, 2, 95, 96, 7, 40, 2, 2, 96, 100, 5, 12, 7, 2, 97, 98, 7, 16, 2, 2, 98, 99, 7, 40, 2, 2, 99, 101, 5, 28, 15, 2, 100, 97, 3, 2, 2, 2, 100, 101, 3, 2, 2, 2, 101, 7, 3, 2, 2, 2, 102, 103, 7, 39, 2, 2, 103, 104, 7, 5, 2, 2, 104, 105, 7, 40, 2, 2, 105, 106, 7, 44, 2, 2, 106, 107, 7, 9, 2, 2, 107, 108, 7, 40, 2, 2, 108, 109, 5, 12, 7, 2, 109, 9, 3, 2, 2, 2, 110, 111, 7, 39, 2, 2, 111, 112, 7, 6, 2, 2, 112, 113, 7, 40, 2, 2, 113, 114, 7, 44, 2, 2, 114, 115, 7, 8, 2, 2, 115, 116, 7, 40, 2, 2, 116, 117, 5, 20, 11, 2, 117, 11, 3, 2, 2, 2, 118, 119, 5, 14, 8, 2, 119, 13, 3, 2, 2, 2, 120, 125, 5, 16, 9, 2, 121, 122, 7, 20, 2, 2, 122, 124, 5, 16, 9, 2, 123, 121, 3, 2, 2, 2, 124, 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126, 15, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 133, 5, 18, 10, 2, 129, 130, 7, 19, 2, 2, 130, 132, 5, 18, 10, 2, 131, 129, 3, 2, 2, 2, 132, 135, 3, 2, 2, 2, 133, 131, 3, 2, 2, 2, 133, 134, 3, 2, 2, 2, 134, 17, 3, 2, 2, 2, 135, 133, 3, 2, 2, 2, 136, 170, 5, 34, 18, 2, 137, 138, 7, 21, 2, 2, 138, 170, 5, 18, 10, 2, 139, 140, 5, 36, 19, 2, 140, 141, 5, 42, 22, 2, 141, 170, 3, 2, 2, 2, 142, 143, 5, 36, 19, 2, 143, 144, 5, 40, 21, 2, 144, 145, 5, 36, 19, 2, 145, 170, 3, 2, 2, 2, 146, 147, 5, 36, 19, 2, 147, 148, 9, 3, 2, 2, 148, 151, 7, 36, 2, 2, 149, 152, 5, 36, 19, 2, 150, 152, 5, 20, 11, 2, 151, 149, 3, 2, 2, 2, 151, 150, 3, 2, 2, 2, 152, 160, 3, 2, 2, 2, 153, 156, 7, 38, 2, 2, 154, 157, 5, 36, 19, 2, 155, 157, 5, 20, 11, 2, 156, 154, 3, 2, 2, 2, 156, 155, 3, 2, 2, 2, 157, 159, 3, 2, 2, 2, 158, 153, 3, 2, 2, 2, 159, 162, 3, 2, 2, 2, 160, 158, 3, 2, 2, 2, 160, 161, 3, 2, 2, 2, 161, 
163, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 163, 164, 7, 37, 2, 2, 164, 170, 3, 2, 2, 2, 165, 166, 7, 36, 2, 2, 166, 167, 5, 12, 7, 2, 167, 168, 7, 37, 2, 2, 168, 170, 3, 2, 2, 2, 169, 136, 3, 2, 2, 2, 169, 137, 3, 2, 2, 2, 169, 139, 3, 2, 2, 2, 169, 142, 3, 2, 2, 2, 169, 146, 3, 2, 2, 2, 169, 165, 3, 2, 2, 2, 170, 19, 3, 2, 2, 2, 171, 180, 7, 34, 2, 2, 172, 177, 5, 36, 19, 2, 173, 174, 7, 38, 2, 2, 174, 176, 5, 36, 19, 2, 175, 173, 3, 2, 2, 2, 176, 179, 3, 2, 2, 2, 177, 175, 3, 2, 2, 2, 177, 178, 3, 2, 2, 2, 178, 181, 3, 2, 2, 2, 179, 177, 3, 2, 2, 2, 180, 172, 3, 2, 2, 2, 180, 181, 3, 2, 2, 2, 181, 182, 3, 2, 2, 2, 182, 183, 7, 35, 2, 2, 183, 21, 3, 2, 2, 2, 184, 193, 7, 34, 2, 2, 185, 190, 5, 36, 19, 2, 186, 187, 7, 38, 2, 2, 187, 189, 5, 36, 19, 2, 188, 186, 3, 2, 2, 2, 189, 192, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 194, 3, 2, 2, 2, 192, 190, 3, 2, 2, 2, 193, 185, 3, 2, 2, 2, 193, 194, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 196, 7, 35, 2, 2, 196, 23, 3, 2, 2, 2, 197, 198, 5, 20, 11, 2, 198, 25, 3, 2, 2, 2, 199, 200, 7, 41, 2, 2, 200, 27, 3, 2, 2, 2, 201, 202, 5, 36, 19, 2, 202, 29, 3, 2, 2, 2, 203, 204, 5, 36, 19, 2, 204, 31, 3, 2, 2, 2, 205, 206, 5, 36, 19, 2, 206, 33, 3, 2, 2, 2, 207, 208, 7, 44, 2, 2, 208, 35, 3, 2, 2, 2, 209, 210, 9, 4, 2, 2, 210, 37, 3, 2, 2, 2, 211, 212, 6, 20, 2, 2, 212, 214, 11, 2, 2, 2, 213, 211, 3, 2, 2, 2, 214, 215, 3, 2, 2, 2, 215, 213, 3, 2, 2, 2, 215, 216, 3, 2, 2, 2, 216, 39, 3, 2, 2, 2, 217, 218, 9, 5, 2, 2, 218, 41, 3, 2, 2, 2, 219, 220, 7, 33, 2, 2, 220, 43, 3, 2, 2, 2, 18, 48, 50, 85, 87, 100, 125, 133, 151, 156, 160, 169, 177, 180, 190, 193, 215] \ No newline at end of file diff --git a/core/policyengine/lang/parser/Sfpl.tokens b/core/policyengine/lang/parser/Sfpl.tokens deleted file mode 100644 index fed61ec3..00000000 --- a/core/policyengine/lang/parser/Sfpl.tokens +++ /dev/null @@ -1,87 +0,0 @@ -RULE=1 -FILTER=2 -MACRO=3 -LIST=4 -NAME=5 -ITEMS=6 -COND=7 -DESC=8 -ACTION=9 -OUTPUT=10 -PRIORITY=11 -TAGS=12 -PREFILTER=13 -ENABLED=14 -WARNEVTTYPE=15 -SKIPUNKNOWN=16 -AND=17 -OR=18 -NOT=19 -LT=20 -LE=21 -GT=22 -GE=23 -EQ=24 -NEQ=25 -IN=26 -CONTAINS=27 -ICONTAINS=28 -STARTSWITH=29 -PMATCH=30 -EXISTS=31 -LBRACK=32 -RBRACK=33 -LPAREN=34 -RPAREN=35 -LISTSEP=36 -DECL=37 -DEF=38 -SEVERITY=39 -SFSEVERITY=40 -FSEVERITY=41 -ID=42 -NUMBER=43 -PATH=44 -STRING=45 -TAG=46 -WS=47 -NL=48 -COMMENT=49 -ANY=50 -'rule'=1 -'filter'=2 -'macro'=3 -'list'=4 -'name'=5 -'items'=6 -'condition'=7 -'desc'=8 -'action'=9 -'output'=10 -'priority'=11 -'tags'=12 -'prefilter'=13 -'enabled'=14 -'warn_evttypes'=15 -'skip-if-unknown-filter'=16 -'and'=17 -'or'=18 -'not'=19 -'<'=20 -'<='=21 -'>'=22 -'>='=23 -'='=24 -'!='=25 -'in'=26 -'contains'=27 -'icontains'=28 -'startswith'=29 -'pmatch'=30 -'exists'=31 -'['=32 -']'=33 -'('=34 -')'=35 -','=36 -'-'=37 diff --git a/core/policyengine/lang/parser/SfplLexer.interp b/core/policyengine/lang/parser/SfplLexer.interp deleted file mode 100644 index c352a58c..00000000 --- a/core/policyengine/lang/parser/SfplLexer.interp +++ /dev/null @@ -1,169 +0,0 @@ -token literal names: -null -'rule' -'filter' -'macro' -'list' -'name' -'items' -'condition' -'desc' -'action' -'output' -'priority' -'tags' -'prefilter' -'enabled' -'warn_evttypes' -'skip-if-unknown-filter' -'and' -'or' -'not' -'<' -'<=' -'>' -'>=' -'=' -'!=' -'in' -'contains' -'icontains' -'startswith' -'pmatch' -'exists' -'[' -']' -'(' -')' -',' -'-' -null -null -null -null -null -null -null -null -null -null -null -null -null - -token symbolic names: -null -RULE -FILTER -MACRO 
-LIST -NAME -ITEMS -COND -DESC -ACTION -OUTPUT -PRIORITY -TAGS -PREFILTER -ENABLED -WARNEVTTYPE -SKIPUNKNOWN -AND -OR -NOT -LT -LE -GT -GE -EQ -NEQ -IN -CONTAINS -ICONTAINS -STARTSWITH -PMATCH -EXISTS -LBRACK -RBRACK -LPAREN -RPAREN -LISTSEP -DECL -DEF -SEVERITY -SFSEVERITY -FSEVERITY -ID -NUMBER -PATH -STRING -TAG -WS -NL -COMMENT -ANY - -rule names: -RULE -FILTER -MACRO -LIST -NAME -ITEMS -COND -DESC -ACTION -OUTPUT -PRIORITY -TAGS -PREFILTER -ENABLED -WARNEVTTYPE -SKIPUNKNOWN -AND -OR -NOT -LT -LE -GT -GE -EQ -NEQ -IN -CONTAINS -ICONTAINS -STARTSWITH -PMATCH -EXISTS -LBRACK -RBRACK -LPAREN -RPAREN -LISTSEP -DECL -DEF -SEVERITY -SFSEVERITY -FSEVERITY -ID -NUMBER -PATH -STRING -TAG -STRLIT -ESC -WS -NL -COMMENT -ANY - -channel names: -DEFAULT_TOKEN_CHANNEL -HIDDEN - -mode names: -DEFAULT_MODE - -atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 52, 535, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 3, 37, 3, 38, 3, 38, 3, 39, 3, 39, 7, 39, 327, 10, 39, 12, 39, 14, 39, 330, 11, 39, 3, 39, 5, 39, 333, 10, 39, 3, 40, 3, 40, 5, 40, 337, 10, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 5, 41, 352, 10, 41, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 
3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 5, 42, 412, 10, 42, 3, 43, 3, 43, 3, 43, 5, 43, 417, 10, 43, 3, 43, 3, 43, 3, 43, 5, 43, 422, 10, 43, 3, 43, 3, 43, 7, 43, 426, 10, 43, 12, 43, 14, 43, 429, 11, 43, 3, 43, 3, 43, 3, 43, 7, 43, 434, 10, 43, 12, 43, 14, 43, 437, 11, 43, 3, 44, 6, 44, 440, 10, 44, 13, 44, 14, 44, 441, 3, 44, 3, 44, 6, 44, 446, 10, 44, 13, 44, 14, 44, 447, 5, 44, 450, 10, 44, 3, 45, 3, 45, 7, 45, 454, 10, 45, 12, 45, 14, 45, 457, 11, 45, 3, 46, 3, 46, 3, 46, 5, 46, 462, 10, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 469, 10, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 478, 10, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 488, 10, 46, 3, 46, 3, 46, 3, 46, 5, 46, 493, 10, 46, 3, 47, 3, 47, 3, 47, 3, 47, 3, 48, 7, 48, 500, 10, 48, 12, 48, 14, 48, 503, 11, 48, 3, 49, 3, 49, 3, 49, 3, 49, 5, 49, 509, 10, 49, 3, 50, 6, 50, 512, 10, 50, 13, 50, 14, 50, 513, 3, 50, 3, 50, 3, 51, 5, 51, 519, 10, 51, 3, 51, 3, 51, 3, 51, 3, 51, 3, 52, 3, 52, 7, 52, 527, 10, 52, 12, 52, 14, 52, 530, 11, 52, 3, 52, 3, 52, 3, 53, 3, 53, 3, 501, 2, 54, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 69, 36, 71, 37, 73, 38, 75, 39, 77, 40, 79, 41, 81, 42, 83, 43, 85, 44, 87, 45, 89, 46, 91, 47, 93, 48, 95, 2, 97, 2, 99, 49, 101, 50, 103, 51, 105, 52, 3, 2, 8, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 49, 49, 67, 92, 99, 124, 7, 2, 44, 44, 47, 59, 67, 92, 97, 97, 99, 124, 4, 2, 12, 12, 15, 15, 5, 2, 11, 12, 14, 15, 34, 34, 2, 566, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 103, 3, 2, 2, 2, 2, 105, 3, 2, 2, 2, 3, 107, 3, 2, 2, 2, 5, 112, 3, 2, 2, 2, 7, 119, 3, 2, 2, 2, 9, 125, 3, 2, 2, 2, 11, 130, 3, 2, 2, 2, 13, 135, 3, 2, 2, 2, 15, 141, 3, 2, 2, 2, 17, 151, 3, 2, 2, 2, 19, 156, 3, 2, 2, 2, 21, 163, 3, 2, 2, 2, 23, 170, 3, 2, 2, 2, 25, 179, 3, 2, 2, 2, 27, 184, 3, 2, 2, 2, 29, 194, 3, 2, 2, 2, 31, 202, 3, 2, 2, 2, 33, 216, 3, 2, 2, 2, 35, 239, 3, 2, 2, 2, 37, 243, 3, 2, 2, 2, 39, 246, 3, 2, 2, 2, 41, 250, 3, 2, 2, 2, 43, 252, 3, 2, 2, 2, 45, 255, 3, 2, 2, 2, 47, 257, 3, 2, 2, 2, 49, 260, 3, 2, 2, 2, 51, 262, 3, 2, 2, 2, 53, 265, 3, 2, 2, 2, 55, 268, 3, 2, 2, 2, 57, 277, 3, 2, 2, 2, 59, 287, 3, 2, 2, 2, 61, 298, 3, 2, 2, 2, 63, 305, 3, 2, 2, 2, 65, 312, 3, 2, 2, 2, 67, 314, 3, 2, 2, 2, 69, 316, 3, 2, 2, 2, 71, 318, 3, 2, 2, 2, 73, 320, 3, 2, 
2, 2, 75, 322, 3, 2, 2, 2, 77, 324, 3, 2, 2, 2, 79, 336, 3, 2, 2, 2, 81, 351, 3, 2, 2, 2, 83, 411, 3, 2, 2, 2, 85, 413, 3, 2, 2, 2, 87, 439, 3, 2, 2, 2, 89, 451, 3, 2, 2, 2, 91, 492, 3, 2, 2, 2, 93, 494, 3, 2, 2, 2, 95, 501, 3, 2, 2, 2, 97, 508, 3, 2, 2, 2, 99, 511, 3, 2, 2, 2, 101, 518, 3, 2, 2, 2, 103, 524, 3, 2, 2, 2, 105, 533, 3, 2, 2, 2, 107, 108, 7, 116, 2, 2, 108, 109, 7, 119, 2, 2, 109, 110, 7, 110, 2, 2, 110, 111, 7, 103, 2, 2, 111, 4, 3, 2, 2, 2, 112, 113, 7, 104, 2, 2, 113, 114, 7, 107, 2, 2, 114, 115, 7, 110, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 103, 2, 2, 117, 118, 7, 116, 2, 2, 118, 6, 3, 2, 2, 2, 119, 120, 7, 111, 2, 2, 120, 121, 7, 99, 2, 2, 121, 122, 7, 101, 2, 2, 122, 123, 7, 116, 2, 2, 123, 124, 7, 113, 2, 2, 124, 8, 3, 2, 2, 2, 125, 126, 7, 110, 2, 2, 126, 127, 7, 107, 2, 2, 127, 128, 7, 117, 2, 2, 128, 129, 7, 118, 2, 2, 129, 10, 3, 2, 2, 2, 130, 131, 7, 112, 2, 2, 131, 132, 7, 99, 2, 2, 132, 133, 7, 111, 2, 2, 133, 134, 7, 103, 2, 2, 134, 12, 3, 2, 2, 2, 135, 136, 7, 107, 2, 2, 136, 137, 7, 118, 2, 2, 137, 138, 7, 103, 2, 2, 138, 139, 7, 111, 2, 2, 139, 140, 7, 117, 2, 2, 140, 14, 3, 2, 2, 2, 141, 142, 7, 101, 2, 2, 142, 143, 7, 113, 2, 2, 143, 144, 7, 112, 2, 2, 144, 145, 7, 102, 2, 2, 145, 146, 7, 107, 2, 2, 146, 147, 7, 118, 2, 2, 147, 148, 7, 107, 2, 2, 148, 149, 7, 113, 2, 2, 149, 150, 7, 112, 2, 2, 150, 16, 3, 2, 2, 2, 151, 152, 7, 102, 2, 2, 152, 153, 7, 103, 2, 2, 153, 154, 7, 117, 2, 2, 154, 155, 7, 101, 2, 2, 155, 18, 3, 2, 2, 2, 156, 157, 7, 99, 2, 2, 157, 158, 7, 101, 2, 2, 158, 159, 7, 118, 2, 2, 159, 160, 7, 107, 2, 2, 160, 161, 7, 113, 2, 2, 161, 162, 7, 112, 2, 2, 162, 20, 3, 2, 2, 2, 163, 164, 7, 113, 2, 2, 164, 165, 7, 119, 2, 2, 165, 166, 7, 118, 2, 2, 166, 167, 7, 114, 2, 2, 167, 168, 7, 119, 2, 2, 168, 169, 7, 118, 2, 2, 169, 22, 3, 2, 2, 2, 170, 171, 7, 114, 2, 2, 171, 172, 7, 116, 2, 2, 172, 173, 7, 107, 2, 2, 173, 174, 7, 113, 2, 2, 174, 175, 7, 116, 2, 2, 175, 176, 7, 107, 2, 2, 176, 177, 7, 118, 2, 2, 177, 178, 7, 123, 2, 2, 178, 24, 3, 2, 2, 2, 179, 180, 7, 118, 2, 2, 180, 181, 7, 99, 2, 2, 181, 182, 7, 105, 2, 2, 182, 183, 7, 117, 2, 2, 183, 26, 3, 2, 2, 2, 184, 185, 7, 114, 2, 2, 185, 186, 7, 116, 2, 2, 186, 187, 7, 103, 2, 2, 187, 188, 7, 104, 2, 2, 188, 189, 7, 107, 2, 2, 189, 190, 7, 110, 2, 2, 190, 191, 7, 118, 2, 2, 191, 192, 7, 103, 2, 2, 192, 193, 7, 116, 2, 2, 193, 28, 3, 2, 2, 2, 194, 195, 7, 103, 2, 2, 195, 196, 7, 112, 2, 2, 196, 197, 7, 99, 2, 2, 197, 198, 7, 100, 2, 2, 198, 199, 7, 110, 2, 2, 199, 200, 7, 103, 2, 2, 200, 201, 7, 102, 2, 2, 201, 30, 3, 2, 2, 2, 202, 203, 7, 121, 2, 2, 203, 204, 7, 99, 2, 2, 204, 205, 7, 116, 2, 2, 205, 206, 7, 112, 2, 2, 206, 207, 7, 97, 2, 2, 207, 208, 7, 103, 2, 2, 208, 209, 7, 120, 2, 2, 209, 210, 7, 118, 2, 2, 210, 211, 7, 118, 2, 2, 211, 212, 7, 123, 2, 2, 212, 213, 7, 114, 2, 2, 213, 214, 7, 103, 2, 2, 214, 215, 7, 117, 2, 2, 215, 32, 3, 2, 2, 2, 216, 217, 7, 117, 2, 2, 217, 218, 7, 109, 2, 2, 218, 219, 7, 107, 2, 2, 219, 220, 7, 114, 2, 2, 220, 221, 7, 47, 2, 2, 221, 222, 7, 107, 2, 2, 222, 223, 7, 104, 2, 2, 223, 224, 7, 47, 2, 2, 224, 225, 7, 119, 2, 2, 225, 226, 7, 112, 2, 2, 226, 227, 7, 109, 2, 2, 227, 228, 7, 112, 2, 2, 228, 229, 7, 113, 2, 2, 229, 230, 7, 121, 2, 2, 230, 231, 7, 112, 2, 2, 231, 232, 7, 47, 2, 2, 232, 233, 7, 104, 2, 2, 233, 234, 7, 107, 2, 2, 234, 235, 7, 110, 2, 2, 235, 236, 7, 118, 2, 2, 236, 237, 7, 103, 2, 2, 237, 238, 7, 116, 2, 2, 238, 34, 3, 2, 2, 2, 239, 240, 7, 99, 2, 2, 240, 241, 7, 112, 2, 2, 241, 242, 7, 102, 2, 2, 242, 36, 3, 2, 2, 2, 243, 244, 
7, 113, 2, 2, 244, 245, 7, 116, 2, 2, 245, 38, 3, 2, 2, 2, 246, 247, 7, 112, 2, 2, 247, 248, 7, 113, 2, 2, 248, 249, 7, 118, 2, 2, 249, 40, 3, 2, 2, 2, 250, 251, 7, 62, 2, 2, 251, 42, 3, 2, 2, 2, 252, 253, 7, 62, 2, 2, 253, 254, 7, 63, 2, 2, 254, 44, 3, 2, 2, 2, 255, 256, 7, 64, 2, 2, 256, 46, 3, 2, 2, 2, 257, 258, 7, 64, 2, 2, 258, 259, 7, 63, 2, 2, 259, 48, 3, 2, 2, 2, 260, 261, 7, 63, 2, 2, 261, 50, 3, 2, 2, 2, 262, 263, 7, 35, 2, 2, 263, 264, 7, 63, 2, 2, 264, 52, 3, 2, 2, 2, 265, 266, 7, 107, 2, 2, 266, 267, 7, 112, 2, 2, 267, 54, 3, 2, 2, 2, 268, 269, 7, 101, 2, 2, 269, 270, 7, 113, 2, 2, 270, 271, 7, 112, 2, 2, 271, 272, 7, 118, 2, 2, 272, 273, 7, 99, 2, 2, 273, 274, 7, 107, 2, 2, 274, 275, 7, 112, 2, 2, 275, 276, 7, 117, 2, 2, 276, 56, 3, 2, 2, 2, 277, 278, 7, 107, 2, 2, 278, 279, 7, 101, 2, 2, 279, 280, 7, 113, 2, 2, 280, 281, 7, 112, 2, 2, 281, 282, 7, 118, 2, 2, 282, 283, 7, 99, 2, 2, 283, 284, 7, 107, 2, 2, 284, 285, 7, 112, 2, 2, 285, 286, 7, 117, 2, 2, 286, 58, 3, 2, 2, 2, 287, 288, 7, 117, 2, 2, 288, 289, 7, 118, 2, 2, 289, 290, 7, 99, 2, 2, 290, 291, 7, 116, 2, 2, 291, 292, 7, 118, 2, 2, 292, 293, 7, 117, 2, 2, 293, 294, 7, 121, 2, 2, 294, 295, 7, 107, 2, 2, 295, 296, 7, 118, 2, 2, 296, 297, 7, 106, 2, 2, 297, 60, 3, 2, 2, 2, 298, 299, 7, 114, 2, 2, 299, 300, 7, 111, 2, 2, 300, 301, 7, 99, 2, 2, 301, 302, 7, 118, 2, 2, 302, 303, 7, 101, 2, 2, 303, 304, 7, 106, 2, 2, 304, 62, 3, 2, 2, 2, 305, 306, 7, 103, 2, 2, 306, 307, 7, 122, 2, 2, 307, 308, 7, 107, 2, 2, 308, 309, 7, 117, 2, 2, 309, 310, 7, 118, 2, 2, 310, 311, 7, 117, 2, 2, 311, 64, 3, 2, 2, 2, 312, 313, 7, 93, 2, 2, 313, 66, 3, 2, 2, 2, 314, 315, 7, 95, 2, 2, 315, 68, 3, 2, 2, 2, 316, 317, 7, 42, 2, 2, 317, 70, 3, 2, 2, 2, 318, 319, 7, 43, 2, 2, 319, 72, 3, 2, 2, 2, 320, 321, 7, 46, 2, 2, 321, 74, 3, 2, 2, 2, 322, 323, 7, 47, 2, 2, 323, 76, 3, 2, 2, 2, 324, 332, 7, 60, 2, 2, 325, 327, 7, 34, 2, 2, 326, 325, 3, 2, 2, 2, 327, 330, 3, 2, 2, 2, 328, 326, 3, 2, 2, 2, 328, 329, 3, 2, 2, 2, 329, 331, 3, 2, 2, 2, 330, 328, 3, 2, 2, 2, 331, 333, 7, 64, 2, 2, 332, 328, 3, 2, 2, 2, 332, 333, 3, 2, 2, 2, 333, 78, 3, 2, 2, 2, 334, 337, 5, 81, 41, 2, 335, 337, 5, 83, 42, 2, 336, 334, 3, 2, 2, 2, 336, 335, 3, 2, 2, 2, 337, 80, 3, 2, 2, 2, 338, 339, 7, 106, 2, 2, 339, 340, 7, 107, 2, 2, 340, 341, 7, 105, 2, 2, 341, 352, 7, 106, 2, 2, 342, 343, 7, 111, 2, 2, 343, 344, 7, 103, 2, 2, 344, 345, 7, 102, 2, 2, 345, 346, 7, 107, 2, 2, 346, 347, 7, 119, 2, 2, 347, 352, 7, 111, 2, 2, 348, 349, 7, 110, 2, 2, 349, 350, 7, 113, 2, 2, 350, 352, 7, 121, 2, 2, 351, 338, 3, 2, 2, 2, 351, 342, 3, 2, 2, 2, 351, 348, 3, 2, 2, 2, 352, 82, 3, 2, 2, 2, 353, 354, 7, 103, 2, 2, 354, 355, 7, 111, 2, 2, 355, 356, 7, 103, 2, 2, 356, 357, 7, 116, 2, 2, 357, 358, 7, 105, 2, 2, 358, 359, 7, 103, 2, 2, 359, 360, 7, 112, 2, 2, 360, 361, 7, 101, 2, 2, 361, 412, 7, 123, 2, 2, 362, 363, 7, 99, 2, 2, 363, 364, 7, 110, 2, 2, 364, 365, 7, 103, 2, 2, 365, 366, 7, 116, 2, 2, 366, 412, 7, 118, 2, 2, 367, 368, 7, 101, 2, 2, 368, 369, 7, 116, 2, 2, 369, 370, 7, 107, 2, 2, 370, 371, 7, 118, 2, 2, 371, 372, 7, 107, 2, 2, 372, 373, 7, 101, 2, 2, 373, 374, 7, 99, 2, 2, 374, 412, 7, 110, 2, 2, 375, 376, 7, 103, 2, 2, 376, 377, 7, 116, 2, 2, 377, 378, 7, 116, 2, 2, 378, 379, 7, 113, 2, 2, 379, 412, 7, 116, 2, 2, 380, 381, 7, 121, 2, 2, 381, 382, 7, 99, 2, 2, 382, 383, 7, 116, 2, 2, 383, 384, 7, 112, 2, 2, 384, 385, 7, 107, 2, 2, 385, 386, 7, 112, 2, 2, 386, 412, 7, 105, 2, 2, 387, 388, 7, 112, 2, 2, 388, 389, 7, 113, 2, 2, 389, 390, 7, 118, 2, 2, 390, 391, 7, 107, 2, 2, 391, 392, 7, 
101, 2, 2, 392, 412, 7, 103, 2, 2, 393, 394, 7, 107, 2, 2, 394, 395, 7, 112, 2, 2, 395, 396, 7, 104, 2, 2, 396, 397, 7, 113, 2, 2, 397, 398, 7, 116, 2, 2, 398, 399, 7, 111, 2, 2, 399, 400, 7, 99, 2, 2, 400, 401, 7, 118, 2, 2, 401, 402, 7, 107, 2, 2, 402, 403, 7, 113, 2, 2, 403, 404, 7, 112, 2, 2, 404, 405, 7, 99, 2, 2, 405, 412, 7, 110, 2, 2, 406, 407, 7, 102, 2, 2, 407, 408, 7, 103, 2, 2, 408, 409, 7, 100, 2, 2, 409, 410, 7, 119, 2, 2, 410, 412, 7, 105, 2, 2, 411, 353, 3, 2, 2, 2, 411, 362, 3, 2, 2, 2, 411, 367, 3, 2, 2, 2, 411, 375, 3, 2, 2, 2, 411, 380, 3, 2, 2, 2, 411, 387, 3, 2, 2, 2, 411, 393, 3, 2, 2, 2, 411, 406, 3, 2, 2, 2, 412, 84, 3, 2, 2, 2, 413, 435, 9, 2, 2, 2, 414, 434, 9, 3, 2, 2, 415, 417, 7, 60, 2, 2, 416, 415, 3, 2, 2, 2, 416, 417, 3, 2, 2, 2, 417, 418, 3, 2, 2, 2, 418, 421, 7, 93, 2, 2, 419, 422, 5, 87, 44, 2, 420, 422, 5, 89, 45, 2, 421, 419, 3, 2, 2, 2, 421, 420, 3, 2, 2, 2, 422, 427, 3, 2, 2, 2, 423, 424, 7, 60, 2, 2, 424, 426, 5, 89, 45, 2, 425, 423, 3, 2, 2, 2, 426, 429, 3, 2, 2, 2, 427, 425, 3, 2, 2, 2, 427, 428, 3, 2, 2, 2, 428, 430, 3, 2, 2, 2, 429, 427, 3, 2, 2, 2, 430, 431, 7, 95, 2, 2, 431, 434, 3, 2, 2, 2, 432, 434, 7, 44, 2, 2, 433, 414, 3, 2, 2, 2, 433, 416, 3, 2, 2, 2, 433, 432, 3, 2, 2, 2, 434, 437, 3, 2, 2, 2, 435, 433, 3, 2, 2, 2, 435, 436, 3, 2, 2, 2, 436, 86, 3, 2, 2, 2, 437, 435, 3, 2, 2, 2, 438, 440, 4, 50, 59, 2, 439, 438, 3, 2, 2, 2, 440, 441, 3, 2, 2, 2, 441, 439, 3, 2, 2, 2, 441, 442, 3, 2, 2, 2, 442, 449, 3, 2, 2, 2, 443, 445, 7, 48, 2, 2, 444, 446, 4, 50, 59, 2, 445, 444, 3, 2, 2, 2, 446, 447, 3, 2, 2, 2, 447, 445, 3, 2, 2, 2, 447, 448, 3, 2, 2, 2, 448, 450, 3, 2, 2, 2, 449, 443, 3, 2, 2, 2, 449, 450, 3, 2, 2, 2, 450, 88, 3, 2, 2, 2, 451, 455, 9, 4, 2, 2, 452, 454, 9, 5, 2, 2, 453, 452, 3, 2, 2, 2, 454, 457, 3, 2, 2, 2, 455, 453, 3, 2, 2, 2, 455, 456, 3, 2, 2, 2, 456, 90, 3, 2, 2, 2, 457, 455, 3, 2, 2, 2, 458, 461, 7, 36, 2, 2, 459, 462, 5, 91, 46, 2, 460, 462, 5, 95, 48, 2, 461, 459, 3, 2, 2, 2, 461, 460, 3, 2, 2, 2, 462, 463, 3, 2, 2, 2, 463, 464, 7, 36, 2, 2, 464, 493, 3, 2, 2, 2, 465, 468, 7, 41, 2, 2, 466, 469, 5, 91, 46, 2, 467, 469, 5, 95, 48, 2, 468, 466, 3, 2, 2, 2, 468, 467, 3, 2, 2, 2, 469, 470, 3, 2, 2, 2, 470, 471, 7, 41, 2, 2, 471, 493, 3, 2, 2, 2, 472, 473, 7, 94, 2, 2, 473, 474, 7, 36, 2, 2, 474, 477, 3, 2, 2, 2, 475, 478, 5, 91, 46, 2, 476, 478, 5, 95, 48, 2, 477, 475, 3, 2, 2, 2, 477, 476, 3, 2, 2, 2, 478, 479, 3, 2, 2, 2, 479, 480, 7, 94, 2, 2, 480, 481, 7, 36, 2, 2, 481, 493, 3, 2, 2, 2, 482, 483, 7, 41, 2, 2, 483, 484, 7, 41, 2, 2, 484, 487, 3, 2, 2, 2, 485, 488, 5, 91, 46, 2, 486, 488, 5, 95, 48, 2, 487, 485, 3, 2, 2, 2, 487, 486, 3, 2, 2, 2, 488, 489, 3, 2, 2, 2, 489, 490, 7, 41, 2, 2, 490, 491, 7, 41, 2, 2, 491, 493, 3, 2, 2, 2, 492, 458, 3, 2, 2, 2, 492, 465, 3, 2, 2, 2, 492, 472, 3, 2, 2, 2, 492, 482, 3, 2, 2, 2, 493, 92, 3, 2, 2, 2, 494, 495, 5, 85, 43, 2, 495, 496, 7, 60, 2, 2, 496, 497, 5, 85, 43, 2, 497, 94, 3, 2, 2, 2, 498, 500, 10, 6, 2, 2, 499, 498, 3, 2, 2, 2, 500, 503, 3, 2, 2, 2, 501, 502, 3, 2, 2, 2, 501, 499, 3, 2, 2, 2, 502, 96, 3, 2, 2, 2, 503, 501, 3, 2, 2, 2, 504, 505, 7, 94, 2, 2, 505, 509, 7, 36, 2, 2, 506, 507, 7, 41, 2, 2, 507, 509, 7, 41, 2, 2, 508, 504, 3, 2, 2, 2, 508, 506, 3, 2, 2, 2, 509, 98, 3, 2, 2, 2, 510, 512, 9, 7, 2, 2, 511, 510, 3, 2, 2, 2, 512, 513, 3, 2, 2, 2, 513, 511, 3, 2, 2, 2, 513, 514, 3, 2, 2, 2, 514, 515, 3, 2, 2, 2, 515, 516, 8, 50, 2, 2, 516, 100, 3, 2, 2, 2, 517, 519, 7, 15, 2, 2, 518, 517, 3, 2, 2, 2, 518, 519, 3, 2, 2, 2, 519, 520, 3, 2, 2, 2, 520, 521, 7, 12, 2, 2, 521, 
522, 3, 2, 2, 2, 522, 523, 8, 51, 2, 2, 523, 102, 3, 2, 2, 2, 524, 528, 7, 37, 2, 2, 525, 527, 10, 6, 2, 2, 526, 525, 3, 2, 2, 2, 527, 530, 3, 2, 2, 2, 528, 526, 3, 2, 2, 2, 528, 529, 3, 2, 2, 2, 529, 531, 3, 2, 2, 2, 530, 528, 3, 2, 2, 2, 531, 532, 8, 52, 2, 2, 532, 104, 3, 2, 2, 2, 533, 534, 11, 2, 2, 2, 534, 106, 3, 2, 2, 2, 27, 2, 328, 332, 336, 351, 411, 416, 421, 427, 433, 435, 441, 447, 449, 455, 461, 468, 477, 487, 492, 501, 508, 513, 518, 528, 3, 2, 3, 2] \ No newline at end of file diff --git a/core/policyengine/lang/parser/SfplLexer.tokens b/core/policyengine/lang/parser/SfplLexer.tokens deleted file mode 100644 index fed61ec3..00000000 --- a/core/policyengine/lang/parser/SfplLexer.tokens +++ /dev/null @@ -1,87 +0,0 @@ -RULE=1 -FILTER=2 -MACRO=3 -LIST=4 -NAME=5 -ITEMS=6 -COND=7 -DESC=8 -ACTION=9 -OUTPUT=10 -PRIORITY=11 -TAGS=12 -PREFILTER=13 -ENABLED=14 -WARNEVTTYPE=15 -SKIPUNKNOWN=16 -AND=17 -OR=18 -NOT=19 -LT=20 -LE=21 -GT=22 -GE=23 -EQ=24 -NEQ=25 -IN=26 -CONTAINS=27 -ICONTAINS=28 -STARTSWITH=29 -PMATCH=30 -EXISTS=31 -LBRACK=32 -RBRACK=33 -LPAREN=34 -RPAREN=35 -LISTSEP=36 -DECL=37 -DEF=38 -SEVERITY=39 -SFSEVERITY=40 -FSEVERITY=41 -ID=42 -NUMBER=43 -PATH=44 -STRING=45 -TAG=46 -WS=47 -NL=48 -COMMENT=49 -ANY=50 -'rule'=1 -'filter'=2 -'macro'=3 -'list'=4 -'name'=5 -'items'=6 -'condition'=7 -'desc'=8 -'action'=9 -'output'=10 -'priority'=11 -'tags'=12 -'prefilter'=13 -'enabled'=14 -'warn_evttypes'=15 -'skip-if-unknown-filter'=16 -'and'=17 -'or'=18 -'not'=19 -'<'=20 -'<='=21 -'>'=22 -'>='=23 -'='=24 -'!='=25 -'in'=26 -'contains'=27 -'icontains'=28 -'startswith'=29 -'pmatch'=30 -'exists'=31 -'['=32 -']'=33 -'('=34 -')'=35 -','=36 -'-'=37 diff --git a/core/policyengine/lang/parser/sfpl_lexer.go b/core/policyengine/lang/parser/sfpl_lexer.go deleted file mode 100644 index eba57c7c..00000000 --- a/core/policyengine/lang/parser/sfpl_lexer.go +++ /dev/null @@ -1,385 +0,0 @@ -// Code generated from Sfpl.g4 by ANTLR 4.8. DO NOT EDIT. 
- -package parser - -import ( - "fmt" - "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr" -) - -// Suppress unused import error -var _ = fmt.Printf -var _ = unicode.IsLetter - -var serializedLexerAtn = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 52, 535, - 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, - 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, - 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, - 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, - 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, - 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, - 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, - 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, - 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, - 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 3, 2, 3, 2, - 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, - 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, - 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 8, - 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, - 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, - 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, - 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, - 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, - 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, - 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, - 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, - 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, - 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, - 21, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, - 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, - 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, - 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, - 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, - 3, 31, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, - 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 3, 37, 3, 38, 3, 38, 3, 39, - 3, 39, 7, 39, 327, 10, 39, 12, 39, 14, 39, 330, 11, 39, 3, 39, 5, 39, 333, - 10, 39, 3, 40, 3, 40, 5, 40, 337, 10, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, - 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 5, 41, 352, - 10, 41, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, - 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, - 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, - 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, - 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, - 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 5, 42, 412, 10, 42, 3, - 43, 3, 43, 3, 43, 5, 43, 417, 10, 43, 3, 43, 3, 43, 3, 43, 5, 43, 422, - 10, 43, 3, 43, 3, 43, 7, 43, 426, 10, 43, 12, 43, 14, 43, 429, 11, 43, - 3, 43, 3, 43, 3, 43, 7, 43, 434, 10, 43, 12, 43, 14, 43, 437, 11, 43, 3, - 44, 6, 44, 440, 10, 44, 13, 44, 14, 44, 441, 3, 44, 3, 44, 6, 44, 446, - 10, 44, 13, 44, 14, 44, 447, 5, 44, 
450, 10, 44, 3, 45, 3, 45, 7, 45, 454, - 10, 45, 12, 45, 14, 45, 457, 11, 45, 3, 46, 3, 46, 3, 46, 5, 46, 462, 10, - 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 469, 10, 46, 3, 46, 3, 46, - 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 478, 10, 46, 3, 46, 3, 46, 3, - 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 488, 10, 46, 3, 46, 3, 46, - 3, 46, 5, 46, 493, 10, 46, 3, 47, 3, 47, 3, 47, 3, 47, 3, 48, 7, 48, 500, - 10, 48, 12, 48, 14, 48, 503, 11, 48, 3, 49, 3, 49, 3, 49, 3, 49, 5, 49, - 509, 10, 49, 3, 50, 6, 50, 512, 10, 50, 13, 50, 14, 50, 513, 3, 50, 3, - 50, 3, 51, 5, 51, 519, 10, 51, 3, 51, 3, 51, 3, 51, 3, 51, 3, 52, 3, 52, - 7, 52, 527, 10, 52, 12, 52, 14, 52, 530, 11, 52, 3, 52, 3, 52, 3, 53, 3, - 53, 3, 501, 2, 54, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, - 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, - 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, - 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 69, 36, 71, 37, - 73, 38, 75, 39, 77, 40, 79, 41, 81, 42, 83, 43, 85, 44, 87, 45, 89, 46, - 91, 47, 93, 48, 95, 2, 97, 2, 99, 49, 101, 50, 103, 51, 105, 52, 3, 2, - 8, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, - 97, 97, 99, 124, 5, 2, 49, 49, 67, 92, 99, 124, 7, 2, 44, 44, 47, 59, 67, - 92, 97, 97, 99, 124, 4, 2, 12, 12, 15, 15, 5, 2, 11, 12, 14, 15, 34, 34, - 2, 566, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, - 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, - 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, - 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, - 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, - 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, - 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, - 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, - 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, - 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, - 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, - 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, - 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 103, 3, 2, 2, 2, 2, 105, - 3, 2, 2, 2, 3, 107, 3, 2, 2, 2, 5, 112, 3, 2, 2, 2, 7, 119, 3, 2, 2, 2, - 9, 125, 3, 2, 2, 2, 11, 130, 3, 2, 2, 2, 13, 135, 3, 2, 2, 2, 15, 141, - 3, 2, 2, 2, 17, 151, 3, 2, 2, 2, 19, 156, 3, 2, 2, 2, 21, 163, 3, 2, 2, - 2, 23, 170, 3, 2, 2, 2, 25, 179, 3, 2, 2, 2, 27, 184, 3, 2, 2, 2, 29, 194, - 3, 2, 2, 2, 31, 202, 3, 2, 2, 2, 33, 216, 3, 2, 2, 2, 35, 239, 3, 2, 2, - 2, 37, 243, 3, 2, 2, 2, 39, 246, 3, 2, 2, 2, 41, 250, 3, 2, 2, 2, 43, 252, - 3, 2, 2, 2, 45, 255, 3, 2, 2, 2, 47, 257, 3, 2, 2, 2, 49, 260, 3, 2, 2, - 2, 51, 262, 3, 2, 2, 2, 53, 265, 3, 2, 2, 2, 55, 268, 3, 2, 2, 2, 57, 277, - 3, 2, 2, 2, 59, 287, 3, 2, 2, 2, 61, 298, 3, 2, 2, 2, 63, 305, 3, 2, 2, - 2, 65, 312, 3, 2, 2, 2, 67, 314, 3, 2, 2, 2, 69, 316, 3, 2, 2, 2, 71, 318, - 3, 2, 2, 2, 73, 320, 3, 2, 2, 2, 75, 322, 3, 2, 2, 2, 77, 324, 3, 2, 2, - 2, 79, 336, 3, 2, 2, 2, 81, 351, 3, 2, 2, 2, 83, 411, 3, 2, 2, 2, 85, 413, - 3, 2, 2, 2, 87, 439, 3, 2, 2, 2, 89, 451, 3, 2, 2, 2, 91, 492, 3, 2, 2, - 2, 93, 494, 3, 2, 2, 2, 95, 501, 3, 2, 2, 2, 97, 508, 3, 2, 2, 2, 99, 511, - 3, 2, 2, 2, 101, 518, 3, 2, 2, 2, 103, 524, 3, 2, 2, 2, 105, 533, 3, 2, - 2, 2, 107, 108, 7, 116, 2, 2, 108, 109, 7, 119, 2, 2, 109, 110, 7, 110, - 
2, 2, 110, 111, 7, 103, 2, 2, 111, 4, 3, 2, 2, 2, 112, 113, 7, 104, 2, - 2, 113, 114, 7, 107, 2, 2, 114, 115, 7, 110, 2, 2, 115, 116, 7, 118, 2, - 2, 116, 117, 7, 103, 2, 2, 117, 118, 7, 116, 2, 2, 118, 6, 3, 2, 2, 2, - 119, 120, 7, 111, 2, 2, 120, 121, 7, 99, 2, 2, 121, 122, 7, 101, 2, 2, - 122, 123, 7, 116, 2, 2, 123, 124, 7, 113, 2, 2, 124, 8, 3, 2, 2, 2, 125, - 126, 7, 110, 2, 2, 126, 127, 7, 107, 2, 2, 127, 128, 7, 117, 2, 2, 128, - 129, 7, 118, 2, 2, 129, 10, 3, 2, 2, 2, 130, 131, 7, 112, 2, 2, 131, 132, - 7, 99, 2, 2, 132, 133, 7, 111, 2, 2, 133, 134, 7, 103, 2, 2, 134, 12, 3, - 2, 2, 2, 135, 136, 7, 107, 2, 2, 136, 137, 7, 118, 2, 2, 137, 138, 7, 103, - 2, 2, 138, 139, 7, 111, 2, 2, 139, 140, 7, 117, 2, 2, 140, 14, 3, 2, 2, - 2, 141, 142, 7, 101, 2, 2, 142, 143, 7, 113, 2, 2, 143, 144, 7, 112, 2, - 2, 144, 145, 7, 102, 2, 2, 145, 146, 7, 107, 2, 2, 146, 147, 7, 118, 2, - 2, 147, 148, 7, 107, 2, 2, 148, 149, 7, 113, 2, 2, 149, 150, 7, 112, 2, - 2, 150, 16, 3, 2, 2, 2, 151, 152, 7, 102, 2, 2, 152, 153, 7, 103, 2, 2, - 153, 154, 7, 117, 2, 2, 154, 155, 7, 101, 2, 2, 155, 18, 3, 2, 2, 2, 156, - 157, 7, 99, 2, 2, 157, 158, 7, 101, 2, 2, 158, 159, 7, 118, 2, 2, 159, - 160, 7, 107, 2, 2, 160, 161, 7, 113, 2, 2, 161, 162, 7, 112, 2, 2, 162, - 20, 3, 2, 2, 2, 163, 164, 7, 113, 2, 2, 164, 165, 7, 119, 2, 2, 165, 166, - 7, 118, 2, 2, 166, 167, 7, 114, 2, 2, 167, 168, 7, 119, 2, 2, 168, 169, - 7, 118, 2, 2, 169, 22, 3, 2, 2, 2, 170, 171, 7, 114, 2, 2, 171, 172, 7, - 116, 2, 2, 172, 173, 7, 107, 2, 2, 173, 174, 7, 113, 2, 2, 174, 175, 7, - 116, 2, 2, 175, 176, 7, 107, 2, 2, 176, 177, 7, 118, 2, 2, 177, 178, 7, - 123, 2, 2, 178, 24, 3, 2, 2, 2, 179, 180, 7, 118, 2, 2, 180, 181, 7, 99, - 2, 2, 181, 182, 7, 105, 2, 2, 182, 183, 7, 117, 2, 2, 183, 26, 3, 2, 2, - 2, 184, 185, 7, 114, 2, 2, 185, 186, 7, 116, 2, 2, 186, 187, 7, 103, 2, - 2, 187, 188, 7, 104, 2, 2, 188, 189, 7, 107, 2, 2, 189, 190, 7, 110, 2, - 2, 190, 191, 7, 118, 2, 2, 191, 192, 7, 103, 2, 2, 192, 193, 7, 116, 2, - 2, 193, 28, 3, 2, 2, 2, 194, 195, 7, 103, 2, 2, 195, 196, 7, 112, 2, 2, - 196, 197, 7, 99, 2, 2, 197, 198, 7, 100, 2, 2, 198, 199, 7, 110, 2, 2, - 199, 200, 7, 103, 2, 2, 200, 201, 7, 102, 2, 2, 201, 30, 3, 2, 2, 2, 202, - 203, 7, 121, 2, 2, 203, 204, 7, 99, 2, 2, 204, 205, 7, 116, 2, 2, 205, - 206, 7, 112, 2, 2, 206, 207, 7, 97, 2, 2, 207, 208, 7, 103, 2, 2, 208, - 209, 7, 120, 2, 2, 209, 210, 7, 118, 2, 2, 210, 211, 7, 118, 2, 2, 211, - 212, 7, 123, 2, 2, 212, 213, 7, 114, 2, 2, 213, 214, 7, 103, 2, 2, 214, - 215, 7, 117, 2, 2, 215, 32, 3, 2, 2, 2, 216, 217, 7, 117, 2, 2, 217, 218, - 7, 109, 2, 2, 218, 219, 7, 107, 2, 2, 219, 220, 7, 114, 2, 2, 220, 221, - 7, 47, 2, 2, 221, 222, 7, 107, 2, 2, 222, 223, 7, 104, 2, 2, 223, 224, - 7, 47, 2, 2, 224, 225, 7, 119, 2, 2, 225, 226, 7, 112, 2, 2, 226, 227, - 7, 109, 2, 2, 227, 228, 7, 112, 2, 2, 228, 229, 7, 113, 2, 2, 229, 230, - 7, 121, 2, 2, 230, 231, 7, 112, 2, 2, 231, 232, 7, 47, 2, 2, 232, 233, - 7, 104, 2, 2, 233, 234, 7, 107, 2, 2, 234, 235, 7, 110, 2, 2, 235, 236, - 7, 118, 2, 2, 236, 237, 7, 103, 2, 2, 237, 238, 7, 116, 2, 2, 238, 34, - 3, 2, 2, 2, 239, 240, 7, 99, 2, 2, 240, 241, 7, 112, 2, 2, 241, 242, 7, - 102, 2, 2, 242, 36, 3, 2, 2, 2, 243, 244, 7, 113, 2, 2, 244, 245, 7, 116, - 2, 2, 245, 38, 3, 2, 2, 2, 246, 247, 7, 112, 2, 2, 247, 248, 7, 113, 2, - 2, 248, 249, 7, 118, 2, 2, 249, 40, 3, 2, 2, 2, 250, 251, 7, 62, 2, 2, - 251, 42, 3, 2, 2, 2, 252, 253, 7, 62, 2, 2, 253, 254, 7, 63, 2, 2, 254, - 44, 3, 2, 2, 2, 255, 256, 7, 64, 2, 2, 256, 46, 3, 2, 2, 2, 257, 258, 
7, - 64, 2, 2, 258, 259, 7, 63, 2, 2, 259, 48, 3, 2, 2, 2, 260, 261, 7, 63, - 2, 2, 261, 50, 3, 2, 2, 2, 262, 263, 7, 35, 2, 2, 263, 264, 7, 63, 2, 2, - 264, 52, 3, 2, 2, 2, 265, 266, 7, 107, 2, 2, 266, 267, 7, 112, 2, 2, 267, - 54, 3, 2, 2, 2, 268, 269, 7, 101, 2, 2, 269, 270, 7, 113, 2, 2, 270, 271, - 7, 112, 2, 2, 271, 272, 7, 118, 2, 2, 272, 273, 7, 99, 2, 2, 273, 274, - 7, 107, 2, 2, 274, 275, 7, 112, 2, 2, 275, 276, 7, 117, 2, 2, 276, 56, - 3, 2, 2, 2, 277, 278, 7, 107, 2, 2, 278, 279, 7, 101, 2, 2, 279, 280, 7, - 113, 2, 2, 280, 281, 7, 112, 2, 2, 281, 282, 7, 118, 2, 2, 282, 283, 7, - 99, 2, 2, 283, 284, 7, 107, 2, 2, 284, 285, 7, 112, 2, 2, 285, 286, 7, - 117, 2, 2, 286, 58, 3, 2, 2, 2, 287, 288, 7, 117, 2, 2, 288, 289, 7, 118, - 2, 2, 289, 290, 7, 99, 2, 2, 290, 291, 7, 116, 2, 2, 291, 292, 7, 118, - 2, 2, 292, 293, 7, 117, 2, 2, 293, 294, 7, 121, 2, 2, 294, 295, 7, 107, - 2, 2, 295, 296, 7, 118, 2, 2, 296, 297, 7, 106, 2, 2, 297, 60, 3, 2, 2, - 2, 298, 299, 7, 114, 2, 2, 299, 300, 7, 111, 2, 2, 300, 301, 7, 99, 2, - 2, 301, 302, 7, 118, 2, 2, 302, 303, 7, 101, 2, 2, 303, 304, 7, 106, 2, - 2, 304, 62, 3, 2, 2, 2, 305, 306, 7, 103, 2, 2, 306, 307, 7, 122, 2, 2, - 307, 308, 7, 107, 2, 2, 308, 309, 7, 117, 2, 2, 309, 310, 7, 118, 2, 2, - 310, 311, 7, 117, 2, 2, 311, 64, 3, 2, 2, 2, 312, 313, 7, 93, 2, 2, 313, - 66, 3, 2, 2, 2, 314, 315, 7, 95, 2, 2, 315, 68, 3, 2, 2, 2, 316, 317, 7, - 42, 2, 2, 317, 70, 3, 2, 2, 2, 318, 319, 7, 43, 2, 2, 319, 72, 3, 2, 2, - 2, 320, 321, 7, 46, 2, 2, 321, 74, 3, 2, 2, 2, 322, 323, 7, 47, 2, 2, 323, - 76, 3, 2, 2, 2, 324, 332, 7, 60, 2, 2, 325, 327, 7, 34, 2, 2, 326, 325, - 3, 2, 2, 2, 327, 330, 3, 2, 2, 2, 328, 326, 3, 2, 2, 2, 328, 329, 3, 2, - 2, 2, 329, 331, 3, 2, 2, 2, 330, 328, 3, 2, 2, 2, 331, 333, 7, 64, 2, 2, - 332, 328, 3, 2, 2, 2, 332, 333, 3, 2, 2, 2, 333, 78, 3, 2, 2, 2, 334, 337, - 5, 81, 41, 2, 335, 337, 5, 83, 42, 2, 336, 334, 3, 2, 2, 2, 336, 335, 3, - 2, 2, 2, 337, 80, 3, 2, 2, 2, 338, 339, 7, 106, 2, 2, 339, 340, 7, 107, - 2, 2, 340, 341, 7, 105, 2, 2, 341, 352, 7, 106, 2, 2, 342, 343, 7, 111, - 2, 2, 343, 344, 7, 103, 2, 2, 344, 345, 7, 102, 2, 2, 345, 346, 7, 107, - 2, 2, 346, 347, 7, 119, 2, 2, 347, 352, 7, 111, 2, 2, 348, 349, 7, 110, - 2, 2, 349, 350, 7, 113, 2, 2, 350, 352, 7, 121, 2, 2, 351, 338, 3, 2, 2, - 2, 351, 342, 3, 2, 2, 2, 351, 348, 3, 2, 2, 2, 352, 82, 3, 2, 2, 2, 353, - 354, 7, 103, 2, 2, 354, 355, 7, 111, 2, 2, 355, 356, 7, 103, 2, 2, 356, - 357, 7, 116, 2, 2, 357, 358, 7, 105, 2, 2, 358, 359, 7, 103, 2, 2, 359, - 360, 7, 112, 2, 2, 360, 361, 7, 101, 2, 2, 361, 412, 7, 123, 2, 2, 362, - 363, 7, 99, 2, 2, 363, 364, 7, 110, 2, 2, 364, 365, 7, 103, 2, 2, 365, - 366, 7, 116, 2, 2, 366, 412, 7, 118, 2, 2, 367, 368, 7, 101, 2, 2, 368, - 369, 7, 116, 2, 2, 369, 370, 7, 107, 2, 2, 370, 371, 7, 118, 2, 2, 371, - 372, 7, 107, 2, 2, 372, 373, 7, 101, 2, 2, 373, 374, 7, 99, 2, 2, 374, - 412, 7, 110, 2, 2, 375, 376, 7, 103, 2, 2, 376, 377, 7, 116, 2, 2, 377, - 378, 7, 116, 2, 2, 378, 379, 7, 113, 2, 2, 379, 412, 7, 116, 2, 2, 380, - 381, 7, 121, 2, 2, 381, 382, 7, 99, 2, 2, 382, 383, 7, 116, 2, 2, 383, - 384, 7, 112, 2, 2, 384, 385, 7, 107, 2, 2, 385, 386, 7, 112, 2, 2, 386, - 412, 7, 105, 2, 2, 387, 388, 7, 112, 2, 2, 388, 389, 7, 113, 2, 2, 389, - 390, 7, 118, 2, 2, 390, 391, 7, 107, 2, 2, 391, 392, 7, 101, 2, 2, 392, - 412, 7, 103, 2, 2, 393, 394, 7, 107, 2, 2, 394, 395, 7, 112, 2, 2, 395, - 396, 7, 104, 2, 2, 396, 397, 7, 113, 2, 2, 397, 398, 7, 116, 2, 2, 398, - 399, 7, 111, 2, 2, 399, 400, 7, 99, 2, 2, 400, 401, 7, 118, 
2, 2, 401, - 402, 7, 107, 2, 2, 402, 403, 7, 113, 2, 2, 403, 404, 7, 112, 2, 2, 404, - 405, 7, 99, 2, 2, 405, 412, 7, 110, 2, 2, 406, 407, 7, 102, 2, 2, 407, - 408, 7, 103, 2, 2, 408, 409, 7, 100, 2, 2, 409, 410, 7, 119, 2, 2, 410, - 412, 7, 105, 2, 2, 411, 353, 3, 2, 2, 2, 411, 362, 3, 2, 2, 2, 411, 367, - 3, 2, 2, 2, 411, 375, 3, 2, 2, 2, 411, 380, 3, 2, 2, 2, 411, 387, 3, 2, - 2, 2, 411, 393, 3, 2, 2, 2, 411, 406, 3, 2, 2, 2, 412, 84, 3, 2, 2, 2, - 413, 435, 9, 2, 2, 2, 414, 434, 9, 3, 2, 2, 415, 417, 7, 60, 2, 2, 416, - 415, 3, 2, 2, 2, 416, 417, 3, 2, 2, 2, 417, 418, 3, 2, 2, 2, 418, 421, - 7, 93, 2, 2, 419, 422, 5, 87, 44, 2, 420, 422, 5, 89, 45, 2, 421, 419, - 3, 2, 2, 2, 421, 420, 3, 2, 2, 2, 422, 427, 3, 2, 2, 2, 423, 424, 7, 60, - 2, 2, 424, 426, 5, 89, 45, 2, 425, 423, 3, 2, 2, 2, 426, 429, 3, 2, 2, - 2, 427, 425, 3, 2, 2, 2, 427, 428, 3, 2, 2, 2, 428, 430, 3, 2, 2, 2, 429, - 427, 3, 2, 2, 2, 430, 431, 7, 95, 2, 2, 431, 434, 3, 2, 2, 2, 432, 434, - 7, 44, 2, 2, 433, 414, 3, 2, 2, 2, 433, 416, 3, 2, 2, 2, 433, 432, 3, 2, - 2, 2, 434, 437, 3, 2, 2, 2, 435, 433, 3, 2, 2, 2, 435, 436, 3, 2, 2, 2, - 436, 86, 3, 2, 2, 2, 437, 435, 3, 2, 2, 2, 438, 440, 4, 50, 59, 2, 439, - 438, 3, 2, 2, 2, 440, 441, 3, 2, 2, 2, 441, 439, 3, 2, 2, 2, 441, 442, - 3, 2, 2, 2, 442, 449, 3, 2, 2, 2, 443, 445, 7, 48, 2, 2, 444, 446, 4, 50, - 59, 2, 445, 444, 3, 2, 2, 2, 446, 447, 3, 2, 2, 2, 447, 445, 3, 2, 2, 2, - 447, 448, 3, 2, 2, 2, 448, 450, 3, 2, 2, 2, 449, 443, 3, 2, 2, 2, 449, - 450, 3, 2, 2, 2, 450, 88, 3, 2, 2, 2, 451, 455, 9, 4, 2, 2, 452, 454, 9, - 5, 2, 2, 453, 452, 3, 2, 2, 2, 454, 457, 3, 2, 2, 2, 455, 453, 3, 2, 2, - 2, 455, 456, 3, 2, 2, 2, 456, 90, 3, 2, 2, 2, 457, 455, 3, 2, 2, 2, 458, - 461, 7, 36, 2, 2, 459, 462, 5, 91, 46, 2, 460, 462, 5, 95, 48, 2, 461, - 459, 3, 2, 2, 2, 461, 460, 3, 2, 2, 2, 462, 463, 3, 2, 2, 2, 463, 464, - 7, 36, 2, 2, 464, 493, 3, 2, 2, 2, 465, 468, 7, 41, 2, 2, 466, 469, 5, - 91, 46, 2, 467, 469, 5, 95, 48, 2, 468, 466, 3, 2, 2, 2, 468, 467, 3, 2, - 2, 2, 469, 470, 3, 2, 2, 2, 470, 471, 7, 41, 2, 2, 471, 493, 3, 2, 2, 2, - 472, 473, 7, 94, 2, 2, 473, 474, 7, 36, 2, 2, 474, 477, 3, 2, 2, 2, 475, - 478, 5, 91, 46, 2, 476, 478, 5, 95, 48, 2, 477, 475, 3, 2, 2, 2, 477, 476, - 3, 2, 2, 2, 478, 479, 3, 2, 2, 2, 479, 480, 7, 94, 2, 2, 480, 481, 7, 36, - 2, 2, 481, 493, 3, 2, 2, 2, 482, 483, 7, 41, 2, 2, 483, 484, 7, 41, 2, - 2, 484, 487, 3, 2, 2, 2, 485, 488, 5, 91, 46, 2, 486, 488, 5, 95, 48, 2, - 487, 485, 3, 2, 2, 2, 487, 486, 3, 2, 2, 2, 488, 489, 3, 2, 2, 2, 489, - 490, 7, 41, 2, 2, 490, 491, 7, 41, 2, 2, 491, 493, 3, 2, 2, 2, 492, 458, - 3, 2, 2, 2, 492, 465, 3, 2, 2, 2, 492, 472, 3, 2, 2, 2, 492, 482, 3, 2, - 2, 2, 493, 92, 3, 2, 2, 2, 494, 495, 5, 85, 43, 2, 495, 496, 7, 60, 2, - 2, 496, 497, 5, 85, 43, 2, 497, 94, 3, 2, 2, 2, 498, 500, 10, 6, 2, 2, - 499, 498, 3, 2, 2, 2, 500, 503, 3, 2, 2, 2, 501, 502, 3, 2, 2, 2, 501, - 499, 3, 2, 2, 2, 502, 96, 3, 2, 2, 2, 503, 501, 3, 2, 2, 2, 504, 505, 7, - 94, 2, 2, 505, 509, 7, 36, 2, 2, 506, 507, 7, 41, 2, 2, 507, 509, 7, 41, - 2, 2, 508, 504, 3, 2, 2, 2, 508, 506, 3, 2, 2, 2, 509, 98, 3, 2, 2, 2, - 510, 512, 9, 7, 2, 2, 511, 510, 3, 2, 2, 2, 512, 513, 3, 2, 2, 2, 513, - 511, 3, 2, 2, 2, 513, 514, 3, 2, 2, 2, 514, 515, 3, 2, 2, 2, 515, 516, - 8, 50, 2, 2, 516, 100, 3, 2, 2, 2, 517, 519, 7, 15, 2, 2, 518, 517, 3, - 2, 2, 2, 518, 519, 3, 2, 2, 2, 519, 520, 3, 2, 2, 2, 520, 521, 7, 12, 2, - 2, 521, 522, 3, 2, 2, 2, 522, 523, 8, 51, 2, 2, 523, 102, 3, 2, 2, 2, 524, - 528, 7, 37, 2, 2, 525, 527, 10, 6, 2, 2, 526, 525, 3, 2, 
2, 2, 527, 530, - 3, 2, 2, 2, 528, 526, 3, 2, 2, 2, 528, 529, 3, 2, 2, 2, 529, 531, 3, 2, - 2, 2, 530, 528, 3, 2, 2, 2, 531, 532, 8, 52, 2, 2, 532, 104, 3, 2, 2, 2, - 533, 534, 11, 2, 2, 2, 534, 106, 3, 2, 2, 2, 27, 2, 328, 332, 336, 351, - 411, 416, 421, 427, 433, 435, 441, 447, 449, 455, 461, 468, 477, 487, 492, - 501, 508, 513, 518, 528, 3, 2, 3, 2, -} - -var lexerDeserializer = antlr.NewATNDeserializer(nil) -var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) - -var lexerChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", -} - -var lexerModeNames = []string{ - "DEFAULT_MODE", -} - -var lexerLiteralNames = []string{ - "", "'rule'", "'filter'", "'macro'", "'list'", "'name'", "'items'", "'condition'", - "'desc'", "'action'", "'output'", "'priority'", "'tags'", "'prefilter'", - "'enabled'", "'warn_evttypes'", "'skip-if-unknown-filter'", "'and'", "'or'", - "'not'", "'<'", "'<='", "'>'", "'>='", "'='", "'!='", "'in'", "'contains'", - "'icontains'", "'startswith'", "'pmatch'", "'exists'", "'['", "']'", "'('", - "')'", "','", "'-'", -} - -var lexerSymbolicNames = []string{ - "", "RULE", "FILTER", "MACRO", "LIST", "NAME", "ITEMS", "COND", "DESC", - "ACTION", "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", "WARNEVTTYPE", - "SKIPUNKNOWN", "AND", "OR", "NOT", "LT", "LE", "GT", "GE", "EQ", "NEQ", - "IN", "CONTAINS", "ICONTAINS", "STARTSWITH", "PMATCH", "EXISTS", "LBRACK", - "RBRACK", "LPAREN", "RPAREN", "LISTSEP", "DECL", "DEF", "SEVERITY", "SFSEVERITY", - "FSEVERITY", "ID", "NUMBER", "PATH", "STRING", "TAG", "WS", "NL", "COMMENT", - "ANY", -} - -var lexerRuleNames = []string{ - "RULE", "FILTER", "MACRO", "LIST", "NAME", "ITEMS", "COND", "DESC", "ACTION", - "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", "WARNEVTTYPE", "SKIPUNKNOWN", - "AND", "OR", "NOT", "LT", "LE", "GT", "GE", "EQ", "NEQ", "IN", "CONTAINS", - "ICONTAINS", "STARTSWITH", "PMATCH", "EXISTS", "LBRACK", "RBRACK", "LPAREN", - "RPAREN", "LISTSEP", "DECL", "DEF", "SEVERITY", "SFSEVERITY", "FSEVERITY", - "ID", "NUMBER", "PATH", "STRING", "TAG", "STRLIT", "ESC", "WS", "NL", "COMMENT", - "ANY", -} - -type SfplLexer struct { - *antlr.BaseLexer - channelNames []string - modeNames []string - // TODO: EOF string -} - -var lexerDecisionToDFA = make([]*antlr.DFA, len(lexerAtn.DecisionToState)) - -func init() { - for index, ds := range lexerAtn.DecisionToState { - lexerDecisionToDFA[index] = antlr.NewDFA(ds, index) - } -} - -func NewSfplLexer(input antlr.CharStream) *SfplLexer { - - l := new(SfplLexer) - - l.BaseLexer = antlr.NewBaseLexer(input) - l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache()) - - l.channelNames = lexerChannelNames - l.modeNames = lexerModeNames - l.RuleNames = lexerRuleNames - l.LiteralNames = lexerLiteralNames - l.SymbolicNames = lexerSymbolicNames - l.GrammarFileName = "Sfpl.g4" - // TODO: l.EOF = antlr.TokenEOF - - return l -} - -// SfplLexer tokens. 
-const ( - SfplLexerRULE = 1 - SfplLexerFILTER = 2 - SfplLexerMACRO = 3 - SfplLexerLIST = 4 - SfplLexerNAME = 5 - SfplLexerITEMS = 6 - SfplLexerCOND = 7 - SfplLexerDESC = 8 - SfplLexerACTION = 9 - SfplLexerOUTPUT = 10 - SfplLexerPRIORITY = 11 - SfplLexerTAGS = 12 - SfplLexerPREFILTER = 13 - SfplLexerENABLED = 14 - SfplLexerWARNEVTTYPE = 15 - SfplLexerSKIPUNKNOWN = 16 - SfplLexerAND = 17 - SfplLexerOR = 18 - SfplLexerNOT = 19 - SfplLexerLT = 20 - SfplLexerLE = 21 - SfplLexerGT = 22 - SfplLexerGE = 23 - SfplLexerEQ = 24 - SfplLexerNEQ = 25 - SfplLexerIN = 26 - SfplLexerCONTAINS = 27 - SfplLexerICONTAINS = 28 - SfplLexerSTARTSWITH = 29 - SfplLexerPMATCH = 30 - SfplLexerEXISTS = 31 - SfplLexerLBRACK = 32 - SfplLexerRBRACK = 33 - SfplLexerLPAREN = 34 - SfplLexerRPAREN = 35 - SfplLexerLISTSEP = 36 - SfplLexerDECL = 37 - SfplLexerDEF = 38 - SfplLexerSEVERITY = 39 - SfplLexerSFSEVERITY = 40 - SfplLexerFSEVERITY = 41 - SfplLexerID = 42 - SfplLexerNUMBER = 43 - SfplLexerPATH = 44 - SfplLexerSTRING = 45 - SfplLexerTAG = 46 - SfplLexerWS = 47 - SfplLexerNL = 48 - SfplLexerCOMMENT = 49 - SfplLexerANY = 50 -) diff --git a/core/policyengine/monitor/localpolicymonitor.go b/core/policyengine/monitor/localpolicymonitor.go new file mode 100644 index 00000000..a42f827b --- /dev/null +++ b/core/policyengine/monitor/localpolicymonitor.go @@ -0,0 +1,231 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package monitor implements a policy monitor for the policy engine. +package monitor + +import ( + "bytes" + "crypto/sha256" + "errors" + "io" + "os" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/sysflow-telemetry/sf-apis/go/ioutils" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/engine" +) + +// LocalPolicyMonitor is an object that monitors the local policy file +// directory for changes and compiles a new policy engine if changes occur. +type LocalPolicyMonitor[R any] struct { + config engine.Config + interChan chan *engine.PolicyInterpreter[R] + watcher *fsnotify.Watcher + started bool + done chan bool + policies map[string][]byte + createInter func() (*engine.PolicyInterpreter[R], error) + out func(R) +} + +// NewLocalPolicyMonitor returns a new policy monitor object given an engine configuration. 
+func NewLocalPolicyMonitor[R any](config engine.Config, createInter func() (*engine.PolicyInterpreter[R], error), out func(R)) (PolicyMonitor[R], error) { + lpm := &LocalPolicyMonitor[R]{config: config, interChan: make(chan *engine.PolicyInterpreter[R], 10), started: false, + done: make(chan bool), policies: make(map[string][]byte), createInter: createInter, out: out} + watcher, err := fsnotify.NewWatcher() + if err != nil { + logger.Error.Printf("Unable to create policy watcher object %v", err) + return nil, err + } + lpm.watcher = watcher + err = lpm.CheckForPolicyUpdate() + if err != nil { + return nil, err + } + return lpm, err +} + +// GetInterpreterChan returns a channel of the policy engine after they have been built. +// This channel can be checked for policy engines that are ready to be used. +func (p *LocalPolicyMonitor[R]) GetInterpreterChan() chan *engine.PolicyInterpreter[R] { + return p.interChan +} + +func (p *LocalPolicyMonitor[R]) dequeueFileEvents() int { + count := 0 + i := 0 + for i < 1000 { + select { + case ev := <-p.watcher.Events: + logger.Trace.Printf("Queued Event %#v, Operation: %s\n", ev, ev.Op.String()) + if hasModifiedYaml(ev) { + count++ + } + default: + time.Sleep(10 * time.Millisecond) + i++ + } + } + return count +} + +func hasModifiedYaml(event fsnotify.Event) bool { + result := false + if (event.Op == fsnotify.Create || event.Op == fsnotify.Remove || + event.Op == fsnotify.Write || event.Op == fsnotify.Rename) && (strings.HasSuffix(event.Name, ".yaml") || + strings.HasSuffix(event.Name, ".yml")) { + result = true + } + return result +} + +func checksum(path string) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + logger.Error.Printf("Unable to open file %s for checksum, %v", path, err) + return nil, err + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + logger.Error.Printf("Unable to calculate sha256 for file %s, %v", path, err) + return nil, err + } + return h.Sum(nil), nil +} + +func (p *LocalPolicyMonitor[R]) calculateChecksum() (bool, []string, error) { + paths, err := ioutils.ListFilePaths(p.config.PoliciesPath, ".yaml") + if err != nil { + return false, nil, err + } + if len(paths) == 0 { + p.policies = make(map[string][]byte) + return false, make([]string, 0), errors.New("no policy files with extension .yaml found in policy directory: " + p.config.PoliciesPath) + } + newPolicies := make(map[string][]byte) + changes := false + for _, policy := range paths { + cs, err := checksum(policy) + if err != nil { + p.policies = make(map[string][]byte) + return false, nil, err + } + if val, ok := p.policies[policy]; ok { + if !bytes.Equal(val, cs) { + changes = true + } + } else { + changes = true + } + newPolicies[policy] = cs + } + + if len(p.policies) != len(newPolicies) { + changes = true + } + p.policies = newPolicies + return changes, paths, nil +} + +// StartMonitor starts a thread to monitor the local policy directory. +func (p *LocalPolicyMonitor[R]) StartMonitor() error { + if p.started { + return nil + } + go func() { + for { + yamlCount := 0 + select { + case <-p.done: + logger.Trace.Printf("Policy monitor received done event... 
exiting...") + return + // watch for events + case event := <-p.watcher.Events: + logger.Trace.Printf("Event: %#v, Operation: %s\n", event, event.Op.String()) + if hasModifiedYaml(event) { + yamlCount++ + } + yamlCount += p.dequeueFileEvents() + logger.Trace.Printf("Received %d more file events.\n", yamlCount) + if yamlCount > 0 { + changes, policyFiles, err := p.calculateChecksum() + if err != nil { + if policyFiles != nil && len(policyFiles) == 0 { + logger.Error.Printf("There are no policy files in the policy path %s. Waiting for policies to be added.", p.config.PoliciesPath) + continue + } else { + logger.Error.Printf("Unable to calculate checksums on policies.. attempting to compile policies") + } + } + if changes || err != nil { + logger.Info.Println("Checking for policy update") + p.CheckForPolicyUpdate() //nolint:errcheck + } + } + // watch for errors + case err := <-p.watcher.Errors: + logger.Error.Printf("Error while watching policy directory %s, %v", p.config.PoliciesPath, err) + } + } + }() + p.started = true + if err := p.watcher.Add(p.config.PoliciesPath); err != nil { + logger.Error.Printf("Unable to add watch to directory %s, %v", p.config.PoliciesPath, err) + return err + } + return nil +} + +// StopMonitor sends a signal to exit the monitor thread. +func (p *LocalPolicyMonitor[R]) StopMonitor() error { + p.started = false + p.done <- true + return nil +} + +// CheckForPolicyUpdate creates a new policy engine based on updated policies. +func (p *LocalPolicyMonitor[R]) CheckForPolicyUpdate() error { + paths, err := ioutils.ListFilePaths(p.config.PoliciesPath, ".yaml") + if err != nil { + return err + } + if len(paths) == 0 { + return errors.New("no policy files with extension .yaml found in policy directory: " + p.config.PoliciesPath) + } + logger.Info.Println("Creating new policy interpreter") + pi, err := p.createInter() + if err != nil { + logger.Error.Printf("Unable to create a new policy interpreter using policy files in directory %s. Not using new policy files. %v", p.config.PoliciesPath, err) + return err + } + select { + case p.interChan <- pi: + logger.Info.Printf("Pushed new policy interpreter on channel") + default: + logger.Error.Printf("Unable to push new policy interpreter to policy thread.") + } + + return nil +} diff --git a/core/policyengine/monitor/policymonitor.go b/core/policyengine/monitor/policymonitor.go new file mode 100644 index 00000000..9c299931 --- /dev/null +++ b/core/policyengine/monitor/policymonitor.go @@ -0,0 +1,44 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package monitor implements a policy monitor for the policy engine. +package monitor + +import ( + "errors" + + "github.com/sysflow-telemetry/sf-processor/core/policyengine/engine" +) + +// PolicyMonitor is an interface representing policy monitor objects. +// Currently the interface supports a local directory policy monitor. 
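For orientation, a minimal consumer of this monitor could look roughly like the sketch below; the record type, the interpreter factory, and the output callback are placeholders (assumptions), since the real wiring lives in the policy engine plugin.

package main

import (
	"github.com/sysflow-telemetry/sf-processor/core/policyengine/engine"
	"github.com/sysflow-telemetry/sf-processor/core/policyengine/monitor"
)

// Record stands in for the policy engine's record type (assumption).
type Record struct{}

func main() {
	// Assumes a directory of .yaml policies and the local (directory-based) monitor;
	// the path is illustrative.
	cfg := engine.Config{PoliciesPath: "/path/to/policies", Monitor: engine.LocalType}

	// createInter compiles the policies under cfg.PoliciesPath into an interpreter
	// (placeholder body; the plugin supplies the real factory).
	createInter := func() (*engine.PolicyInterpreter[Record], error) { return nil, nil }

	// out receives records that match a rule (placeholder).
	out := func(r Record) {}

	pm, err := monitor.NewPolicyMonitor(cfg, createInter, out)
	if err != nil {
		panic(err)
	}
	if err := pm.StartMonitor(); err != nil {
		panic(err)
	}

	// Each time the policy directory changes, a freshly compiled interpreter is
	// pushed on this channel and can be swapped in as the active one.
	for pi := range pm.GetInterpreterChan() {
		_ = pi
	}
}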
+type PolicyMonitor[R any] interface { + GetInterpreterChan() chan *engine.PolicyInterpreter[R] + StartMonitor() error + StopMonitor() error + CheckForPolicyUpdate() error +} + +// NewPolicyMonitor creates a new policy monitor based on the engine configuration. +func NewPolicyMonitor[R any](config engine.Config, createInter func() (*engine.PolicyInterpreter[R], error), out func(R)) (PolicyMonitor[R], error) { + if config.Monitor == engine.LocalType { + return NewLocalPolicyMonitor(config, createInter, out) + } + return nil, errors.New("Policy monitor of type: " + config.Monitor.String() + " is not supported.") +} diff --git a/core/policyengine/policy/compiler.go b/core/policyengine/policy/compiler.go new file mode 100644 index 00000000..e5ed5fc1 --- /dev/null +++ b/core/policyengine/policy/compiler.go @@ -0,0 +1,26 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package policy implements input policy translation for the rules engine. +package policy + +type PolicyCompiler[R any] interface { + // Compile reads one or more input policy files, parses, and translates them to internal criteria objects. + Compile(paths ...string) ([]Rule[R], []Filter[R], error) +} diff --git a/core/policyengine/policy/falco/compiler.go b/core/policyengine/policy/falco/compiler.go new file mode 100644 index 00000000..9f83d890 --- /dev/null +++ b/core/policyengine/policy/falco/compiler.go @@ -0,0 +1,370 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package falco implements a frontend for (extended) Falco rules engine. +package falco + +import ( + "errors" + "regexp" + "strconv" + "strings" + + "github.com/antlr/antlr4/runtime/Go/antlr" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/common" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco/lang/errorhandler" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco/lang/parser" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" +) + +// Regular expression for parsing lists. +var itemsre = regexp.MustCompile(`(^\[)(.*)(\]$?)`) + +// PolicyCompiler defines a compiler for extended Falco rules. 
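A rough sketch of how this compiler is meant to be driven follows; the operations implementation and the record type are assumptions that, in practice, come from the policy engine's source package.

package main

import (
	"fmt"

	"github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco"
	"github.com/sysflow-telemetry/sf-processor/core/policyengine/source"
)

// Record stands in for the engine's record type (assumption).
type Record struct{}

// compilePolicies parses the given policy files and reports how many rules
// and filters were produced.
func compilePolicies(ops source.Operations[Record], paths ...string) error {
	pc := falco.NewPolicyCompiler(ops)
	rules, filters, err := pc.Compile(paths...)
	if err != nil {
		return err
	}
	fmt.Printf("compiled %d rules and %d filters\n", len(rules), len(filters))
	return nil
}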
+type PolicyCompiler[R any] struct { + *parser.BaseSfplListener + + // Operations + ops source.Operations[R] + + // Parsed rule and filter object maps + rules []policy.Rule[R] + filters []policy.Filter[R] + + // Accessory parsing maps + lists map[string][]string + macroCtxs map[string]parser.IExpressionContext +} + +// NewPolicyCompiler constructs a new compiler instance. +func NewPolicyCompiler[R any](ops source.Operations[R]) policy.PolicyCompiler[R] { + pc := new(PolicyCompiler[R]) + pc.ops = ops + pc.rules = make([]policy.Rule[R], 0) + pc.filters = make([]policy.Filter[R], 0) + pc.lists = make(map[string][]string) + pc.macroCtxs = make(map[string]parser.IExpressionContext) + return pc +} + +// Compile parses and interprets an input policy defined in path. +func (pc *PolicyCompiler[R]) compile(path string) error { + // Setup the input + is, err := antlr.NewFileStream(path) + if err != nil { + logger.Error.Println("Error reading policy from path", path) + return err + } + + // Create the Lexer + lexerErrors := &errorhandler.SfplErrorListener{} + lexer := parser.NewSfplLexer(is) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(lexerErrors) + stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel) + + // Create the Parser + parserErrors := &errorhandler.SfplErrorListener{} + p := parser.NewSfplParser(stream) + p.RemoveErrorListeners() + p.AddErrorListener(parserErrors) + + // Pre-processing (to deal with usage before definitions of macros and lists) + antlr.ParseTreeWalkerDefault.Walk(pc, p.Defs()) + p.GetInputStream().Seek(0) + + // Parse the policy + antlr.ParseTreeWalkerDefault.Walk(pc, p.Policy()) + + errFound := false + if len(lexerErrors.Errors) > 0 { + logger.Error.Printf("Lexer %d errors found\n", len(lexerErrors.Errors)) + for _, e := range lexerErrors.Errors { + logger.Error.Println("\t", e.Error()) + } + errFound = true + } + if len(parserErrors.Errors) > 0 { + logger.Error.Printf("Parser %d errors found\n", len(parserErrors.Errors)) + for _, e := range parserErrors.Errors { + logger.Error.Println("\t", e.Error()) + } + errFound = true + } + + if errFound { + return errors.New("errors found during compilation of policies. check logs for detail") + } + + return nil +} + +// Compile parses a set of input policies defined in paths. +func (pc *PolicyCompiler[R]) Compile(paths ...string) ([]policy.Rule[R], []policy.Filter[R], error) { + for _, path := range paths { + logger.Trace.Println("Parsing policy file ", path) + if err := pc.compile(path); err != nil { + return nil, nil, err + } + } + return pc.rules, pc.filters, nil +} + +// ExitList is called when production list is exited. +func (pc *PolicyCompiler[R]) ExitPlist(ctx *parser.PlistContext) { + logger.Trace.Println("Parsing list ", ctx.GetText()) + pc.lists[ctx.ID().GetText()] = pc.extractListFromItems(ctx.Items()) +} + +// ExitMacro is called when production macro is exited. +func (pc *PolicyCompiler[R]) ExitPmacro(ctx *parser.PmacroContext) { + logger.Trace.Println("Parsing macro ", ctx.GetText()) + pc.macroCtxs[ctx.ID().GetText()] = ctx.Expression() +} + +// ExitFilter is called when production filter is exited. 
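To make the condition translation concrete: a predicate such as proc.name = bash and proc.cmdline contains curl is lowered by the expression visitors further below into nested Any/All criteria over the source operations, roughly as in this sketch (the package name and attribute names are illustrative assumptions, not fixed by the grammar).

package falco_example

import (
	"github.com/sysflow-telemetry/sf-processor/core/policyengine/policy"
	"github.com/sysflow-telemetry/sf-processor/core/policyengine/source"
)

// exampleCondition shows the shape of the criterion built for the condition
// `proc.name = bash and proc.cmdline contains curl`.
func exampleCondition[R any](ops source.Operations[R]) policy.Criterion[R] {
	return policy.Any([]policy.Criterion[R]{
		policy.All([]policy.Criterion[R]{
			policy.First(ops.Compare("proc.name", "bash", source.Eq)),
			policy.First(ops.Compare("proc.cmdline", "curl", source.Contains)),
		}),
	})
}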
+func (pc *PolicyCompiler[R]) ExitPfilter(ctx *parser.PfilterContext) { + logger.Trace.Println("Parsing filter ", ctx.GetText()) + f := policy.Filter[R]{ + Name: ctx.ID().GetText(), + Condition: pc.visitExpression(ctx.Expression()), + Enabled: ctx.ENABLED() == nil || pc.getEnabledFlag(ctx.Enabled()), + } + pc.filters = append(pc.filters, f) +} + +// ExitFilter is called when production filter is exited. +func (pc *PolicyCompiler[R]) ExitPrule(ctx *parser.PruleContext) { + logger.Trace.Println("Parsing rule ", ctx.GetText()) + r := policy.Rule[R]{ + Name: pc.getOffChannelText(ctx.Text(0)), + Desc: pc.getOffChannelText(ctx.Text(1)), + Condition: pc.visitExpression(ctx.Expression()), + Actions: pc.getActions(ctx), + Tags: pc.getTags(ctx), + Priority: pc.getPriority(ctx), + Prefilter: pc.getPrefilter(ctx), + Enabled: ctx.ENABLED(0) == nil || pc.getEnabledFlag(ctx.Enabled(0)), + } + pc.rules = append(pc.rules, r) +} + +func (pc *PolicyCompiler[R]) getEnabledFlag(ctx parser.IEnabledContext) bool { + flag := common.TrimBoundingQuotes(ctx.GetText()) + if b, err := strconv.ParseBool(flag); err == nil { + return b + } + logger.Warn.Println("Unrecognized enabled flag: ", flag) + return true +} + +func (pc *PolicyCompiler[R]) getOffChannelText(ctx parser.ITextContext) string { + a := ctx.GetStart().GetStart() + b := ctx.GetStop().GetStop() + interval := antlr.Interval{Start: a, Stop: b} + return ctx.GetStart().GetInputStream().GetTextFromInterval(&interval) +} + +func (pc *PolicyCompiler[R]) getTags(ctx *parser.PruleContext) []policy.EnrichmentTag { + var tags = make([]policy.EnrichmentTag, 0) + ictx := ctx.Tags(0) + if ictx != nil { + return append(tags, pc.extractTags(ictx)) + } + return tags +} + +func (pc *PolicyCompiler[R]) getPrefilter(ctx *parser.PruleContext) []string { + var pfs = make([]string, 0) + ictx := ctx.Prefilter(0) + if ictx != nil { + return append(pfs, pc.extractList(ictx.GetText())...) + } + return pfs +} + +// Fix: fix handling of priority levels. +func (pc *PolicyCompiler[R]) getPriority(ctx *parser.PruleContext) policy.Priority { + ictx := ctx.Severity(0) + if ictx != nil { + p := ictx.GetText() + switch strings.ToLower(p) { + case policy.Low.String(): + return policy.Low + case policy.Medium.String(): + return policy.Medium + case policy.High.String(): + return policy.High + case FPriorityDebug: + return policy.Low + case FPriorityInfo: + return policy.Low + case FPriorityInformational: + return policy.Low + case FPriorityNotice: + return policy.Low + case FPriorityWarning: + return policy.Medium + case FPriorityError: + return policy.High + case FPriorityCritical: + return policy.High + case FPriorityEmergency: + return policy.High + default: + logger.Warn.Printf("Unrecognized priority value %s. Deferring to %s\n", p, policy.Low.String()) + } + } + return policy.Low +} + +func (pc *PolicyCompiler[R]) getActions(ctx *parser.PruleContext) []string { + var actions []string + ictx := ctx.Actions(0) + if ictx != nil { + return append(actions, pc.extractActions(ictx)...) 
+func (pc *PolicyCompiler[R]) getActions(ctx *parser.PruleContext) []string {
+	var actions []string
+	ictx := ctx.Actions(0)
+	if ictx != nil {
+		return append(actions, pc.extractActions(ictx)...)
+	}
+	return actions
+}
+
+func (pc *PolicyCompiler[R]) extractList(str string) []string {
+	var items []string
+	for _, i := range strings.Split(itemsre.ReplaceAllString(str, "$2"), common.LISTSEP) {
+		items = append(items, common.TrimBoundingQuotes(i))
+	}
+	return items
+}
+
+func (pc *PolicyCompiler[R]) extractListFromItems(ctx parser.IItemsContext) []string {
+	if ctx != nil {
+		return pc.extractList(ctx.GetText())
+	}
+	return []string{}
+}
+
+func (pc *PolicyCompiler[R]) extractTags(ctx parser.ITagsContext) []string {
+	if ctx != nil {
+		return pc.extractList(ctx.GetText())
+	}
+	return []string{}
+}
+
+func (pc *PolicyCompiler[R]) extractActions(ctx parser.IActionsContext) []string {
+	if ctx != nil {
+		return pc.extractList(ctx.GetText())
+	}
+	return []string{}
+}
+
+func (pc *PolicyCompiler[R]) extractListFromAtoms(ctxs []parser.IAtomContext) []string {
+	s := []string{}
+	for _, v := range ctxs {
+		s = append(s, pc.reduceList(v.GetText())...)
+	}
+	return s
+}
+
+func (pc *PolicyCompiler[R]) reduceList(sl string) []string {
+	s := []string{}
+	if l, ok := pc.lists[sl]; ok {
+		for _, v := range l {
+			s = append(s, pc.reduceList(v)...)
+		}
+	} else {
+		s = append(s, common.TrimBoundingQuotes(sl))
+	}
+	return s
+}
+
+func (pc *PolicyCompiler[R]) visitExpression(ctx parser.IExpressionContext) policy.Criterion[R] {
+	orCtx := ctx.GetChild(0).(parser.IOr_expressionContext)
+	orPreds := make([]policy.Criterion[R], 0)
+	for _, andCtx := range orCtx.GetChildren() {
+		if andCtx.GetChildCount() > 0 {
+			andPreds := make([]policy.Criterion[R], 0)
+			for _, termCtx := range andCtx.GetChildren() {
+				t, isTermCtx := termCtx.(parser.ITermContext)
+				if isTermCtx {
+					c := pc.visitTerm(t)
+					andPreds = append(andPreds, c)
+				}
+			}
+			orPreds = append(orPreds, policy.All(andPreds))
+		}
+	}
+	return policy.Any(orPreds)
+}
+
+func (pc *PolicyCompiler[R]) visitTerm(ctx parser.ITermContext) policy.Criterion[R] {
+	termCtx := ctx.(*parser.TermContext)
+	if termCtx.Variable() != nil {
+		if m, ok := pc.macroCtxs[termCtx.GetText()]; ok {
+			return pc.visitExpression(m)
+		}
+		logger.Error.Println("Unrecognized reference ", termCtx.GetText())
+	} else if termCtx.NOT() != nil {
+		return pc.visitTerm(termCtx.GetChild(1).(parser.ITermContext)).Not()
+	} else if opCtx, ok := termCtx.Unary_operator().(*parser.Unary_operatorContext); ok {
+		lop := termCtx.Atom(0).(*parser.AtomContext).GetText()
+		if opCtx.EXISTS() != nil {
+			return policy.First(pc.ops.Exists(lop))
+		}
+		logger.Error.Println("Unrecognized unary operator ", opCtx.GetText())
+	} else if opCtx, ok := termCtx.Binary_operator().(*parser.Binary_operatorContext); ok {
+		lop := termCtx.Atom(0).(*parser.AtomContext).GetText()
+		rop := termCtx.Atom(1).(*parser.AtomContext).GetText()
+		if opCtx.CONTAINS() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Contains))
+		} else if opCtx.ICONTAINS() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.IContains))
+		} else if opCtx.STARTSWITH() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Startswith))
+		} else if opCtx.ENDSWITH() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Endswith))
+		} else if opCtx.EQ() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Eq))
+		} else if opCtx.NEQ() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Eq)).Not()
+		} else if opCtx.GT() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Gt))
+		} else if opCtx.GE() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.GEq))
+		} else if opCtx.LT() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.Lt))
+		} else if opCtx.LE() != nil {
+			return policy.First(pc.ops.Compare(lop, rop, source.LEq))
+		}
+		logger.Error.Println("Unrecognized binary operator ", opCtx.GetText())
+	} else if termCtx.Expression() != nil {
+		return pc.visitExpression(termCtx.Expression())
+	} else if termCtx.IN() != nil {
+		lop := termCtx.Atom(0).(*parser.AtomContext).GetText()
+		rop := termCtx.AllAtom()[1:]
+		return policy.First(pc.ops.FoldAny(lop, pc.extractListFromAtoms(rop), source.Eq))
+	} else if termCtx.PMATCH() != nil {
+		lop := termCtx.Atom(0).(*parser.AtomContext).GetText()
+		rop := termCtx.AllAtom()[1:]
+		return policy.First(pc.ops.FoldAny(lop, pc.extractListFromAtoms(rop), source.Contains))
+	} else {
+		logger.Warn.Println("Unrecognized term ", termCtx.GetText())
+	}
+	return policy.False[R]()
+}
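
As a reading aid for visitExpression and visitTerm, the schematic below (not part of the change) shows the criterion tree the compiler builds for a condition of the form "A and B or C": a disjunction of conjunctions, with each leaf produced by the source operations. The attribute names and literals are placeholders; the helper would only compile if placed alongside compiler.go in the falco package.

// Schematic only: the shape produced for "A and B or C", where
// A = sf.proc.name = bash, B = sf.file.path contains /etc, C = exists sf.proc.tty.
func exampleCondition[R any](pc *PolicyCompiler[R]) policy.Criterion[R] {
	return policy.Any([]policy.Criterion[R]{
		policy.All([]policy.Criterion[R]{
			policy.First(pc.ops.Compare("sf.proc.name", "bash", source.Eq)),       // A
			policy.First(pc.ops.Compare("sf.file.path", "/etc", source.Contains)), // B
		}),
		policy.All([]policy.Criterion[R]{
			policy.First(pc.ops.Exists("sf.proc.tty")), // C
		}),
	})
}
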
diff --git a/core/policyengine/policy/falco/compiler_test.go b/core/policyengine/policy/falco/compiler_test.go
new file mode 100644
index 00000000..00450eea
--- /dev/null
+++ b/core/policyengine/policy/falco/compiler_test.go
@@ -0,0 +1,47 @@
+//
+// Copyright (C) 2023 IBM Corporation.
+//
+// Authors:
+// Frederico Araujo
+// Teryl Taylor
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package falco implements a frontend for (extended) Falco rules engine.
+package falco_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/sysflow-telemetry/sf-apis/go/ioutils"
+	"github.com/sysflow-telemetry/sf-apis/go/logger"
+	"github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco"
+	"github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord"
+)
+
+var rulesPath string = "../../../../resources/policies/runtimeintegrity"
+
+func TestMain(m *testing.M) {
+	logger.InitLoggers(logger.TRACE)
+	os.Exit(m.Run())
+}
+
+func TestCompiler(t *testing.T) {
+	pc := falco.NewPolicyCompiler(flatrecord.NewOperations())
+	paths, err := ioutils.ListRecursiveFilePaths(rulesPath, ".yaml")
+	assert.NoError(t, err)
+	_, _, err = pc.Compile(paths...)
+	assert.NoError(t, err)
+}
diff --git a/core/policyengine/policy/falco/constants.go b/core/policyengine/policy/falco/constants.go
new file mode 100644
index 00000000..94276be1
--- /dev/null
+++ b/core/policyengine/policy/falco/constants.go
@@ -0,0 +1,34 @@
+//
+// Copyright (C) 2023 IBM Corporation.
+//
+// Authors:
+// Frederico Araujo
+// Teryl Taylor
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Package falco implements a frontend for (extended) Falco rules engine. +package falco + +// Falco priority values. +const ( + FPriorityEmergency = "emergency" + FPriorityAlert = "alert" + FPriorityCritical = "critical" + FPriorityError = "error" + FPriorityWarning = "warning" + FPriorityNotice = "notice" + FPriorityInfo = "info" + FPriorityInformational = "informational" + FPriorityDebug = "debug" +) diff --git a/core/policyengine/lang/Sfpl.g4 b/core/policyengine/policy/falco/lang/Sfpl.g4 similarity index 54% rename from core/policyengine/lang/Sfpl.g4 rename to core/policyengine/policy/falco/lang/Sfpl.g4 index a0b219be..a5f35697 100644 --- a/core/policyengine/lang/Sfpl.g4 +++ b/core/policyengine/policy/falco/lang/Sfpl.g4 @@ -2,13 +2,14 @@ grammar Sfpl; RULE: 'rule'; FILTER: 'filter'; +DROP: 'drop'; MACRO: 'macro'; LIST: 'list'; NAME: 'name'; ITEMS: 'items'; COND: 'condition'; DESC: 'desc' ; -ACTION: 'action'; +ACTIONS: 'actions'; OUTPUT: 'output'; PRIORITY: 'priority'; TAGS: 'tags'; @@ -16,26 +17,48 @@ PREFILTER: 'prefilter'; ENABLED: 'enabled'; WARNEVTTYPE: 'warn_evttypes'; SKIPUNKNOWN: 'skip-if-unknown-filter'; +FAPPEND: 'append'; +REQ: 'required_engine_version'; policy - : (prule | pfilter | pmacro | plist)+ EOF + : (prule | pfilter | pmacro | plist | preq)+ EOF ; -prule - : DECL RULE DEF text DESC DEF text COND DEF expression (ACTION|OUTPUT) DEF text PRIORITY DEF severity (TAGS DEF tags | PREFILTER DEF prefilter | ENABLED DEF enabled | WARNEVTTYPE DEF warnevttype | SKIPUNKNOWN DEF skipunknown)* +defs + : (srule | sfilter | pmacro | plist | preq)* EOF + ; + +prule + : DECL RULE DEF text DESC DEF text COND DEF expression (OUTPUT DEF text | ACTIONS DEF actions | PRIORITY DEF severity | TAGS DEF tags | PREFILTER DEF prefilter | ENABLED DEF enabled | WARNEVTTYPE DEF warnevttype | SKIPUNKNOWN DEF skipunknown)* + ; + +srule + : DECL RULE DEF text DESC DEF text COND DEF expression (OUTPUT DEF text | ACTIONS DEF actions | PRIORITY DEF severity | TAGS DEF tags | PREFILTER DEF prefilter | ENABLED DEF enabled | WARNEVTTYPE DEF warnevttype | SKIPUNKNOWN DEF skipunknown)* ; pfilter - : DECL FILTER DEF ID COND DEF expression (ENABLED DEF enabled)? + : DECL drop_keyword DEF ID COND DEF expression (ENABLED DEF enabled)? + ; + +sfilter + : DECL drop_keyword DEF ID COND DEF expression (ENABLED DEF enabled)? + ; + +drop_keyword + : DROP | FILTER ; pmacro - : DECL MACRO DEF ID COND DEF expression + : DECL MACRO DEF ID COND DEF expression (FAPPEND DEF fappend)? ; plist : DECL LIST DEF ID ITEMS DEF items ; + +preq + : DECL REQ DEF atom + ; expression : or_expression @@ -59,11 +82,15 @@ term ; items - : LBRACK (atom (LISTSEP atom)*)? RBRACK + : LBRACK (atom (LISTSEP atom)*)? (LISTSEP)? RBRACK + ; + +actions + : LBRACK (atom (LISTSEP atom)*)? (LISTSEP)? RBRACK ; tags - : LBRACK (atom (LISTSEP atom)*)? RBRACK + : LBRACK (atom (LISTSEP atom)*)? (LISTSEP)? 
RBRACK ; prefilter @@ -86,6 +113,10 @@ skipunknown : atom ; +fappend + : atom + ; + variable : ID ; @@ -103,16 +134,17 @@ atom text : ({!(p.GetCurrentToken().GetText() == "desc" || p.GetCurrentToken().GetText() == "condition" || - p.GetCurrentToken().GetText() == "action" || + p.GetCurrentToken().GetText() == "actions" || p.GetCurrentToken().GetText() == "output" || p.GetCurrentToken().GetText() == "priority" || p.GetCurrentToken().GetText() == "tags" || p.GetCurrentToken().GetText() == "prefilter" || p.GetCurrentToken().GetText() == "enabled" || p.GetCurrentToken().GetText() == "warn_evttypes" || - p.GetCurrentToken().GetText() == "skip-if-unknown-filter")}? .)+ + p.GetCurrentToken().GetText() == "skip-if-unknown-filter" || + p.GetCurrentToken().GetText() == "append" )}? .)+ ; - + binary_operator : LT | LE @@ -123,6 +155,7 @@ binary_operator | CONTAINS | ICONTAINS | STARTSWITH + | ENDSWITH ; unary_operator @@ -180,6 +213,10 @@ ICONTAINS STARTSWITH : 'startswith' ; + +ENDSWITH + : 'endswith' + ; PMATCH : 'pmatch' @@ -223,20 +260,21 @@ SEVERITY ; SFSEVERITY - : 'high' - | 'medium' - | 'low' + : H I G H + | M E D I U M + | L O W ; FSEVERITY - : 'emergency' - | 'alert' - | 'critical' - | 'error' - | 'warning' - | 'notice' - | 'informational' - | 'debug' + : E M E R G E N C Y + | A L E R T + | C R I T I C A L + | E R R O R + | W A R N I N G + | N O T I C E + | I N F O + | I N F O R M A T I O N A L + | D E B U G ; ID @@ -248,7 +286,7 @@ NUMBER ; PATH - : ('a'..'z' | 'A'..'Z' | '/' ) ('a'..'z' | 'A'..'Z' | '0'..'9' | '_' | '-' | '.' | '/' | '*' )* + : ('a'..'z' | 'A'..'Z' | '0'..'9' | '/' | '.') ('a'..'z' | 'A'..'Z' | '0'..'9' | '_' | '-' | '.' | '/' | '*' )* ; STRING @@ -268,7 +306,7 @@ fragment STRLIT ; fragment ESC : '\\"' | '\'\'' ; - + WS : [ \t\r\n\u000C]+ -> channel(HIDDEN) ; @@ -282,3 +320,30 @@ COMMENT ; ANY : . 
; + +fragment A : [aA]; // match either an 'a' or 'A' +fragment B : [bB]; +fragment C : [cC]; +fragment D : [dD]; +fragment E : [eE]; +fragment F : [fF]; +fragment G : [gG]; +fragment H : [hH]; +fragment I : [iI]; +fragment J : [jJ]; +fragment K : [kK]; +fragment L : [lL]; +fragment M : [mM]; +fragment N : [nN]; +fragment O : [oO]; +fragment P : [pP]; +fragment Q : [qQ]; +fragment R : [rR]; +fragment S : [sS]; +fragment T : [tT]; +fragment U : [uU]; +fragment V : [vV]; +fragment W : [wW]; +fragment X : [xX]; +fragment Y : [yY]; +fragment Z : [zZ]; diff --git a/core/policyengine/policy/falco/lang/errorhandler/sfpl_errorlistener.go b/core/policyengine/policy/falco/lang/errorhandler/sfpl_errorlistener.go new file mode 100644 index 00000000..ef064ba8 --- /dev/null +++ b/core/policyengine/policy/falco/lang/errorhandler/sfpl_errorlistener.go @@ -0,0 +1,35 @@ +package errorhandler + +import ( + "fmt" + + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +// SfplSyntaxError stores syntax error information during +// policy parsing +type SfplSyntaxError struct { + line, column int + msg string +} + +// Error returns a formatted string representing the syntax error +func (s *SfplSyntaxError) Error() string { + return fmt.Sprintf("line: %d column: %d %s", s.line, s.column, s.msg) +} + +// SfplErrorListener monitors errors during the policy parsing process +// and stores them in an error list +type SfplErrorListener struct { + *antlr.DefaultErrorListener // Embed default which ensures we fit the interface + Errors []error +} + +// SyntaxError is called by the antlr lexer and parser when it encounters and error +func (l *SfplErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) { + l.Errors = append(l.Errors, &SfplSyntaxError{ + line: line, + column: column, + msg: msg, + }) +} diff --git a/core/policyengine/policy/falco/lang/generate.sh b/core/policyengine/policy/falco/lang/generate.sh new file mode 100755 index 00000000..11f84b1b --- /dev/null +++ b/core/policyengine/policy/falco/lang/generate.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +antlr4='java -Xmx500M -cp ".:/usr/local/lib/antlr-4.9.2-complete.jar:$CLASSPATH" org.antlr.v4.Tool' +grun='java -Xmx500M -cp ".:/usr/local/lib/antlr-4.9.2-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' + +$antlr4 -Dlanguage=Go -o parser -package parser -visitor Sfpl.g4 diff --git a/core/policyengine/policy/falco/lang/parser/Sfpl.interp b/core/policyengine/policy/falco/lang/parser/Sfpl.interp new file mode 100644 index 00000000..b8d03eb6 --- /dev/null +++ b/core/policyengine/policy/falco/lang/parser/Sfpl.interp @@ -0,0 +1,147 @@ +token literal names: +null +'rule' +'filter' +'drop' +'macro' +'list' +'name' +'items' +'condition' +'desc' +'actions' +'output' +'priority' +'tags' +'prefilter' +'enabled' +'warn_evttypes' +'skip-if-unknown-filter' +'append' +'required_engine_version' +'and' +'or' +'not' +'<' +'<=' +'>' +'>=' +'=' +'!=' +'in' +'contains' +'icontains' +'startswith' +'endswith' +'pmatch' +'exists' +'[' +']' +'(' +')' +',' +'-' +null +null +null +null +null +null +null +null +null +null +null +null +null + +token symbolic names: +null +RULE +FILTER +DROP +MACRO +LIST +NAME +ITEMS +COND +DESC +ACTIONS +OUTPUT +PRIORITY +TAGS +PREFILTER +ENABLED +WARNEVTTYPE +SKIPUNKNOWN +FAPPEND +REQ +AND +OR +NOT +LT +LE +GT +GE +EQ +NEQ +IN +CONTAINS +ICONTAINS +STARTSWITH +ENDSWITH +PMATCH +EXISTS +LBRACK +RBRACK +LPAREN +RPAREN +LISTSEP +DECL +DEF +SEVERITY +SFSEVERITY +FSEVERITY +ID +NUMBER 
+PATH +STRING +TAG +WS +NL +COMMENT +ANY + +rule names: +policy +defs +prule +srule +pfilter +sfilter +drop_keyword +pmacro +plist +preq +expression +or_expression +and_expression +term +items +actions +tags +prefilter +severity +enabled +warnevttype +skipunknown +fappend +variable +atom +text +binary_operator +unary_operator + + +atn: +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 56, 338, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 6, 2, 64, 10, 2, 13, 2, 14, 2, 65, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 75, 10, 3, 12, 3, 14, 3, 78, 11, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 7, 4, 116, 10, 4, 12, 4, 14, 4, 119, 11, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 155, 10, 5, 12, 5, 14, 5, 158, 11, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 5, 6, 170, 10, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 182, 10, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 196, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 7, 13, 216, 10, 13, 12, 13, 14, 13, 219, 11, 13, 3, 14, 3, 14, 3, 14, 7, 14, 224, 10, 14, 12, 14, 14, 14, 227, 11, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 5, 15, 244, 10, 15, 3, 15, 3, 15, 3, 15, 5, 15, 249, 10, 15, 7, 15, 251, 10, 15, 12, 15, 14, 15, 254, 11, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 5, 15, 262, 10, 15, 3, 16, 3, 16, 3, 16, 3, 16, 7, 16, 268, 10, 16, 12, 16, 14, 16, 271, 11, 16, 5, 16, 273, 10, 16, 3, 16, 5, 16, 276, 10, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 7, 17, 284, 10, 17, 12, 17, 14, 17, 287, 11, 17, 5, 17, 289, 10, 17, 3, 17, 5, 17, 292, 10, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 7, 18, 300, 10, 18, 12, 18, 14, 18, 303, 11, 18, 5, 18, 305, 10, 18, 3, 18, 5, 18, 308, 10, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 27, 3, 27, 6, 27, 330, 10, 27, 13, 27, 14, 27, 331, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 2, 2, 30, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 2, 6, 3, 2, 4, 5, 4, 2, 31, 31, 36, 36, 5, 2, 25, 25, 27, 27, 48, 52, 4, 2, 25, 30, 32, 35, 2, 358, 2, 63, 3, 2, 2, 2, 4, 76, 3, 2, 2, 2, 6, 81, 3, 2, 2, 2, 8, 120, 3, 2, 2, 2, 10, 159, 3, 2, 2, 2, 12, 171, 3, 2, 2, 2, 14, 183, 3, 2, 2, 2, 16, 185, 3, 2, 2, 2, 18, 197, 3, 2, 2, 2, 20, 205, 3, 2, 2, 2, 22, 210, 3, 2, 2, 2, 24, 212, 3, 2, 2, 2, 26, 220, 3, 2, 2, 2, 28, 261, 3, 2, 2, 2, 30, 263, 3, 2, 2, 2, 32, 279, 3, 2, 2, 2, 34, 295, 3, 2, 2, 2, 36, 311, 3, 2, 2, 2, 38, 313, 3, 2, 2, 2, 40, 315, 3, 2, 2, 2, 42, 317, 3, 2, 2, 2, 44, 319, 3, 2, 2, 2, 46, 321, 3, 2, 2, 2, 48, 323, 3, 2, 2, 2, 50, 325, 3, 
2, 2, 2, 52, 329, 3, 2, 2, 2, 54, 333, 3, 2, 2, 2, 56, 335, 3, 2, 2, 2, 58, 64, 5, 6, 4, 2, 59, 64, 5, 10, 6, 2, 60, 64, 5, 16, 9, 2, 61, 64, 5, 18, 10, 2, 62, 64, 5, 20, 11, 2, 63, 58, 3, 2, 2, 2, 63, 59, 3, 2, 2, 2, 63, 60, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 62, 3, 2, 2, 2, 64, 65, 3, 2, 2, 2, 65, 63, 3, 2, 2, 2, 65, 66, 3, 2, 2, 2, 66, 67, 3, 2, 2, 2, 67, 68, 7, 2, 2, 3, 68, 3, 3, 2, 2, 2, 69, 75, 5, 8, 5, 2, 70, 75, 5, 12, 7, 2, 71, 75, 5, 16, 9, 2, 72, 75, 5, 18, 10, 2, 73, 75, 5, 20, 11, 2, 74, 69, 3, 2, 2, 2, 74, 70, 3, 2, 2, 2, 74, 71, 3, 2, 2, 2, 74, 72, 3, 2, 2, 2, 74, 73, 3, 2, 2, 2, 75, 78, 3, 2, 2, 2, 76, 74, 3, 2, 2, 2, 76, 77, 3, 2, 2, 2, 77, 79, 3, 2, 2, 2, 78, 76, 3, 2, 2, 2, 79, 80, 7, 2, 2, 3, 80, 5, 3, 2, 2, 2, 81, 82, 7, 43, 2, 2, 82, 83, 7, 3, 2, 2, 83, 84, 7, 44, 2, 2, 84, 85, 5, 52, 27, 2, 85, 86, 7, 11, 2, 2, 86, 87, 7, 44, 2, 2, 87, 88, 5, 52, 27, 2, 88, 89, 7, 10, 2, 2, 89, 90, 7, 44, 2, 2, 90, 117, 5, 22, 12, 2, 91, 92, 7, 13, 2, 2, 92, 93, 7, 44, 2, 2, 93, 116, 5, 52, 27, 2, 94, 95, 7, 12, 2, 2, 95, 96, 7, 44, 2, 2, 96, 116, 5, 32, 17, 2, 97, 98, 7, 14, 2, 2, 98, 99, 7, 44, 2, 2, 99, 116, 5, 38, 20, 2, 100, 101, 7, 15, 2, 2, 101, 102, 7, 44, 2, 2, 102, 116, 5, 34, 18, 2, 103, 104, 7, 16, 2, 2, 104, 105, 7, 44, 2, 2, 105, 116, 5, 36, 19, 2, 106, 107, 7, 17, 2, 2, 107, 108, 7, 44, 2, 2, 108, 116, 5, 40, 21, 2, 109, 110, 7, 18, 2, 2, 110, 111, 7, 44, 2, 2, 111, 116, 5, 42, 22, 2, 112, 113, 7, 19, 2, 2, 113, 114, 7, 44, 2, 2, 114, 116, 5, 44, 23, 2, 115, 91, 3, 2, 2, 2, 115, 94, 3, 2, 2, 2, 115, 97, 3, 2, 2, 2, 115, 100, 3, 2, 2, 2, 115, 103, 3, 2, 2, 2, 115, 106, 3, 2, 2, 2, 115, 109, 3, 2, 2, 2, 115, 112, 3, 2, 2, 2, 116, 119, 3, 2, 2, 2, 117, 115, 3, 2, 2, 2, 117, 118, 3, 2, 2, 2, 118, 7, 3, 2, 2, 2, 119, 117, 3, 2, 2, 2, 120, 121, 7, 43, 2, 2, 121, 122, 7, 3, 2, 2, 122, 123, 7, 44, 2, 2, 123, 124, 5, 52, 27, 2, 124, 125, 7, 11, 2, 2, 125, 126, 7, 44, 2, 2, 126, 127, 5, 52, 27, 2, 127, 128, 7, 10, 2, 2, 128, 129, 7, 44, 2, 2, 129, 156, 5, 22, 12, 2, 130, 131, 7, 13, 2, 2, 131, 132, 7, 44, 2, 2, 132, 155, 5, 52, 27, 2, 133, 134, 7, 12, 2, 2, 134, 135, 7, 44, 2, 2, 135, 155, 5, 32, 17, 2, 136, 137, 7, 14, 2, 2, 137, 138, 7, 44, 2, 2, 138, 155, 5, 38, 20, 2, 139, 140, 7, 15, 2, 2, 140, 141, 7, 44, 2, 2, 141, 155, 5, 34, 18, 2, 142, 143, 7, 16, 2, 2, 143, 144, 7, 44, 2, 2, 144, 155, 5, 36, 19, 2, 145, 146, 7, 17, 2, 2, 146, 147, 7, 44, 2, 2, 147, 155, 5, 40, 21, 2, 148, 149, 7, 18, 2, 2, 149, 150, 7, 44, 2, 2, 150, 155, 5, 42, 22, 2, 151, 152, 7, 19, 2, 2, 152, 153, 7, 44, 2, 2, 153, 155, 5, 44, 23, 2, 154, 130, 3, 2, 2, 2, 154, 133, 3, 2, 2, 2, 154, 136, 3, 2, 2, 2, 154, 139, 3, 2, 2, 2, 154, 142, 3, 2, 2, 2, 154, 145, 3, 2, 2, 2, 154, 148, 3, 2, 2, 2, 154, 151, 3, 2, 2, 2, 155, 158, 3, 2, 2, 2, 156, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 9, 3, 2, 2, 2, 158, 156, 3, 2, 2, 2, 159, 160, 7, 43, 2, 2, 160, 161, 5, 14, 8, 2, 161, 162, 7, 44, 2, 2, 162, 163, 7, 48, 2, 2, 163, 164, 7, 10, 2, 2, 164, 165, 7, 44, 2, 2, 165, 169, 5, 22, 12, 2, 166, 167, 7, 17, 2, 2, 167, 168, 7, 44, 2, 2, 168, 170, 5, 40, 21, 2, 169, 166, 3, 2, 2, 2, 169, 170, 3, 2, 2, 2, 170, 11, 3, 2, 2, 2, 171, 172, 7, 43, 2, 2, 172, 173, 5, 14, 8, 2, 173, 174, 7, 44, 2, 2, 174, 175, 7, 48, 2, 2, 175, 176, 7, 10, 2, 2, 176, 177, 7, 44, 2, 2, 177, 181, 5, 22, 12, 2, 178, 179, 7, 17, 2, 2, 179, 180, 7, 44, 2, 2, 180, 182, 5, 40, 21, 2, 181, 178, 3, 2, 2, 2, 181, 182, 3, 2, 2, 2, 182, 13, 3, 2, 2, 2, 183, 184, 9, 2, 2, 2, 184, 15, 3, 2, 2, 2, 185, 186, 7, 43, 2, 2, 186, 187, 7, 6, 2, 2, 187, 188, 7, 
44, 2, 2, 188, 189, 7, 48, 2, 2, 189, 190, 7, 10, 2, 2, 190, 191, 7, 44, 2, 2, 191, 195, 5, 22, 12, 2, 192, 193, 7, 20, 2, 2, 193, 194, 7, 44, 2, 2, 194, 196, 5, 46, 24, 2, 195, 192, 3, 2, 2, 2, 195, 196, 3, 2, 2, 2, 196, 17, 3, 2, 2, 2, 197, 198, 7, 43, 2, 2, 198, 199, 7, 7, 2, 2, 199, 200, 7, 44, 2, 2, 200, 201, 7, 48, 2, 2, 201, 202, 7, 9, 2, 2, 202, 203, 7, 44, 2, 2, 203, 204, 5, 30, 16, 2, 204, 19, 3, 2, 2, 2, 205, 206, 7, 43, 2, 2, 206, 207, 7, 21, 2, 2, 207, 208, 7, 44, 2, 2, 208, 209, 5, 50, 26, 2, 209, 21, 3, 2, 2, 2, 210, 211, 5, 24, 13, 2, 211, 23, 3, 2, 2, 2, 212, 217, 5, 26, 14, 2, 213, 214, 7, 23, 2, 2, 214, 216, 5, 26, 14, 2, 215, 213, 3, 2, 2, 2, 216, 219, 3, 2, 2, 2, 217, 215, 3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 25, 3, 2, 2, 2, 219, 217, 3, 2, 2, 2, 220, 225, 5, 28, 15, 2, 221, 222, 7, 22, 2, 2, 222, 224, 5, 28, 15, 2, 223, 221, 3, 2, 2, 2, 224, 227, 3, 2, 2, 2, 225, 223, 3, 2, 2, 2, 225, 226, 3, 2, 2, 2, 226, 27, 3, 2, 2, 2, 227, 225, 3, 2, 2, 2, 228, 262, 5, 48, 25, 2, 229, 230, 7, 24, 2, 2, 230, 262, 5, 28, 15, 2, 231, 232, 5, 50, 26, 2, 232, 233, 5, 56, 29, 2, 233, 262, 3, 2, 2, 2, 234, 235, 5, 50, 26, 2, 235, 236, 5, 54, 28, 2, 236, 237, 5, 50, 26, 2, 237, 262, 3, 2, 2, 2, 238, 239, 5, 50, 26, 2, 239, 240, 9, 3, 2, 2, 240, 243, 7, 40, 2, 2, 241, 244, 5, 50, 26, 2, 242, 244, 5, 30, 16, 2, 243, 241, 3, 2, 2, 2, 243, 242, 3, 2, 2, 2, 244, 252, 3, 2, 2, 2, 245, 248, 7, 42, 2, 2, 246, 249, 5, 50, 26, 2, 247, 249, 5, 30, 16, 2, 248, 246, 3, 2, 2, 2, 248, 247, 3, 2, 2, 2, 249, 251, 3, 2, 2, 2, 250, 245, 3, 2, 2, 2, 251, 254, 3, 2, 2, 2, 252, 250, 3, 2, 2, 2, 252, 253, 3, 2, 2, 2, 253, 255, 3, 2, 2, 2, 254, 252, 3, 2, 2, 2, 255, 256, 7, 41, 2, 2, 256, 262, 3, 2, 2, 2, 257, 258, 7, 40, 2, 2, 258, 259, 5, 22, 12, 2, 259, 260, 7, 41, 2, 2, 260, 262, 3, 2, 2, 2, 261, 228, 3, 2, 2, 2, 261, 229, 3, 2, 2, 2, 261, 231, 3, 2, 2, 2, 261, 234, 3, 2, 2, 2, 261, 238, 3, 2, 2, 2, 261, 257, 3, 2, 2, 2, 262, 29, 3, 2, 2, 2, 263, 272, 7, 38, 2, 2, 264, 269, 5, 50, 26, 2, 265, 266, 7, 42, 2, 2, 266, 268, 5, 50, 26, 2, 267, 265, 3, 2, 2, 2, 268, 271, 3, 2, 2, 2, 269, 267, 3, 2, 2, 2, 269, 270, 3, 2, 2, 2, 270, 273, 3, 2, 2, 2, 271, 269, 3, 2, 2, 2, 272, 264, 3, 2, 2, 2, 272, 273, 3, 2, 2, 2, 273, 275, 3, 2, 2, 2, 274, 276, 7, 42, 2, 2, 275, 274, 3, 2, 2, 2, 275, 276, 3, 2, 2, 2, 276, 277, 3, 2, 2, 2, 277, 278, 7, 39, 2, 2, 278, 31, 3, 2, 2, 2, 279, 288, 7, 38, 2, 2, 280, 285, 5, 50, 26, 2, 281, 282, 7, 42, 2, 2, 282, 284, 5, 50, 26, 2, 283, 281, 3, 2, 2, 2, 284, 287, 3, 2, 2, 2, 285, 283, 3, 2, 2, 2, 285, 286, 3, 2, 2, 2, 286, 289, 3, 2, 2, 2, 287, 285, 3, 2, 2, 2, 288, 280, 3, 2, 2, 2, 288, 289, 3, 2, 2, 2, 289, 291, 3, 2, 2, 2, 290, 292, 7, 42, 2, 2, 291, 290, 3, 2, 2, 2, 291, 292, 3, 2, 2, 2, 292, 293, 3, 2, 2, 2, 293, 294, 7, 39, 2, 2, 294, 33, 3, 2, 2, 2, 295, 304, 7, 38, 2, 2, 296, 301, 5, 50, 26, 2, 297, 298, 7, 42, 2, 2, 298, 300, 5, 50, 26, 2, 299, 297, 3, 2, 2, 2, 300, 303, 3, 2, 2, 2, 301, 299, 3, 2, 2, 2, 301, 302, 3, 2, 2, 2, 302, 305, 3, 2, 2, 2, 303, 301, 3, 2, 2, 2, 304, 296, 3, 2, 2, 2, 304, 305, 3, 2, 2, 2, 305, 307, 3, 2, 2, 2, 306, 308, 7, 42, 2, 2, 307, 306, 3, 2, 2, 2, 307, 308, 3, 2, 2, 2, 308, 309, 3, 2, 2, 2, 309, 310, 7, 39, 2, 2, 310, 35, 3, 2, 2, 2, 311, 312, 5, 30, 16, 2, 312, 37, 3, 2, 2, 2, 313, 314, 7, 45, 2, 2, 314, 39, 3, 2, 2, 2, 315, 316, 5, 50, 26, 2, 316, 41, 3, 2, 2, 2, 317, 318, 5, 50, 26, 2, 318, 43, 3, 2, 2, 2, 319, 320, 5, 50, 26, 2, 320, 45, 3, 2, 2, 2, 321, 322, 5, 50, 26, 2, 322, 47, 3, 2, 2, 2, 323, 324, 7, 48, 2, 2, 324, 49, 3, 2, 2, 2, 325, 
326, 9, 4, 2, 2, 326, 51, 3, 2, 2, 2, 327, 328, 6, 27, 2, 2, 328, 330, 11, 2, 2, 2, 329, 327, 3, 2, 2, 2, 330, 331, 3, 2, 2, 2, 331, 329, 3, 2, 2, 2, 331, 332, 3, 2, 2, 2, 332, 53, 3, 2, 2, 2, 333, 334, 9, 5, 2, 2, 334, 55, 3, 2, 2, 2, 335, 336, 7, 37, 2, 2, 336, 57, 3, 2, 2, 2, 29, 63, 65, 74, 76, 115, 117, 154, 156, 169, 181, 195, 217, 225, 243, 248, 252, 261, 269, 272, 275, 285, 288, 291, 301, 304, 307, 331] \ No newline at end of file diff --git a/core/policyengine/policy/falco/lang/parser/Sfpl.tokens b/core/policyengine/policy/falco/lang/parser/Sfpl.tokens new file mode 100644 index 00000000..e74d954b --- /dev/null +++ b/core/policyengine/policy/falco/lang/parser/Sfpl.tokens @@ -0,0 +1,95 @@ +RULE=1 +FILTER=2 +DROP=3 +MACRO=4 +LIST=5 +NAME=6 +ITEMS=7 +COND=8 +DESC=9 +ACTIONS=10 +OUTPUT=11 +PRIORITY=12 +TAGS=13 +PREFILTER=14 +ENABLED=15 +WARNEVTTYPE=16 +SKIPUNKNOWN=17 +FAPPEND=18 +REQ=19 +AND=20 +OR=21 +NOT=22 +LT=23 +LE=24 +GT=25 +GE=26 +EQ=27 +NEQ=28 +IN=29 +CONTAINS=30 +ICONTAINS=31 +STARTSWITH=32 +ENDSWITH=33 +PMATCH=34 +EXISTS=35 +LBRACK=36 +RBRACK=37 +LPAREN=38 +RPAREN=39 +LISTSEP=40 +DECL=41 +DEF=42 +SEVERITY=43 +SFSEVERITY=44 +FSEVERITY=45 +ID=46 +NUMBER=47 +PATH=48 +STRING=49 +TAG=50 +WS=51 +NL=52 +COMMENT=53 +ANY=54 +'rule'=1 +'filter'=2 +'drop'=3 +'macro'=4 +'list'=5 +'name'=6 +'items'=7 +'condition'=8 +'desc'=9 +'actions'=10 +'output'=11 +'priority'=12 +'tags'=13 +'prefilter'=14 +'enabled'=15 +'warn_evttypes'=16 +'skip-if-unknown-filter'=17 +'append'=18 +'required_engine_version'=19 +'and'=20 +'or'=21 +'not'=22 +'<'=23 +'<='=24 +'>'=25 +'>='=26 +'='=27 +'!='=28 +'in'=29 +'contains'=30 +'icontains'=31 +'startswith'=32 +'endswith'=33 +'pmatch'=34 +'exists'=35 +'['=36 +']'=37 +'('=38 +')'=39 +','=40 +'-'=41 diff --git a/core/policyengine/policy/falco/lang/parser/SfplLexer.interp b/core/policyengine/policy/falco/lang/parser/SfplLexer.interp new file mode 100644 index 00000000..a2299a79 --- /dev/null +++ b/core/policyengine/policy/falco/lang/parser/SfplLexer.interp @@ -0,0 +1,207 @@ +token literal names: +null +'rule' +'filter' +'drop' +'macro' +'list' +'name' +'items' +'condition' +'desc' +'actions' +'output' +'priority' +'tags' +'prefilter' +'enabled' +'warn_evttypes' +'skip-if-unknown-filter' +'append' +'required_engine_version' +'and' +'or' +'not' +'<' +'<=' +'>' +'>=' +'=' +'!=' +'in' +'contains' +'icontains' +'startswith' +'endswith' +'pmatch' +'exists' +'[' +']' +'(' +')' +',' +'-' +null +null +null +null +null +null +null +null +null +null +null +null +null + +token symbolic names: +null +RULE +FILTER +DROP +MACRO +LIST +NAME +ITEMS +COND +DESC +ACTIONS +OUTPUT +PRIORITY +TAGS +PREFILTER +ENABLED +WARNEVTTYPE +SKIPUNKNOWN +FAPPEND +REQ +AND +OR +NOT +LT +LE +GT +GE +EQ +NEQ +IN +CONTAINS +ICONTAINS +STARTSWITH +ENDSWITH +PMATCH +EXISTS +LBRACK +RBRACK +LPAREN +RPAREN +LISTSEP +DECL +DEF +SEVERITY +SFSEVERITY +FSEVERITY +ID +NUMBER +PATH +STRING +TAG +WS +NL +COMMENT +ANY + +rule names: +RULE +FILTER +DROP +MACRO +LIST +NAME +ITEMS +COND +DESC +ACTIONS +OUTPUT +PRIORITY +TAGS +PREFILTER +ENABLED +WARNEVTTYPE +SKIPUNKNOWN +FAPPEND +REQ +AND +OR +NOT +LT +LE +GT +GE +EQ +NEQ +IN +CONTAINS +ICONTAINS +STARTSWITH +ENDSWITH +PMATCH +EXISTS +LBRACK +RBRACK +LPAREN +RPAREN +LISTSEP +DECL +DEF +SEVERITY +SFSEVERITY +FSEVERITY +ID +NUMBER +PATH +STRING +TAG +STRLIT +ESC +WS +NL +COMMENT +ANY +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[3, 24715, 42794, 33075, 
47597, 16764, 15335, 30598, 22884, 2, 56, 709, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, 4, 76, 9, 76, 4, 77, 9, 77, 4, 78, 9, 78, 4, 79, 9, 79, 4, 80, 9, 80, 4, 81, 9, 81, 4, 82, 9, 82, 4, 83, 9, 83, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 37, 3, 37, 3, 38, 3, 38, 3, 39, 3, 39, 3, 40, 3, 40, 3, 41, 3, 41, 3, 42, 3, 42, 3, 43, 3, 43, 7, 43, 433, 10, 43, 12, 43, 14, 43, 436, 11, 43, 3, 43, 5, 43, 439, 10, 43, 3, 44, 3, 44, 5, 44, 443, 10, 44, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 5, 45, 461, 10, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 
3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 534, 10, 46, 3, 47, 3, 47, 3, 47, 5, 47, 539, 10, 47, 3, 47, 3, 47, 3, 47, 5, 47, 544, 10, 47, 3, 47, 3, 47, 7, 47, 548, 10, 47, 12, 47, 14, 47, 551, 11, 47, 3, 47, 3, 47, 3, 47, 7, 47, 556, 10, 47, 12, 47, 14, 47, 559, 11, 47, 3, 48, 6, 48, 562, 10, 48, 13, 48, 14, 48, 563, 3, 48, 3, 48, 6, 48, 568, 10, 48, 13, 48, 14, 48, 569, 5, 48, 572, 10, 48, 3, 49, 3, 49, 7, 49, 576, 10, 49, 12, 49, 14, 49, 579, 11, 49, 3, 50, 3, 50, 3, 50, 5, 50, 584, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 5, 50, 591, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 5, 50, 600, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 5, 50, 610, 10, 50, 3, 50, 3, 50, 3, 50, 5, 50, 615, 10, 50, 3, 51, 3, 51, 3, 51, 3, 51, 3, 52, 7, 52, 622, 10, 52, 12, 52, 14, 52, 625, 11, 52, 3, 53, 3, 53, 3, 53, 3, 53, 5, 53, 631, 10, 53, 3, 54, 6, 54, 634, 10, 54, 13, 54, 14, 54, 635, 3, 54, 3, 54, 3, 55, 5, 55, 641, 10, 55, 3, 55, 3, 55, 3, 55, 3, 55, 3, 56, 3, 56, 7, 56, 649, 10, 56, 12, 56, 14, 56, 652, 11, 56, 3, 56, 3, 56, 3, 57, 3, 57, 3, 58, 3, 58, 3, 59, 3, 59, 3, 60, 3, 60, 3, 61, 3, 61, 3, 62, 3, 62, 3, 63, 3, 63, 3, 64, 3, 64, 3, 65, 3, 65, 3, 66, 3, 66, 3, 67, 3, 67, 3, 68, 3, 68, 3, 69, 3, 69, 3, 70, 3, 70, 3, 71, 3, 71, 3, 72, 3, 72, 3, 73, 3, 73, 3, 74, 3, 74, 3, 75, 3, 75, 3, 76, 3, 76, 3, 77, 3, 77, 3, 78, 3, 78, 3, 79, 3, 79, 3, 80, 3, 80, 3, 81, 3, 81, 3, 82, 3, 82, 3, 83, 3, 83, 3, 623, 2, 84, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 69, 36, 71, 37, 73, 38, 75, 39, 77, 40, 79, 41, 81, 42, 83, 43, 85, 44, 87, 45, 89, 46, 91, 47, 93, 48, 95, 49, 97, 50, 99, 51, 101, 52, 103, 2, 105, 2, 107, 53, 109, 54, 111, 55, 113, 56, 115, 2, 117, 2, 119, 2, 121, 2, 123, 2, 125, 2, 127, 2, 129, 2, 131, 2, 133, 2, 135, 2, 137, 2, 139, 2, 141, 2, 143, 2, 145, 2, 147, 2, 149, 2, 151, 2, 153, 2, 155, 2, 157, 2, 159, 2, 161, 2, 163, 2, 165, 2, 3, 2, 34, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 48, 59, 67, 92, 99, 124, 7, 2, 44, 44, 47, 59, 67, 92, 97, 97, 99, 124, 4, 2, 12, 12, 15, 15, 5, 2, 11, 12, 14, 15, 34, 34, 4, 2, 67, 67, 99, 99, 4, 2, 68, 68, 100, 100, 4, 2, 69, 69, 101, 101, 4, 2, 70, 70, 102, 102, 4, 2, 71, 71, 103, 103, 4, 2, 72, 72, 104, 104, 4, 2, 73, 73, 105, 105, 4, 2, 74, 74, 106, 106, 4, 2, 75, 75, 107, 107, 4, 2, 76, 76, 108, 108, 4, 2, 77, 77, 109, 109, 4, 2, 78, 78, 110, 110, 4, 2, 79, 79, 111, 111, 4, 2, 80, 80, 112, 112, 4, 2, 81, 81, 113, 113, 4, 2, 82, 82, 114, 114, 4, 2, 83, 83, 115, 115, 4, 2, 84, 84, 116, 116, 4, 2, 85, 85, 117, 117, 4, 2, 86, 86, 118, 118, 4, 2, 87, 87, 119, 119, 4, 2, 88, 88, 120, 120, 4, 2, 89, 89, 121, 121, 4, 2, 90, 90, 122, 122, 4, 2, 91, 91, 123, 123, 4, 2, 92, 92, 124, 124, 2, 715, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 
2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, 95, 3, 2, 2, 2, 2, 97, 3, 2, 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 107, 3, 2, 2, 2, 2, 109, 3, 2, 2, 2, 2, 111, 3, 2, 2, 2, 2, 113, 3, 2, 2, 2, 3, 167, 3, 2, 2, 2, 5, 172, 3, 2, 2, 2, 7, 179, 3, 2, 2, 2, 9, 184, 3, 2, 2, 2, 11, 190, 3, 2, 2, 2, 13, 195, 3, 2, 2, 2, 15, 200, 3, 2, 2, 2, 17, 206, 3, 2, 2, 2, 19, 216, 3, 2, 2, 2, 21, 221, 3, 2, 2, 2, 23, 229, 3, 2, 2, 2, 25, 236, 3, 2, 2, 2, 27, 245, 3, 2, 2, 2, 29, 250, 3, 2, 2, 2, 31, 260, 3, 2, 2, 2, 33, 268, 3, 2, 2, 2, 35, 282, 3, 2, 2, 2, 37, 305, 3, 2, 2, 2, 39, 312, 3, 2, 2, 2, 41, 336, 3, 2, 2, 2, 43, 340, 3, 2, 2, 2, 45, 343, 3, 2, 2, 2, 47, 347, 3, 2, 2, 2, 49, 349, 3, 2, 2, 2, 51, 352, 3, 2, 2, 2, 53, 354, 3, 2, 2, 2, 55, 357, 3, 2, 2, 2, 57, 359, 3, 2, 2, 2, 59, 362, 3, 2, 2, 2, 61, 365, 3, 2, 2, 2, 63, 374, 3, 2, 2, 2, 65, 384, 3, 2, 2, 2, 67, 395, 3, 2, 2, 2, 69, 404, 3, 2, 2, 2, 71, 411, 3, 2, 2, 2, 73, 418, 3, 2, 2, 2, 75, 420, 3, 2, 2, 2, 77, 422, 3, 2, 2, 2, 79, 424, 3, 2, 2, 2, 81, 426, 3, 2, 2, 2, 83, 428, 3, 2, 2, 2, 85, 430, 3, 2, 2, 2, 87, 442, 3, 2, 2, 2, 89, 460, 3, 2, 2, 2, 91, 533, 3, 2, 2, 2, 93, 535, 3, 2, 2, 2, 95, 561, 3, 2, 2, 2, 97, 573, 3, 2, 2, 2, 99, 614, 3, 2, 2, 2, 101, 616, 3, 2, 2, 2, 103, 623, 3, 2, 2, 2, 105, 630, 3, 2, 2, 2, 107, 633, 3, 2, 2, 2, 109, 640, 3, 2, 2, 2, 111, 646, 3, 2, 2, 2, 113, 655, 3, 2, 2, 2, 115, 657, 3, 2, 2, 2, 117, 659, 3, 2, 2, 2, 119, 661, 3, 2, 2, 2, 121, 663, 3, 2, 2, 2, 123, 665, 3, 2, 2, 2, 125, 667, 3, 2, 2, 2, 127, 669, 3, 2, 2, 2, 129, 671, 3, 2, 2, 2, 131, 673, 3, 2, 2, 2, 133, 675, 3, 2, 2, 2, 135, 677, 3, 2, 2, 2, 137, 679, 3, 2, 2, 2, 139, 681, 3, 2, 2, 2, 141, 683, 3, 2, 2, 2, 143, 685, 3, 2, 2, 2, 145, 687, 3, 2, 2, 2, 147, 689, 3, 2, 2, 2, 149, 691, 3, 2, 2, 2, 151, 693, 3, 2, 2, 2, 153, 695, 3, 2, 2, 2, 155, 697, 3, 2, 2, 2, 157, 699, 3, 2, 2, 2, 159, 701, 3, 2, 2, 2, 161, 703, 3, 2, 2, 2, 163, 705, 3, 2, 2, 2, 165, 707, 3, 2, 2, 2, 167, 168, 7, 116, 2, 2, 168, 169, 7, 119, 2, 2, 169, 170, 7, 110, 2, 2, 170, 171, 7, 103, 2, 2, 171, 4, 3, 2, 2, 2, 172, 173, 7, 104, 2, 2, 173, 174, 7, 107, 2, 2, 174, 175, 7, 110, 2, 2, 175, 176, 7, 118, 2, 2, 176, 177, 7, 103, 2, 2, 177, 178, 7, 116, 2, 2, 178, 6, 3, 2, 2, 2, 179, 180, 7, 102, 2, 2, 180, 181, 7, 116, 2, 2, 181, 182, 7, 113, 2, 2, 182, 183, 7, 114, 2, 2, 183, 8, 3, 2, 2, 2, 184, 185, 7, 111, 2, 2, 185, 186, 7, 99, 2, 2, 186, 187, 7, 101, 2, 2, 187, 188, 7, 116, 2, 2, 188, 189, 7, 113, 2, 2, 189, 10, 3, 2, 2, 2, 190, 191, 7, 110, 2, 2, 191, 192, 7, 107, 2, 2, 192, 193, 7, 117, 2, 2, 193, 194, 7, 118, 2, 2, 194, 12, 3, 2, 2, 2, 195, 196, 7, 112, 2, 2, 196, 197, 7, 99, 2, 2, 197, 198, 7, 111, 2, 2, 198, 199, 7, 103, 2, 2, 199, 14, 3, 2, 2, 2, 200, 201, 7, 107, 2, 2, 201, 202, 7, 118, 2, 2, 202, 203, 7, 103, 2, 2, 203, 204, 7, 111, 2, 2, 204, 205, 7, 117, 2, 2, 205, 16, 3, 2, 2, 2, 206, 207, 7, 101, 2, 2, 207, 208, 7, 113, 2, 2, 208, 209, 7, 112, 2, 2, 209, 210, 7, 102, 2, 2, 210, 211, 7, 107, 2, 2, 211, 212, 7, 118, 2, 2, 212, 213, 7, 107, 2, 2, 213, 214, 7, 113, 2, 2, 214, 215, 7, 112, 2, 2, 215, 18, 3, 2, 2, 2, 216, 217, 7, 102, 2, 2, 217, 218, 7, 103, 2, 2, 218, 219, 7, 117, 2, 2, 219, 220, 
7, 101, 2, 2, 220, 20, 3, 2, 2, 2, 221, 222, 7, 99, 2, 2, 222, 223, 7, 101, 2, 2, 223, 224, 7, 118, 2, 2, 224, 225, 7, 107, 2, 2, 225, 226, 7, 113, 2, 2, 226, 227, 7, 112, 2, 2, 227, 228, 7, 117, 2, 2, 228, 22, 3, 2, 2, 2, 229, 230, 7, 113, 2, 2, 230, 231, 7, 119, 2, 2, 231, 232, 7, 118, 2, 2, 232, 233, 7, 114, 2, 2, 233, 234, 7, 119, 2, 2, 234, 235, 7, 118, 2, 2, 235, 24, 3, 2, 2, 2, 236, 237, 7, 114, 2, 2, 237, 238, 7, 116, 2, 2, 238, 239, 7, 107, 2, 2, 239, 240, 7, 113, 2, 2, 240, 241, 7, 116, 2, 2, 241, 242, 7, 107, 2, 2, 242, 243, 7, 118, 2, 2, 243, 244, 7, 123, 2, 2, 244, 26, 3, 2, 2, 2, 245, 246, 7, 118, 2, 2, 246, 247, 7, 99, 2, 2, 247, 248, 7, 105, 2, 2, 248, 249, 7, 117, 2, 2, 249, 28, 3, 2, 2, 2, 250, 251, 7, 114, 2, 2, 251, 252, 7, 116, 2, 2, 252, 253, 7, 103, 2, 2, 253, 254, 7, 104, 2, 2, 254, 255, 7, 107, 2, 2, 255, 256, 7, 110, 2, 2, 256, 257, 7, 118, 2, 2, 257, 258, 7, 103, 2, 2, 258, 259, 7, 116, 2, 2, 259, 30, 3, 2, 2, 2, 260, 261, 7, 103, 2, 2, 261, 262, 7, 112, 2, 2, 262, 263, 7, 99, 2, 2, 263, 264, 7, 100, 2, 2, 264, 265, 7, 110, 2, 2, 265, 266, 7, 103, 2, 2, 266, 267, 7, 102, 2, 2, 267, 32, 3, 2, 2, 2, 268, 269, 7, 121, 2, 2, 269, 270, 7, 99, 2, 2, 270, 271, 7, 116, 2, 2, 271, 272, 7, 112, 2, 2, 272, 273, 7, 97, 2, 2, 273, 274, 7, 103, 2, 2, 274, 275, 7, 120, 2, 2, 275, 276, 7, 118, 2, 2, 276, 277, 7, 118, 2, 2, 277, 278, 7, 123, 2, 2, 278, 279, 7, 114, 2, 2, 279, 280, 7, 103, 2, 2, 280, 281, 7, 117, 2, 2, 281, 34, 3, 2, 2, 2, 282, 283, 7, 117, 2, 2, 283, 284, 7, 109, 2, 2, 284, 285, 7, 107, 2, 2, 285, 286, 7, 114, 2, 2, 286, 287, 7, 47, 2, 2, 287, 288, 7, 107, 2, 2, 288, 289, 7, 104, 2, 2, 289, 290, 7, 47, 2, 2, 290, 291, 7, 119, 2, 2, 291, 292, 7, 112, 2, 2, 292, 293, 7, 109, 2, 2, 293, 294, 7, 112, 2, 2, 294, 295, 7, 113, 2, 2, 295, 296, 7, 121, 2, 2, 296, 297, 7, 112, 2, 2, 297, 298, 7, 47, 2, 2, 298, 299, 7, 104, 2, 2, 299, 300, 7, 107, 2, 2, 300, 301, 7, 110, 2, 2, 301, 302, 7, 118, 2, 2, 302, 303, 7, 103, 2, 2, 303, 304, 7, 116, 2, 2, 304, 36, 3, 2, 2, 2, 305, 306, 7, 99, 2, 2, 306, 307, 7, 114, 2, 2, 307, 308, 7, 114, 2, 2, 308, 309, 7, 103, 2, 2, 309, 310, 7, 112, 2, 2, 310, 311, 7, 102, 2, 2, 311, 38, 3, 2, 2, 2, 312, 313, 7, 116, 2, 2, 313, 314, 7, 103, 2, 2, 314, 315, 7, 115, 2, 2, 315, 316, 7, 119, 2, 2, 316, 317, 7, 107, 2, 2, 317, 318, 7, 116, 2, 2, 318, 319, 7, 103, 2, 2, 319, 320, 7, 102, 2, 2, 320, 321, 7, 97, 2, 2, 321, 322, 7, 103, 2, 2, 322, 323, 7, 112, 2, 2, 323, 324, 7, 105, 2, 2, 324, 325, 7, 107, 2, 2, 325, 326, 7, 112, 2, 2, 326, 327, 7, 103, 2, 2, 327, 328, 7, 97, 2, 2, 328, 329, 7, 120, 2, 2, 329, 330, 7, 103, 2, 2, 330, 331, 7, 116, 2, 2, 331, 332, 7, 117, 2, 2, 332, 333, 7, 107, 2, 2, 333, 334, 7, 113, 2, 2, 334, 335, 7, 112, 2, 2, 335, 40, 3, 2, 2, 2, 336, 337, 7, 99, 2, 2, 337, 338, 7, 112, 2, 2, 338, 339, 7, 102, 2, 2, 339, 42, 3, 2, 2, 2, 340, 341, 7, 113, 2, 2, 341, 342, 7, 116, 2, 2, 342, 44, 3, 2, 2, 2, 343, 344, 7, 112, 2, 2, 344, 345, 7, 113, 2, 2, 345, 346, 7, 118, 2, 2, 346, 46, 3, 2, 2, 2, 347, 348, 7, 62, 2, 2, 348, 48, 3, 2, 2, 2, 349, 350, 7, 62, 2, 2, 350, 351, 7, 63, 2, 2, 351, 50, 3, 2, 2, 2, 352, 353, 7, 64, 2, 2, 353, 52, 3, 2, 2, 2, 354, 355, 7, 64, 2, 2, 355, 356, 7, 63, 2, 2, 356, 54, 3, 2, 2, 2, 357, 358, 7, 63, 2, 2, 358, 56, 3, 2, 2, 2, 359, 360, 7, 35, 2, 2, 360, 361, 7, 63, 2, 2, 361, 58, 3, 2, 2, 2, 362, 363, 7, 107, 2, 2, 363, 364, 7, 112, 2, 2, 364, 60, 3, 2, 2, 2, 365, 366, 7, 101, 2, 2, 366, 367, 7, 113, 2, 2, 367, 368, 7, 112, 2, 2, 368, 369, 7, 118, 2, 2, 369, 370, 7, 99, 2, 2, 370, 371, 7, 107, 2, 2, 
371, 372, 7, 112, 2, 2, 372, 373, 7, 117, 2, 2, 373, 62, 3, 2, 2, 2, 374, 375, 7, 107, 2, 2, 375, 376, 7, 101, 2, 2, 376, 377, 7, 113, 2, 2, 377, 378, 7, 112, 2, 2, 378, 379, 7, 118, 2, 2, 379, 380, 7, 99, 2, 2, 380, 381, 7, 107, 2, 2, 381, 382, 7, 112, 2, 2, 382, 383, 7, 117, 2, 2, 383, 64, 3, 2, 2, 2, 384, 385, 7, 117, 2, 2, 385, 386, 7, 118, 2, 2, 386, 387, 7, 99, 2, 2, 387, 388, 7, 116, 2, 2, 388, 389, 7, 118, 2, 2, 389, 390, 7, 117, 2, 2, 390, 391, 7, 121, 2, 2, 391, 392, 7, 107, 2, 2, 392, 393, 7, 118, 2, 2, 393, 394, 7, 106, 2, 2, 394, 66, 3, 2, 2, 2, 395, 396, 7, 103, 2, 2, 396, 397, 7, 112, 2, 2, 397, 398, 7, 102, 2, 2, 398, 399, 7, 117, 2, 2, 399, 400, 7, 121, 2, 2, 400, 401, 7, 107, 2, 2, 401, 402, 7, 118, 2, 2, 402, 403, 7, 106, 2, 2, 403, 68, 3, 2, 2, 2, 404, 405, 7, 114, 2, 2, 405, 406, 7, 111, 2, 2, 406, 407, 7, 99, 2, 2, 407, 408, 7, 118, 2, 2, 408, 409, 7, 101, 2, 2, 409, 410, 7, 106, 2, 2, 410, 70, 3, 2, 2, 2, 411, 412, 7, 103, 2, 2, 412, 413, 7, 122, 2, 2, 413, 414, 7, 107, 2, 2, 414, 415, 7, 117, 2, 2, 415, 416, 7, 118, 2, 2, 416, 417, 7, 117, 2, 2, 417, 72, 3, 2, 2, 2, 418, 419, 7, 93, 2, 2, 419, 74, 3, 2, 2, 2, 420, 421, 7, 95, 2, 2, 421, 76, 3, 2, 2, 2, 422, 423, 7, 42, 2, 2, 423, 78, 3, 2, 2, 2, 424, 425, 7, 43, 2, 2, 425, 80, 3, 2, 2, 2, 426, 427, 7, 46, 2, 2, 427, 82, 3, 2, 2, 2, 428, 429, 7, 47, 2, 2, 429, 84, 3, 2, 2, 2, 430, 438, 7, 60, 2, 2, 431, 433, 7, 34, 2, 2, 432, 431, 3, 2, 2, 2, 433, 436, 3, 2, 2, 2, 434, 432, 3, 2, 2, 2, 434, 435, 3, 2, 2, 2, 435, 437, 3, 2, 2, 2, 436, 434, 3, 2, 2, 2, 437, 439, 7, 64, 2, 2, 438, 434, 3, 2, 2, 2, 438, 439, 3, 2, 2, 2, 439, 86, 3, 2, 2, 2, 440, 443, 5, 89, 45, 2, 441, 443, 5, 91, 46, 2, 442, 440, 3, 2, 2, 2, 442, 441, 3, 2, 2, 2, 443, 88, 3, 2, 2, 2, 444, 445, 5, 129, 65, 2, 445, 446, 5, 131, 66, 2, 446, 447, 5, 127, 64, 2, 447, 448, 5, 129, 65, 2, 448, 461, 3, 2, 2, 2, 449, 450, 5, 139, 70, 2, 450, 451, 5, 123, 62, 2, 451, 452, 5, 121, 61, 2, 452, 453, 5, 131, 66, 2, 453, 454, 5, 155, 78, 2, 454, 455, 5, 139, 70, 2, 455, 461, 3, 2, 2, 2, 456, 457, 5, 137, 69, 2, 457, 458, 5, 143, 72, 2, 458, 459, 5, 159, 80, 2, 459, 461, 3, 2, 2, 2, 460, 444, 3, 2, 2, 2, 460, 449, 3, 2, 2, 2, 460, 456, 3, 2, 2, 2, 461, 90, 3, 2, 2, 2, 462, 463, 5, 123, 62, 2, 463, 464, 5, 139, 70, 2, 464, 465, 5, 123, 62, 2, 465, 466, 5, 149, 75, 2, 466, 467, 5, 127, 64, 2, 467, 468, 5, 123, 62, 2, 468, 469, 5, 141, 71, 2, 469, 470, 5, 119, 60, 2, 470, 471, 5, 163, 82, 2, 471, 534, 3, 2, 2, 2, 472, 473, 5, 115, 58, 2, 473, 474, 5, 137, 69, 2, 474, 475, 5, 123, 62, 2, 475, 476, 5, 149, 75, 2, 476, 477, 5, 153, 77, 2, 477, 534, 3, 2, 2, 2, 478, 479, 5, 119, 60, 2, 479, 480, 5, 149, 75, 2, 480, 481, 5, 131, 66, 2, 481, 482, 5, 153, 77, 2, 482, 483, 5, 131, 66, 2, 483, 484, 5, 119, 60, 2, 484, 485, 5, 115, 58, 2, 485, 486, 5, 137, 69, 2, 486, 534, 3, 2, 2, 2, 487, 488, 5, 123, 62, 2, 488, 489, 5, 149, 75, 2, 489, 490, 5, 149, 75, 2, 490, 491, 5, 143, 72, 2, 491, 492, 5, 149, 75, 2, 492, 534, 3, 2, 2, 2, 493, 494, 5, 159, 80, 2, 494, 495, 5, 115, 58, 2, 495, 496, 5, 149, 75, 2, 496, 497, 5, 141, 71, 2, 497, 498, 5, 131, 66, 2, 498, 499, 5, 141, 71, 2, 499, 500, 5, 127, 64, 2, 500, 534, 3, 2, 2, 2, 501, 502, 5, 141, 71, 2, 502, 503, 5, 143, 72, 2, 503, 504, 5, 153, 77, 2, 504, 505, 5, 131, 66, 2, 505, 506, 5, 119, 60, 2, 506, 507, 5, 123, 62, 2, 507, 534, 3, 2, 2, 2, 508, 509, 5, 131, 66, 2, 509, 510, 5, 141, 71, 2, 510, 511, 5, 125, 63, 2, 511, 512, 5, 143, 72, 2, 512, 534, 3, 2, 2, 2, 513, 514, 5, 131, 66, 2, 514, 515, 5, 141, 71, 2, 515, 516, 5, 125, 63, 
2, 516, 517, 5, 143, 72, 2, 517, 518, 5, 149, 75, 2, 518, 519, 5, 139, 70, 2, 519, 520, 5, 115, 58, 2, 520, 521, 5, 153, 77, 2, 521, 522, 5, 131, 66, 2, 522, 523, 5, 143, 72, 2, 523, 524, 5, 141, 71, 2, 524, 525, 5, 115, 58, 2, 525, 526, 5, 137, 69, 2, 526, 534, 3, 2, 2, 2, 527, 528, 5, 121, 61, 2, 528, 529, 5, 123, 62, 2, 529, 530, 5, 117, 59, 2, 530, 531, 5, 155, 78, 2, 531, 532, 5, 127, 64, 2, 532, 534, 3, 2, 2, 2, 533, 462, 3, 2, 2, 2, 533, 472, 3, 2, 2, 2, 533, 478, 3, 2, 2, 2, 533, 487, 3, 2, 2, 2, 533, 493, 3, 2, 2, 2, 533, 501, 3, 2, 2, 2, 533, 508, 3, 2, 2, 2, 533, 513, 3, 2, 2, 2, 533, 527, 3, 2, 2, 2, 534, 92, 3, 2, 2, 2, 535, 557, 9, 2, 2, 2, 536, 556, 9, 3, 2, 2, 537, 539, 7, 60, 2, 2, 538, 537, 3, 2, 2, 2, 538, 539, 3, 2, 2, 2, 539, 540, 3, 2, 2, 2, 540, 543, 7, 93, 2, 2, 541, 544, 5, 95, 48, 2, 542, 544, 5, 97, 49, 2, 543, 541, 3, 2, 2, 2, 543, 542, 3, 2, 2, 2, 544, 549, 3, 2, 2, 2, 545, 546, 7, 60, 2, 2, 546, 548, 5, 97, 49, 2, 547, 545, 3, 2, 2, 2, 548, 551, 3, 2, 2, 2, 549, 547, 3, 2, 2, 2, 549, 550, 3, 2, 2, 2, 550, 552, 3, 2, 2, 2, 551, 549, 3, 2, 2, 2, 552, 553, 7, 95, 2, 2, 553, 556, 3, 2, 2, 2, 554, 556, 7, 44, 2, 2, 555, 536, 3, 2, 2, 2, 555, 538, 3, 2, 2, 2, 555, 554, 3, 2, 2, 2, 556, 559, 3, 2, 2, 2, 557, 555, 3, 2, 2, 2, 557, 558, 3, 2, 2, 2, 558, 94, 3, 2, 2, 2, 559, 557, 3, 2, 2, 2, 560, 562, 4, 50, 59, 2, 561, 560, 3, 2, 2, 2, 562, 563, 3, 2, 2, 2, 563, 561, 3, 2, 2, 2, 563, 564, 3, 2, 2, 2, 564, 571, 3, 2, 2, 2, 565, 567, 7, 48, 2, 2, 566, 568, 4, 50, 59, 2, 567, 566, 3, 2, 2, 2, 568, 569, 3, 2, 2, 2, 569, 567, 3, 2, 2, 2, 569, 570, 3, 2, 2, 2, 570, 572, 3, 2, 2, 2, 571, 565, 3, 2, 2, 2, 571, 572, 3, 2, 2, 2, 572, 96, 3, 2, 2, 2, 573, 577, 9, 4, 2, 2, 574, 576, 9, 5, 2, 2, 575, 574, 3, 2, 2, 2, 576, 579, 3, 2, 2, 2, 577, 575, 3, 2, 2, 2, 577, 578, 3, 2, 2, 2, 578, 98, 3, 2, 2, 2, 579, 577, 3, 2, 2, 2, 580, 583, 7, 36, 2, 2, 581, 584, 5, 99, 50, 2, 582, 584, 5, 103, 52, 2, 583, 581, 3, 2, 2, 2, 583, 582, 3, 2, 2, 2, 584, 585, 3, 2, 2, 2, 585, 586, 7, 36, 2, 2, 586, 615, 3, 2, 2, 2, 587, 590, 7, 41, 2, 2, 588, 591, 5, 99, 50, 2, 589, 591, 5, 103, 52, 2, 590, 588, 3, 2, 2, 2, 590, 589, 3, 2, 2, 2, 591, 592, 3, 2, 2, 2, 592, 593, 7, 41, 2, 2, 593, 615, 3, 2, 2, 2, 594, 595, 7, 94, 2, 2, 595, 596, 7, 36, 2, 2, 596, 599, 3, 2, 2, 2, 597, 600, 5, 99, 50, 2, 598, 600, 5, 103, 52, 2, 599, 597, 3, 2, 2, 2, 599, 598, 3, 2, 2, 2, 600, 601, 3, 2, 2, 2, 601, 602, 7, 94, 2, 2, 602, 603, 7, 36, 2, 2, 603, 615, 3, 2, 2, 2, 604, 605, 7, 41, 2, 2, 605, 606, 7, 41, 2, 2, 606, 609, 3, 2, 2, 2, 607, 610, 5, 99, 50, 2, 608, 610, 5, 103, 52, 2, 609, 607, 3, 2, 2, 2, 609, 608, 3, 2, 2, 2, 610, 611, 3, 2, 2, 2, 611, 612, 7, 41, 2, 2, 612, 613, 7, 41, 2, 2, 613, 615, 3, 2, 2, 2, 614, 580, 3, 2, 2, 2, 614, 587, 3, 2, 2, 2, 614, 594, 3, 2, 2, 2, 614, 604, 3, 2, 2, 2, 615, 100, 3, 2, 2, 2, 616, 617, 5, 93, 47, 2, 617, 618, 7, 60, 2, 2, 618, 619, 5, 93, 47, 2, 619, 102, 3, 2, 2, 2, 620, 622, 10, 6, 2, 2, 621, 620, 3, 2, 2, 2, 622, 625, 3, 2, 2, 2, 623, 624, 3, 2, 2, 2, 623, 621, 3, 2, 2, 2, 624, 104, 3, 2, 2, 2, 625, 623, 3, 2, 2, 2, 626, 627, 7, 94, 2, 2, 627, 631, 7, 36, 2, 2, 628, 629, 7, 41, 2, 2, 629, 631, 7, 41, 2, 2, 630, 626, 3, 2, 2, 2, 630, 628, 3, 2, 2, 2, 631, 106, 3, 2, 2, 2, 632, 634, 9, 7, 2, 2, 633, 632, 3, 2, 2, 2, 634, 635, 3, 2, 2, 2, 635, 633, 3, 2, 2, 2, 635, 636, 3, 2, 2, 2, 636, 637, 3, 2, 2, 2, 637, 638, 8, 54, 2, 2, 638, 108, 3, 2, 2, 2, 639, 641, 7, 15, 2, 2, 640, 639, 3, 2, 2, 2, 640, 641, 3, 2, 2, 2, 641, 642, 3, 2, 2, 2, 642, 643, 7, 12, 2, 2, 643, 644, 3, 2, 
2, 2, 644, 645, 8, 55, 2, 2, 645, 110, 3, 2, 2, 2, 646, 650, 7, 37, 2, 2, 647, 649, 10, 6, 2, 2, 648, 647, 3, 2, 2, 2, 649, 652, 3, 2, 2, 2, 650, 648, 3, 2, 2, 2, 650, 651, 3, 2, 2, 2, 651, 653, 3, 2, 2, 2, 652, 650, 3, 2, 2, 2, 653, 654, 8, 56, 2, 2, 654, 112, 3, 2, 2, 2, 655, 656, 11, 2, 2, 2, 656, 114, 3, 2, 2, 2, 657, 658, 9, 8, 2, 2, 658, 116, 3, 2, 2, 2, 659, 660, 9, 9, 2, 2, 660, 118, 3, 2, 2, 2, 661, 662, 9, 10, 2, 2, 662, 120, 3, 2, 2, 2, 663, 664, 9, 11, 2, 2, 664, 122, 3, 2, 2, 2, 665, 666, 9, 12, 2, 2, 666, 124, 3, 2, 2, 2, 667, 668, 9, 13, 2, 2, 668, 126, 3, 2, 2, 2, 669, 670, 9, 14, 2, 2, 670, 128, 3, 2, 2, 2, 671, 672, 9, 15, 2, 2, 672, 130, 3, 2, 2, 2, 673, 674, 9, 16, 2, 2, 674, 132, 3, 2, 2, 2, 675, 676, 9, 17, 2, 2, 676, 134, 3, 2, 2, 2, 677, 678, 9, 18, 2, 2, 678, 136, 3, 2, 2, 2, 679, 680, 9, 19, 2, 2, 680, 138, 3, 2, 2, 2, 681, 682, 9, 20, 2, 2, 682, 140, 3, 2, 2, 2, 683, 684, 9, 21, 2, 2, 684, 142, 3, 2, 2, 2, 685, 686, 9, 22, 2, 2, 686, 144, 3, 2, 2, 2, 687, 688, 9, 23, 2, 2, 688, 146, 3, 2, 2, 2, 689, 690, 9, 24, 2, 2, 690, 148, 3, 2, 2, 2, 691, 692, 9, 25, 2, 2, 692, 150, 3, 2, 2, 2, 693, 694, 9, 26, 2, 2, 694, 152, 3, 2, 2, 2, 695, 696, 9, 27, 2, 2, 696, 154, 3, 2, 2, 2, 697, 698, 9, 28, 2, 2, 698, 156, 3, 2, 2, 2, 699, 700, 9, 29, 2, 2, 700, 158, 3, 2, 2, 2, 701, 702, 9, 30, 2, 2, 702, 160, 3, 2, 2, 2, 703, 704, 9, 31, 2, 2, 704, 162, 3, 2, 2, 2, 705, 706, 9, 32, 2, 2, 706, 164, 3, 2, 2, 2, 707, 708, 9, 33, 2, 2, 708, 166, 3, 2, 2, 2, 27, 2, 434, 438, 442, 460, 533, 538, 543, 549, 555, 557, 563, 569, 571, 577, 583, 590, 599, 609, 614, 623, 630, 635, 640, 650, 3, 2, 3, 2] \ No newline at end of file diff --git a/core/policyengine/policy/falco/lang/parser/SfplLexer.tokens b/core/policyengine/policy/falco/lang/parser/SfplLexer.tokens new file mode 100644 index 00000000..e74d954b --- /dev/null +++ b/core/policyengine/policy/falco/lang/parser/SfplLexer.tokens @@ -0,0 +1,95 @@ +RULE=1 +FILTER=2 +DROP=3 +MACRO=4 +LIST=5 +NAME=6 +ITEMS=7 +COND=8 +DESC=9 +ACTIONS=10 +OUTPUT=11 +PRIORITY=12 +TAGS=13 +PREFILTER=14 +ENABLED=15 +WARNEVTTYPE=16 +SKIPUNKNOWN=17 +FAPPEND=18 +REQ=19 +AND=20 +OR=21 +NOT=22 +LT=23 +LE=24 +GT=25 +GE=26 +EQ=27 +NEQ=28 +IN=29 +CONTAINS=30 +ICONTAINS=31 +STARTSWITH=32 +ENDSWITH=33 +PMATCH=34 +EXISTS=35 +LBRACK=36 +RBRACK=37 +LPAREN=38 +RPAREN=39 +LISTSEP=40 +DECL=41 +DEF=42 +SEVERITY=43 +SFSEVERITY=44 +FSEVERITY=45 +ID=46 +NUMBER=47 +PATH=48 +STRING=49 +TAG=50 +WS=51 +NL=52 +COMMENT=53 +ANY=54 +'rule'=1 +'filter'=2 +'drop'=3 +'macro'=4 +'list'=5 +'name'=6 +'items'=7 +'condition'=8 +'desc'=9 +'actions'=10 +'output'=11 +'priority'=12 +'tags'=13 +'prefilter'=14 +'enabled'=15 +'warn_evttypes'=16 +'skip-if-unknown-filter'=17 +'append'=18 +'required_engine_version'=19 +'and'=20 +'or'=21 +'not'=22 +'<'=23 +'<='=24 +'>'=25 +'>='=26 +'='=27 +'!='=28 +'in'=29 +'contains'=30 +'icontains'=31 +'startswith'=32 +'endswith'=33 +'pmatch'=34 +'exists'=35 +'['=36 +']'=37 +'('=38 +')'=39 +','=40 +'-'=41 diff --git a/core/policyengine/lang/parser/sfpl_base_listener.go b/core/policyengine/policy/falco/lang/parser/sfpl_base_listener.go similarity index 77% rename from core/policyengine/lang/parser/sfpl_base_listener.go rename to core/policyengine/policy/falco/lang/parser/sfpl_base_listener.go index d2750016..776158a6 100644 --- a/core/policyengine/lang/parser/sfpl_base_listener.go +++ b/core/policyengine/policy/falco/lang/parser/sfpl_base_listener.go @@ -1,4 +1,4 @@ -// Code generated from Sfpl.g4 by ANTLR 4.8. DO NOT EDIT. +// Code generated from Sfpl.g4 by ANTLR 4.9.2. 
DO NOT EDIT. package parser // Sfpl import "github.com/antlr/antlr4/runtime/Go/antlr" @@ -26,18 +26,42 @@ func (s *BaseSfplListener) EnterPolicy(ctx *PolicyContext) {} // ExitPolicy is called when production policy is exited. func (s *BaseSfplListener) ExitPolicy(ctx *PolicyContext) {} +// EnterDefs is called when production defs is entered. +func (s *BaseSfplListener) EnterDefs(ctx *DefsContext) {} + +// ExitDefs is called when production defs is exited. +func (s *BaseSfplListener) ExitDefs(ctx *DefsContext) {} + // EnterPrule is called when production prule is entered. func (s *BaseSfplListener) EnterPrule(ctx *PruleContext) {} // ExitPrule is called when production prule is exited. func (s *BaseSfplListener) ExitPrule(ctx *PruleContext) {} +// EnterSrule is called when production srule is entered. +func (s *BaseSfplListener) EnterSrule(ctx *SruleContext) {} + +// ExitSrule is called when production srule is exited. +func (s *BaseSfplListener) ExitSrule(ctx *SruleContext) {} + // EnterPfilter is called when production pfilter is entered. func (s *BaseSfplListener) EnterPfilter(ctx *PfilterContext) {} // ExitPfilter is called when production pfilter is exited. func (s *BaseSfplListener) ExitPfilter(ctx *PfilterContext) {} +// EnterSfilter is called when production sfilter is entered. +func (s *BaseSfplListener) EnterSfilter(ctx *SfilterContext) {} + +// ExitSfilter is called when production sfilter is exited. +func (s *BaseSfplListener) ExitSfilter(ctx *SfilterContext) {} + +// EnterDrop_keyword is called when production drop_keyword is entered. +func (s *BaseSfplListener) EnterDrop_keyword(ctx *Drop_keywordContext) {} + +// ExitDrop_keyword is called when production drop_keyword is exited. +func (s *BaseSfplListener) ExitDrop_keyword(ctx *Drop_keywordContext) {} + // EnterPmacro is called when production pmacro is entered. func (s *BaseSfplListener) EnterPmacro(ctx *PmacroContext) {} @@ -50,6 +74,12 @@ func (s *BaseSfplListener) EnterPlist(ctx *PlistContext) {} // ExitPlist is called when production plist is exited. func (s *BaseSfplListener) ExitPlist(ctx *PlistContext) {} +// EnterPreq is called when production preq is entered. +func (s *BaseSfplListener) EnterPreq(ctx *PreqContext) {} + +// ExitPreq is called when production preq is exited. +func (s *BaseSfplListener) ExitPreq(ctx *PreqContext) {} + // EnterExpression is called when production expression is entered. func (s *BaseSfplListener) EnterExpression(ctx *ExpressionContext) {} @@ -80,6 +110,12 @@ func (s *BaseSfplListener) EnterItems(ctx *ItemsContext) {} // ExitItems is called when production items is exited. func (s *BaseSfplListener) ExitItems(ctx *ItemsContext) {} +// EnterActions is called when production actions is entered. +func (s *BaseSfplListener) EnterActions(ctx *ActionsContext) {} + +// ExitActions is called when production actions is exited. +func (s *BaseSfplListener) ExitActions(ctx *ActionsContext) {} + // EnterTags is called when production tags is entered. func (s *BaseSfplListener) EnterTags(ctx *TagsContext) {} @@ -116,6 +152,12 @@ func (s *BaseSfplListener) EnterSkipunknown(ctx *SkipunknownContext) {} // ExitSkipunknown is called when production skipunknown is exited. func (s *BaseSfplListener) ExitSkipunknown(ctx *SkipunknownContext) {} +// EnterFappend is called when production fappend is entered. +func (s *BaseSfplListener) EnterFappend(ctx *FappendContext) {} + +// ExitFappend is called when production fappend is exited. 
+func (s *BaseSfplListener) ExitFappend(ctx *FappendContext) {} + // EnterVariable is called when production variable is entered. func (s *BaseSfplListener) EnterVariable(ctx *VariableContext) {} diff --git a/core/policyengine/lang/parser/sfpl_base_visitor.go b/core/policyengine/policy/falco/lang/parser/sfpl_base_visitor.go similarity index 75% rename from core/policyengine/lang/parser/sfpl_base_visitor.go rename to core/policyengine/policy/falco/lang/parser/sfpl_base_visitor.go index cf6692f6..d3e26033 100644 --- a/core/policyengine/lang/parser/sfpl_base_visitor.go +++ b/core/policyengine/policy/falco/lang/parser/sfpl_base_visitor.go @@ -1,4 +1,4 @@ -// Code generated from Sfpl.g4 by ANTLR 4.8. DO NOT EDIT. +// Code generated from Sfpl.g4 by ANTLR 4.9.2. DO NOT EDIT. package parser // Sfpl import "github.com/antlr/antlr4/runtime/Go/antlr" @@ -11,14 +11,30 @@ func (v *BaseSfplVisitor) VisitPolicy(ctx *PolicyContext) interface{} { return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitDefs(ctx *DefsContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitPrule(ctx *PruleContext) interface{} { return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitSrule(ctx *SruleContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitPfilter(ctx *PfilterContext) interface{} { return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitSfilter(ctx *SfilterContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseSfplVisitor) VisitDrop_keyword(ctx *Drop_keywordContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitPmacro(ctx *PmacroContext) interface{} { return v.VisitChildren(ctx) } @@ -27,6 +43,10 @@ func (v *BaseSfplVisitor) VisitPlist(ctx *PlistContext) interface{} { return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitPreq(ctx *PreqContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitExpression(ctx *ExpressionContext) interface{} { return v.VisitChildren(ctx) } @@ -47,6 +67,10 @@ func (v *BaseSfplVisitor) VisitItems(ctx *ItemsContext) interface{} { return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitActions(ctx *ActionsContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitTags(ctx *TagsContext) interface{} { return v.VisitChildren(ctx) } @@ -71,6 +95,10 @@ func (v *BaseSfplVisitor) VisitSkipunknown(ctx *SkipunknownContext) interface{} return v.VisitChildren(ctx) } +func (v *BaseSfplVisitor) VisitFappend(ctx *FappendContext) interface{} { + return v.VisitChildren(ctx) +} + func (v *BaseSfplVisitor) VisitVariable(ctx *VariableContext) interface{} { return v.VisitChildren(ctx) } diff --git a/core/policyengine/policy/falco/lang/parser/sfpl_lexer.go b/core/policyengine/policy/falco/lang/parser/sfpl_lexer.go new file mode 100644 index 00000000..85412828 --- /dev/null +++ b/core/policyengine/policy/falco/lang/parser/sfpl_lexer.go @@ -0,0 +1,467 @@ +// Code generated from Sfpl.g4 by ANTLR 4.9.2. DO NOT EDIT. 
+ +package parser + +import ( + "fmt" + "unicode" + + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +// Suppress unused import error +var _ = fmt.Printf +var _ = unicode.IsLetter + +var serializedLexerAtn = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 56, 709, + 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, + 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, + 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, + 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, + 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, + 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, + 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, + 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, + 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, + 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, + 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, + 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, + 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, + 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, + 4, 76, 9, 76, 4, 77, 9, 77, 4, 78, 9, 78, 4, 79, 9, 79, 4, 80, 9, 80, 4, + 81, 9, 81, 4, 82, 9, 82, 4, 83, 9, 83, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, + 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, + 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, + 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, + 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, + 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, + 3, 13, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, + 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, + 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, + 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, + 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, + 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, + 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, + 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, + 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, + 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, + 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 27, 3, 27, 3, + 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, + 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, + 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, + 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, + 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, + 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 37, 3, + 37, 3, 38, 3, 38, 3, 39, 3, 39, 3, 40, 3, 40, 3, 41, 3, 41, 3, 42, 3, 42, + 3, 43, 3, 43, 7, 43, 433, 10, 43, 12, 43, 14, 43, 436, 11, 43, 3, 43, 5, + 43, 439, 10, 43, 3, 44, 3, 44, 5, 44, 443, 10, 44, 3, 45, 3, 45, 3, 45, + 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, + 45, 3, 45, 3, 45, 5, 45, 
461, 10, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, + 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, + 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, + 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, + 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, + 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, + 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, + 3, 46, 3, 46, 3, 46, 5, 46, 534, 10, 46, 3, 47, 3, 47, 3, 47, 5, 47, 539, + 10, 47, 3, 47, 3, 47, 3, 47, 5, 47, 544, 10, 47, 3, 47, 3, 47, 7, 47, 548, + 10, 47, 12, 47, 14, 47, 551, 11, 47, 3, 47, 3, 47, 3, 47, 7, 47, 556, 10, + 47, 12, 47, 14, 47, 559, 11, 47, 3, 48, 6, 48, 562, 10, 48, 13, 48, 14, + 48, 563, 3, 48, 3, 48, 6, 48, 568, 10, 48, 13, 48, 14, 48, 569, 5, 48, + 572, 10, 48, 3, 49, 3, 49, 7, 49, 576, 10, 49, 12, 49, 14, 49, 579, 11, + 49, 3, 50, 3, 50, 3, 50, 5, 50, 584, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, + 3, 50, 5, 50, 591, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, + 50, 5, 50, 600, 10, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, + 3, 50, 5, 50, 610, 10, 50, 3, 50, 3, 50, 3, 50, 5, 50, 615, 10, 50, 3, + 51, 3, 51, 3, 51, 3, 51, 3, 52, 7, 52, 622, 10, 52, 12, 52, 14, 52, 625, + 11, 52, 3, 53, 3, 53, 3, 53, 3, 53, 5, 53, 631, 10, 53, 3, 54, 6, 54, 634, + 10, 54, 13, 54, 14, 54, 635, 3, 54, 3, 54, 3, 55, 5, 55, 641, 10, 55, 3, + 55, 3, 55, 3, 55, 3, 55, 3, 56, 3, 56, 7, 56, 649, 10, 56, 12, 56, 14, + 56, 652, 11, 56, 3, 56, 3, 56, 3, 57, 3, 57, 3, 58, 3, 58, 3, 59, 3, 59, + 3, 60, 3, 60, 3, 61, 3, 61, 3, 62, 3, 62, 3, 63, 3, 63, 3, 64, 3, 64, 3, + 65, 3, 65, 3, 66, 3, 66, 3, 67, 3, 67, 3, 68, 3, 68, 3, 69, 3, 69, 3, 70, + 3, 70, 3, 71, 3, 71, 3, 72, 3, 72, 3, 73, 3, 73, 3, 74, 3, 74, 3, 75, 3, + 75, 3, 76, 3, 76, 3, 77, 3, 77, 3, 78, 3, 78, 3, 79, 3, 79, 3, 80, 3, 80, + 3, 81, 3, 81, 3, 82, 3, 82, 3, 83, 3, 83, 3, 623, 2, 84, 3, 3, 5, 4, 7, + 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, + 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, + 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, + 33, 65, 34, 67, 35, 69, 36, 71, 37, 73, 38, 75, 39, 77, 40, 79, 41, 81, + 42, 83, 43, 85, 44, 87, 45, 89, 46, 91, 47, 93, 48, 95, 49, 97, 50, 99, + 51, 101, 52, 103, 2, 105, 2, 107, 53, 109, 54, 111, 55, 113, 56, 115, 2, + 117, 2, 119, 2, 121, 2, 123, 2, 125, 2, 127, 2, 129, 2, 131, 2, 133, 2, + 135, 2, 137, 2, 139, 2, 141, 2, 143, 2, 145, 2, 147, 2, 149, 2, 151, 2, + 153, 2, 155, 2, 157, 2, 159, 2, 161, 2, 163, 2, 165, 2, 3, 2, 34, 6, 2, + 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, 97, 97, + 99, 124, 5, 2, 48, 59, 67, 92, 99, 124, 7, 2, 44, 44, 47, 59, 67, 92, 97, + 97, 99, 124, 4, 2, 12, 12, 15, 15, 5, 2, 11, 12, 14, 15, 34, 34, 4, 2, + 67, 67, 99, 99, 4, 2, 68, 68, 100, 100, 4, 2, 69, 69, 101, 101, 4, 2, 70, + 70, 102, 102, 4, 2, 71, 71, 103, 103, 4, 2, 72, 72, 104, 104, 4, 2, 73, + 73, 105, 105, 4, 2, 74, 74, 106, 106, 4, 2, 75, 75, 107, 107, 4, 2, 76, + 76, 108, 108, 4, 2, 77, 77, 109, 109, 4, 2, 78, 78, 110, 110, 4, 2, 79, + 79, 111, 111, 4, 2, 80, 80, 112, 112, 4, 2, 81, 81, 113, 113, 4, 2, 82, + 82, 114, 114, 4, 2, 83, 83, 115, 115, 4, 2, 84, 84, 116, 116, 4, 2, 85, + 85, 117, 117, 4, 2, 86, 86, 118, 118, 4, 2, 87, 87, 119, 119, 4, 2, 88, + 88, 120, 120, 4, 2, 89, 89, 121, 121, 4, 2, 90, 90, 122, 122, 4, 2, 91, + 91, 123, 123, 4, 2, 92, 92, 124, 124, 2, 715, 2, 3, 3, 2, 2, 2, 2, 
5, 3, + 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, + 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, + 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, + 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, + 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, + 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, + 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, + 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, + 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, + 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, + 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, + 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, 95, 3, 2, 2, 2, 2, 97, 3, + 2, 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 107, 3, 2, 2, 2, 2, + 109, 3, 2, 2, 2, 2, 111, 3, 2, 2, 2, 2, 113, 3, 2, 2, 2, 3, 167, 3, 2, + 2, 2, 5, 172, 3, 2, 2, 2, 7, 179, 3, 2, 2, 2, 9, 184, 3, 2, 2, 2, 11, 190, + 3, 2, 2, 2, 13, 195, 3, 2, 2, 2, 15, 200, 3, 2, 2, 2, 17, 206, 3, 2, 2, + 2, 19, 216, 3, 2, 2, 2, 21, 221, 3, 2, 2, 2, 23, 229, 3, 2, 2, 2, 25, 236, + 3, 2, 2, 2, 27, 245, 3, 2, 2, 2, 29, 250, 3, 2, 2, 2, 31, 260, 3, 2, 2, + 2, 33, 268, 3, 2, 2, 2, 35, 282, 3, 2, 2, 2, 37, 305, 3, 2, 2, 2, 39, 312, + 3, 2, 2, 2, 41, 336, 3, 2, 2, 2, 43, 340, 3, 2, 2, 2, 45, 343, 3, 2, 2, + 2, 47, 347, 3, 2, 2, 2, 49, 349, 3, 2, 2, 2, 51, 352, 3, 2, 2, 2, 53, 354, + 3, 2, 2, 2, 55, 357, 3, 2, 2, 2, 57, 359, 3, 2, 2, 2, 59, 362, 3, 2, 2, + 2, 61, 365, 3, 2, 2, 2, 63, 374, 3, 2, 2, 2, 65, 384, 3, 2, 2, 2, 67, 395, + 3, 2, 2, 2, 69, 404, 3, 2, 2, 2, 71, 411, 3, 2, 2, 2, 73, 418, 3, 2, 2, + 2, 75, 420, 3, 2, 2, 2, 77, 422, 3, 2, 2, 2, 79, 424, 3, 2, 2, 2, 81, 426, + 3, 2, 2, 2, 83, 428, 3, 2, 2, 2, 85, 430, 3, 2, 2, 2, 87, 442, 3, 2, 2, + 2, 89, 460, 3, 2, 2, 2, 91, 533, 3, 2, 2, 2, 93, 535, 3, 2, 2, 2, 95, 561, + 3, 2, 2, 2, 97, 573, 3, 2, 2, 2, 99, 614, 3, 2, 2, 2, 101, 616, 3, 2, 2, + 2, 103, 623, 3, 2, 2, 2, 105, 630, 3, 2, 2, 2, 107, 633, 3, 2, 2, 2, 109, + 640, 3, 2, 2, 2, 111, 646, 3, 2, 2, 2, 113, 655, 3, 2, 2, 2, 115, 657, + 3, 2, 2, 2, 117, 659, 3, 2, 2, 2, 119, 661, 3, 2, 2, 2, 121, 663, 3, 2, + 2, 2, 123, 665, 3, 2, 2, 2, 125, 667, 3, 2, 2, 2, 127, 669, 3, 2, 2, 2, + 129, 671, 3, 2, 2, 2, 131, 673, 3, 2, 2, 2, 133, 675, 3, 2, 2, 2, 135, + 677, 3, 2, 2, 2, 137, 679, 3, 2, 2, 2, 139, 681, 3, 2, 2, 2, 141, 683, + 3, 2, 2, 2, 143, 685, 3, 2, 2, 2, 145, 687, 3, 2, 2, 2, 147, 689, 3, 2, + 2, 2, 149, 691, 3, 2, 2, 2, 151, 693, 3, 2, 2, 2, 153, 695, 3, 2, 2, 2, + 155, 697, 3, 2, 2, 2, 157, 699, 3, 2, 2, 2, 159, 701, 3, 2, 2, 2, 161, + 703, 3, 2, 2, 2, 163, 705, 3, 2, 2, 2, 165, 707, 3, 2, 2, 2, 167, 168, + 7, 116, 2, 2, 168, 169, 7, 119, 2, 2, 169, 170, 7, 110, 2, 2, 170, 171, + 7, 103, 2, 2, 171, 4, 3, 2, 2, 2, 172, 173, 7, 104, 2, 2, 173, 174, 7, + 107, 2, 2, 174, 175, 7, 110, 2, 2, 175, 176, 7, 118, 2, 2, 176, 177, 7, + 103, 2, 2, 177, 178, 7, 116, 2, 2, 178, 6, 3, 2, 2, 2, 179, 180, 7, 102, + 2, 2, 180, 181, 7, 116, 2, 2, 181, 182, 7, 113, 2, 2, 182, 183, 7, 114, + 2, 2, 183, 8, 3, 2, 2, 2, 184, 185, 7, 111, 2, 2, 185, 186, 7, 99, 2, 2, + 186, 187, 7, 101, 2, 2, 187, 188, 7, 116, 2, 2, 188, 189, 7, 113, 2, 2, + 189, 10, 3, 2, 2, 2, 190, 191, 7, 110, 2, 2, 191, 192, 7, 107, 2, 2, 192, + 193, 7, 117, 2, 2, 193, 194, 7, 118, 2, 2, 194, 12, 3, 2, 2, 2, 195, 196, + 7, 112, 2, 2, 196, 197, 7, 99, 2, 2, 
197, 198, 7, 111, 2, 2, 198, 199, + 7, 103, 2, 2, 199, 14, 3, 2, 2, 2, 200, 201, 7, 107, 2, 2, 201, 202, 7, + 118, 2, 2, 202, 203, 7, 103, 2, 2, 203, 204, 7, 111, 2, 2, 204, 205, 7, + 117, 2, 2, 205, 16, 3, 2, 2, 2, 206, 207, 7, 101, 2, 2, 207, 208, 7, 113, + 2, 2, 208, 209, 7, 112, 2, 2, 209, 210, 7, 102, 2, 2, 210, 211, 7, 107, + 2, 2, 211, 212, 7, 118, 2, 2, 212, 213, 7, 107, 2, 2, 213, 214, 7, 113, + 2, 2, 214, 215, 7, 112, 2, 2, 215, 18, 3, 2, 2, 2, 216, 217, 7, 102, 2, + 2, 217, 218, 7, 103, 2, 2, 218, 219, 7, 117, 2, 2, 219, 220, 7, 101, 2, + 2, 220, 20, 3, 2, 2, 2, 221, 222, 7, 99, 2, 2, 222, 223, 7, 101, 2, 2, + 223, 224, 7, 118, 2, 2, 224, 225, 7, 107, 2, 2, 225, 226, 7, 113, 2, 2, + 226, 227, 7, 112, 2, 2, 227, 228, 7, 117, 2, 2, 228, 22, 3, 2, 2, 2, 229, + 230, 7, 113, 2, 2, 230, 231, 7, 119, 2, 2, 231, 232, 7, 118, 2, 2, 232, + 233, 7, 114, 2, 2, 233, 234, 7, 119, 2, 2, 234, 235, 7, 118, 2, 2, 235, + 24, 3, 2, 2, 2, 236, 237, 7, 114, 2, 2, 237, 238, 7, 116, 2, 2, 238, 239, + 7, 107, 2, 2, 239, 240, 7, 113, 2, 2, 240, 241, 7, 116, 2, 2, 241, 242, + 7, 107, 2, 2, 242, 243, 7, 118, 2, 2, 243, 244, 7, 123, 2, 2, 244, 26, + 3, 2, 2, 2, 245, 246, 7, 118, 2, 2, 246, 247, 7, 99, 2, 2, 247, 248, 7, + 105, 2, 2, 248, 249, 7, 117, 2, 2, 249, 28, 3, 2, 2, 2, 250, 251, 7, 114, + 2, 2, 251, 252, 7, 116, 2, 2, 252, 253, 7, 103, 2, 2, 253, 254, 7, 104, + 2, 2, 254, 255, 7, 107, 2, 2, 255, 256, 7, 110, 2, 2, 256, 257, 7, 118, + 2, 2, 257, 258, 7, 103, 2, 2, 258, 259, 7, 116, 2, 2, 259, 30, 3, 2, 2, + 2, 260, 261, 7, 103, 2, 2, 261, 262, 7, 112, 2, 2, 262, 263, 7, 99, 2, + 2, 263, 264, 7, 100, 2, 2, 264, 265, 7, 110, 2, 2, 265, 266, 7, 103, 2, + 2, 266, 267, 7, 102, 2, 2, 267, 32, 3, 2, 2, 2, 268, 269, 7, 121, 2, 2, + 269, 270, 7, 99, 2, 2, 270, 271, 7, 116, 2, 2, 271, 272, 7, 112, 2, 2, + 272, 273, 7, 97, 2, 2, 273, 274, 7, 103, 2, 2, 274, 275, 7, 120, 2, 2, + 275, 276, 7, 118, 2, 2, 276, 277, 7, 118, 2, 2, 277, 278, 7, 123, 2, 2, + 278, 279, 7, 114, 2, 2, 279, 280, 7, 103, 2, 2, 280, 281, 7, 117, 2, 2, + 281, 34, 3, 2, 2, 2, 282, 283, 7, 117, 2, 2, 283, 284, 7, 109, 2, 2, 284, + 285, 7, 107, 2, 2, 285, 286, 7, 114, 2, 2, 286, 287, 7, 47, 2, 2, 287, + 288, 7, 107, 2, 2, 288, 289, 7, 104, 2, 2, 289, 290, 7, 47, 2, 2, 290, + 291, 7, 119, 2, 2, 291, 292, 7, 112, 2, 2, 292, 293, 7, 109, 2, 2, 293, + 294, 7, 112, 2, 2, 294, 295, 7, 113, 2, 2, 295, 296, 7, 121, 2, 2, 296, + 297, 7, 112, 2, 2, 297, 298, 7, 47, 2, 2, 298, 299, 7, 104, 2, 2, 299, + 300, 7, 107, 2, 2, 300, 301, 7, 110, 2, 2, 301, 302, 7, 118, 2, 2, 302, + 303, 7, 103, 2, 2, 303, 304, 7, 116, 2, 2, 304, 36, 3, 2, 2, 2, 305, 306, + 7, 99, 2, 2, 306, 307, 7, 114, 2, 2, 307, 308, 7, 114, 2, 2, 308, 309, + 7, 103, 2, 2, 309, 310, 7, 112, 2, 2, 310, 311, 7, 102, 2, 2, 311, 38, + 3, 2, 2, 2, 312, 313, 7, 116, 2, 2, 313, 314, 7, 103, 2, 2, 314, 315, 7, + 115, 2, 2, 315, 316, 7, 119, 2, 2, 316, 317, 7, 107, 2, 2, 317, 318, 7, + 116, 2, 2, 318, 319, 7, 103, 2, 2, 319, 320, 7, 102, 2, 2, 320, 321, 7, + 97, 2, 2, 321, 322, 7, 103, 2, 2, 322, 323, 7, 112, 2, 2, 323, 324, 7, + 105, 2, 2, 324, 325, 7, 107, 2, 2, 325, 326, 7, 112, 2, 2, 326, 327, 7, + 103, 2, 2, 327, 328, 7, 97, 2, 2, 328, 329, 7, 120, 2, 2, 329, 330, 7, + 103, 2, 2, 330, 331, 7, 116, 2, 2, 331, 332, 7, 117, 2, 2, 332, 333, 7, + 107, 2, 2, 333, 334, 7, 113, 2, 2, 334, 335, 7, 112, 2, 2, 335, 40, 3, + 2, 2, 2, 336, 337, 7, 99, 2, 2, 337, 338, 7, 112, 2, 2, 338, 339, 7, 102, + 2, 2, 339, 42, 3, 2, 2, 2, 340, 341, 7, 113, 2, 2, 341, 342, 7, 116, 2, + 2, 342, 44, 3, 2, 2, 2, 343, 344, 7, 
112, 2, 2, 344, 345, 7, 113, 2, 2, + 345, 346, 7, 118, 2, 2, 346, 46, 3, 2, 2, 2, 347, 348, 7, 62, 2, 2, 348, + 48, 3, 2, 2, 2, 349, 350, 7, 62, 2, 2, 350, 351, 7, 63, 2, 2, 351, 50, + 3, 2, 2, 2, 352, 353, 7, 64, 2, 2, 353, 52, 3, 2, 2, 2, 354, 355, 7, 64, + 2, 2, 355, 356, 7, 63, 2, 2, 356, 54, 3, 2, 2, 2, 357, 358, 7, 63, 2, 2, + 358, 56, 3, 2, 2, 2, 359, 360, 7, 35, 2, 2, 360, 361, 7, 63, 2, 2, 361, + 58, 3, 2, 2, 2, 362, 363, 7, 107, 2, 2, 363, 364, 7, 112, 2, 2, 364, 60, + 3, 2, 2, 2, 365, 366, 7, 101, 2, 2, 366, 367, 7, 113, 2, 2, 367, 368, 7, + 112, 2, 2, 368, 369, 7, 118, 2, 2, 369, 370, 7, 99, 2, 2, 370, 371, 7, + 107, 2, 2, 371, 372, 7, 112, 2, 2, 372, 373, 7, 117, 2, 2, 373, 62, 3, + 2, 2, 2, 374, 375, 7, 107, 2, 2, 375, 376, 7, 101, 2, 2, 376, 377, 7, 113, + 2, 2, 377, 378, 7, 112, 2, 2, 378, 379, 7, 118, 2, 2, 379, 380, 7, 99, + 2, 2, 380, 381, 7, 107, 2, 2, 381, 382, 7, 112, 2, 2, 382, 383, 7, 117, + 2, 2, 383, 64, 3, 2, 2, 2, 384, 385, 7, 117, 2, 2, 385, 386, 7, 118, 2, + 2, 386, 387, 7, 99, 2, 2, 387, 388, 7, 116, 2, 2, 388, 389, 7, 118, 2, + 2, 389, 390, 7, 117, 2, 2, 390, 391, 7, 121, 2, 2, 391, 392, 7, 107, 2, + 2, 392, 393, 7, 118, 2, 2, 393, 394, 7, 106, 2, 2, 394, 66, 3, 2, 2, 2, + 395, 396, 7, 103, 2, 2, 396, 397, 7, 112, 2, 2, 397, 398, 7, 102, 2, 2, + 398, 399, 7, 117, 2, 2, 399, 400, 7, 121, 2, 2, 400, 401, 7, 107, 2, 2, + 401, 402, 7, 118, 2, 2, 402, 403, 7, 106, 2, 2, 403, 68, 3, 2, 2, 2, 404, + 405, 7, 114, 2, 2, 405, 406, 7, 111, 2, 2, 406, 407, 7, 99, 2, 2, 407, + 408, 7, 118, 2, 2, 408, 409, 7, 101, 2, 2, 409, 410, 7, 106, 2, 2, 410, + 70, 3, 2, 2, 2, 411, 412, 7, 103, 2, 2, 412, 413, 7, 122, 2, 2, 413, 414, + 7, 107, 2, 2, 414, 415, 7, 117, 2, 2, 415, 416, 7, 118, 2, 2, 416, 417, + 7, 117, 2, 2, 417, 72, 3, 2, 2, 2, 418, 419, 7, 93, 2, 2, 419, 74, 3, 2, + 2, 2, 420, 421, 7, 95, 2, 2, 421, 76, 3, 2, 2, 2, 422, 423, 7, 42, 2, 2, + 423, 78, 3, 2, 2, 2, 424, 425, 7, 43, 2, 2, 425, 80, 3, 2, 2, 2, 426, 427, + 7, 46, 2, 2, 427, 82, 3, 2, 2, 2, 428, 429, 7, 47, 2, 2, 429, 84, 3, 2, + 2, 2, 430, 438, 7, 60, 2, 2, 431, 433, 7, 34, 2, 2, 432, 431, 3, 2, 2, + 2, 433, 436, 3, 2, 2, 2, 434, 432, 3, 2, 2, 2, 434, 435, 3, 2, 2, 2, 435, + 437, 3, 2, 2, 2, 436, 434, 3, 2, 2, 2, 437, 439, 7, 64, 2, 2, 438, 434, + 3, 2, 2, 2, 438, 439, 3, 2, 2, 2, 439, 86, 3, 2, 2, 2, 440, 443, 5, 89, + 45, 2, 441, 443, 5, 91, 46, 2, 442, 440, 3, 2, 2, 2, 442, 441, 3, 2, 2, + 2, 443, 88, 3, 2, 2, 2, 444, 445, 5, 129, 65, 2, 445, 446, 5, 131, 66, + 2, 446, 447, 5, 127, 64, 2, 447, 448, 5, 129, 65, 2, 448, 461, 3, 2, 2, + 2, 449, 450, 5, 139, 70, 2, 450, 451, 5, 123, 62, 2, 451, 452, 5, 121, + 61, 2, 452, 453, 5, 131, 66, 2, 453, 454, 5, 155, 78, 2, 454, 455, 5, 139, + 70, 2, 455, 461, 3, 2, 2, 2, 456, 457, 5, 137, 69, 2, 457, 458, 5, 143, + 72, 2, 458, 459, 5, 159, 80, 2, 459, 461, 3, 2, 2, 2, 460, 444, 3, 2, 2, + 2, 460, 449, 3, 2, 2, 2, 460, 456, 3, 2, 2, 2, 461, 90, 3, 2, 2, 2, 462, + 463, 5, 123, 62, 2, 463, 464, 5, 139, 70, 2, 464, 465, 5, 123, 62, 2, 465, + 466, 5, 149, 75, 2, 466, 467, 5, 127, 64, 2, 467, 468, 5, 123, 62, 2, 468, + 469, 5, 141, 71, 2, 469, 470, 5, 119, 60, 2, 470, 471, 5, 163, 82, 2, 471, + 534, 3, 2, 2, 2, 472, 473, 5, 115, 58, 2, 473, 474, 5, 137, 69, 2, 474, + 475, 5, 123, 62, 2, 475, 476, 5, 149, 75, 2, 476, 477, 5, 153, 77, 2, 477, + 534, 3, 2, 2, 2, 478, 479, 5, 119, 60, 2, 479, 480, 5, 149, 75, 2, 480, + 481, 5, 131, 66, 2, 481, 482, 5, 153, 77, 2, 482, 483, 5, 131, 66, 2, 483, + 484, 5, 119, 60, 2, 484, 485, 5, 115, 58, 2, 485, 486, 5, 137, 69, 2, 486, + 534, 
3, 2, 2, 2, 487, 488, 5, 123, 62, 2, 488, 489, 5, 149, 75, 2, 489, + 490, 5, 149, 75, 2, 490, 491, 5, 143, 72, 2, 491, 492, 5, 149, 75, 2, 492, + 534, 3, 2, 2, 2, 493, 494, 5, 159, 80, 2, 494, 495, 5, 115, 58, 2, 495, + 496, 5, 149, 75, 2, 496, 497, 5, 141, 71, 2, 497, 498, 5, 131, 66, 2, 498, + 499, 5, 141, 71, 2, 499, 500, 5, 127, 64, 2, 500, 534, 3, 2, 2, 2, 501, + 502, 5, 141, 71, 2, 502, 503, 5, 143, 72, 2, 503, 504, 5, 153, 77, 2, 504, + 505, 5, 131, 66, 2, 505, 506, 5, 119, 60, 2, 506, 507, 5, 123, 62, 2, 507, + 534, 3, 2, 2, 2, 508, 509, 5, 131, 66, 2, 509, 510, 5, 141, 71, 2, 510, + 511, 5, 125, 63, 2, 511, 512, 5, 143, 72, 2, 512, 534, 3, 2, 2, 2, 513, + 514, 5, 131, 66, 2, 514, 515, 5, 141, 71, 2, 515, 516, 5, 125, 63, 2, 516, + 517, 5, 143, 72, 2, 517, 518, 5, 149, 75, 2, 518, 519, 5, 139, 70, 2, 519, + 520, 5, 115, 58, 2, 520, 521, 5, 153, 77, 2, 521, 522, 5, 131, 66, 2, 522, + 523, 5, 143, 72, 2, 523, 524, 5, 141, 71, 2, 524, 525, 5, 115, 58, 2, 525, + 526, 5, 137, 69, 2, 526, 534, 3, 2, 2, 2, 527, 528, 5, 121, 61, 2, 528, + 529, 5, 123, 62, 2, 529, 530, 5, 117, 59, 2, 530, 531, 5, 155, 78, 2, 531, + 532, 5, 127, 64, 2, 532, 534, 3, 2, 2, 2, 533, 462, 3, 2, 2, 2, 533, 472, + 3, 2, 2, 2, 533, 478, 3, 2, 2, 2, 533, 487, 3, 2, 2, 2, 533, 493, 3, 2, + 2, 2, 533, 501, 3, 2, 2, 2, 533, 508, 3, 2, 2, 2, 533, 513, 3, 2, 2, 2, + 533, 527, 3, 2, 2, 2, 534, 92, 3, 2, 2, 2, 535, 557, 9, 2, 2, 2, 536, 556, + 9, 3, 2, 2, 537, 539, 7, 60, 2, 2, 538, 537, 3, 2, 2, 2, 538, 539, 3, 2, + 2, 2, 539, 540, 3, 2, 2, 2, 540, 543, 7, 93, 2, 2, 541, 544, 5, 95, 48, + 2, 542, 544, 5, 97, 49, 2, 543, 541, 3, 2, 2, 2, 543, 542, 3, 2, 2, 2, + 544, 549, 3, 2, 2, 2, 545, 546, 7, 60, 2, 2, 546, 548, 5, 97, 49, 2, 547, + 545, 3, 2, 2, 2, 548, 551, 3, 2, 2, 2, 549, 547, 3, 2, 2, 2, 549, 550, + 3, 2, 2, 2, 550, 552, 3, 2, 2, 2, 551, 549, 3, 2, 2, 2, 552, 553, 7, 95, + 2, 2, 553, 556, 3, 2, 2, 2, 554, 556, 7, 44, 2, 2, 555, 536, 3, 2, 2, 2, + 555, 538, 3, 2, 2, 2, 555, 554, 3, 2, 2, 2, 556, 559, 3, 2, 2, 2, 557, + 555, 3, 2, 2, 2, 557, 558, 3, 2, 2, 2, 558, 94, 3, 2, 2, 2, 559, 557, 3, + 2, 2, 2, 560, 562, 4, 50, 59, 2, 561, 560, 3, 2, 2, 2, 562, 563, 3, 2, + 2, 2, 563, 561, 3, 2, 2, 2, 563, 564, 3, 2, 2, 2, 564, 571, 3, 2, 2, 2, + 565, 567, 7, 48, 2, 2, 566, 568, 4, 50, 59, 2, 567, 566, 3, 2, 2, 2, 568, + 569, 3, 2, 2, 2, 569, 567, 3, 2, 2, 2, 569, 570, 3, 2, 2, 2, 570, 572, + 3, 2, 2, 2, 571, 565, 3, 2, 2, 2, 571, 572, 3, 2, 2, 2, 572, 96, 3, 2, + 2, 2, 573, 577, 9, 4, 2, 2, 574, 576, 9, 5, 2, 2, 575, 574, 3, 2, 2, 2, + 576, 579, 3, 2, 2, 2, 577, 575, 3, 2, 2, 2, 577, 578, 3, 2, 2, 2, 578, + 98, 3, 2, 2, 2, 579, 577, 3, 2, 2, 2, 580, 583, 7, 36, 2, 2, 581, 584, + 5, 99, 50, 2, 582, 584, 5, 103, 52, 2, 583, 581, 3, 2, 2, 2, 583, 582, + 3, 2, 2, 2, 584, 585, 3, 2, 2, 2, 585, 586, 7, 36, 2, 2, 586, 615, 3, 2, + 2, 2, 587, 590, 7, 41, 2, 2, 588, 591, 5, 99, 50, 2, 589, 591, 5, 103, + 52, 2, 590, 588, 3, 2, 2, 2, 590, 589, 3, 2, 2, 2, 591, 592, 3, 2, 2, 2, + 592, 593, 7, 41, 2, 2, 593, 615, 3, 2, 2, 2, 594, 595, 7, 94, 2, 2, 595, + 596, 7, 36, 2, 2, 596, 599, 3, 2, 2, 2, 597, 600, 5, 99, 50, 2, 598, 600, + 5, 103, 52, 2, 599, 597, 3, 2, 2, 2, 599, 598, 3, 2, 2, 2, 600, 601, 3, + 2, 2, 2, 601, 602, 7, 94, 2, 2, 602, 603, 7, 36, 2, 2, 603, 615, 3, 2, + 2, 2, 604, 605, 7, 41, 2, 2, 605, 606, 7, 41, 2, 2, 606, 609, 3, 2, 2, + 2, 607, 610, 5, 99, 50, 2, 608, 610, 5, 103, 52, 2, 609, 607, 3, 2, 2, + 2, 609, 608, 3, 2, 2, 2, 610, 611, 3, 2, 2, 2, 611, 612, 7, 41, 2, 2, 612, + 613, 7, 41, 2, 2, 613, 615, 3, 2, 2, 2, 614, 
580, 3, 2, 2, 2, 614, 587, + 3, 2, 2, 2, 614, 594, 3, 2, 2, 2, 614, 604, 3, 2, 2, 2, 615, 100, 3, 2, + 2, 2, 616, 617, 5, 93, 47, 2, 617, 618, 7, 60, 2, 2, 618, 619, 5, 93, 47, + 2, 619, 102, 3, 2, 2, 2, 620, 622, 10, 6, 2, 2, 621, 620, 3, 2, 2, 2, 622, + 625, 3, 2, 2, 2, 623, 624, 3, 2, 2, 2, 623, 621, 3, 2, 2, 2, 624, 104, + 3, 2, 2, 2, 625, 623, 3, 2, 2, 2, 626, 627, 7, 94, 2, 2, 627, 631, 7, 36, + 2, 2, 628, 629, 7, 41, 2, 2, 629, 631, 7, 41, 2, 2, 630, 626, 3, 2, 2, + 2, 630, 628, 3, 2, 2, 2, 631, 106, 3, 2, 2, 2, 632, 634, 9, 7, 2, 2, 633, + 632, 3, 2, 2, 2, 634, 635, 3, 2, 2, 2, 635, 633, 3, 2, 2, 2, 635, 636, + 3, 2, 2, 2, 636, 637, 3, 2, 2, 2, 637, 638, 8, 54, 2, 2, 638, 108, 3, 2, + 2, 2, 639, 641, 7, 15, 2, 2, 640, 639, 3, 2, 2, 2, 640, 641, 3, 2, 2, 2, + 641, 642, 3, 2, 2, 2, 642, 643, 7, 12, 2, 2, 643, 644, 3, 2, 2, 2, 644, + 645, 8, 55, 2, 2, 645, 110, 3, 2, 2, 2, 646, 650, 7, 37, 2, 2, 647, 649, + 10, 6, 2, 2, 648, 647, 3, 2, 2, 2, 649, 652, 3, 2, 2, 2, 650, 648, 3, 2, + 2, 2, 650, 651, 3, 2, 2, 2, 651, 653, 3, 2, 2, 2, 652, 650, 3, 2, 2, 2, + 653, 654, 8, 56, 2, 2, 654, 112, 3, 2, 2, 2, 655, 656, 11, 2, 2, 2, 656, + 114, 3, 2, 2, 2, 657, 658, 9, 8, 2, 2, 658, 116, 3, 2, 2, 2, 659, 660, + 9, 9, 2, 2, 660, 118, 3, 2, 2, 2, 661, 662, 9, 10, 2, 2, 662, 120, 3, 2, + 2, 2, 663, 664, 9, 11, 2, 2, 664, 122, 3, 2, 2, 2, 665, 666, 9, 12, 2, + 2, 666, 124, 3, 2, 2, 2, 667, 668, 9, 13, 2, 2, 668, 126, 3, 2, 2, 2, 669, + 670, 9, 14, 2, 2, 670, 128, 3, 2, 2, 2, 671, 672, 9, 15, 2, 2, 672, 130, + 3, 2, 2, 2, 673, 674, 9, 16, 2, 2, 674, 132, 3, 2, 2, 2, 675, 676, 9, 17, + 2, 2, 676, 134, 3, 2, 2, 2, 677, 678, 9, 18, 2, 2, 678, 136, 3, 2, 2, 2, + 679, 680, 9, 19, 2, 2, 680, 138, 3, 2, 2, 2, 681, 682, 9, 20, 2, 2, 682, + 140, 3, 2, 2, 2, 683, 684, 9, 21, 2, 2, 684, 142, 3, 2, 2, 2, 685, 686, + 9, 22, 2, 2, 686, 144, 3, 2, 2, 2, 687, 688, 9, 23, 2, 2, 688, 146, 3, + 2, 2, 2, 689, 690, 9, 24, 2, 2, 690, 148, 3, 2, 2, 2, 691, 692, 9, 25, + 2, 2, 692, 150, 3, 2, 2, 2, 693, 694, 9, 26, 2, 2, 694, 152, 3, 2, 2, 2, + 695, 696, 9, 27, 2, 2, 696, 154, 3, 2, 2, 2, 697, 698, 9, 28, 2, 2, 698, + 156, 3, 2, 2, 2, 699, 700, 9, 29, 2, 2, 700, 158, 3, 2, 2, 2, 701, 702, + 9, 30, 2, 2, 702, 160, 3, 2, 2, 2, 703, 704, 9, 31, 2, 2, 704, 162, 3, + 2, 2, 2, 705, 706, 9, 32, 2, 2, 706, 164, 3, 2, 2, 2, 707, 708, 9, 33, + 2, 2, 708, 166, 3, 2, 2, 2, 27, 2, 434, 438, 442, 460, 533, 538, 543, 549, + 555, 557, 563, 569, 571, 577, 583, 590, 599, 609, 614, 623, 630, 635, 640, + 650, 3, 2, 3, 2, +} + +var lexerChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", +} + +var lexerModeNames = []string{ + "DEFAULT_MODE", +} + +var lexerLiteralNames = []string{ + "", "'rule'", "'filter'", "'drop'", "'macro'", "'list'", "'name'", "'items'", + "'condition'", "'desc'", "'actions'", "'output'", "'priority'", "'tags'", + "'prefilter'", "'enabled'", "'warn_evttypes'", "'skip-if-unknown-filter'", + "'append'", "'required_engine_version'", "'and'", "'or'", "'not'", "'<'", + "'<='", "'>'", "'>='", "'='", "'!='", "'in'", "'contains'", "'icontains'", + "'startswith'", "'endswith'", "'pmatch'", "'exists'", "'['", "']'", "'('", + "')'", "','", "'-'", +} + +var lexerSymbolicNames = []string{ + "", "RULE", "FILTER", "DROP", "MACRO", "LIST", "NAME", "ITEMS", "COND", + "DESC", "ACTIONS", "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", + "WARNEVTTYPE", "SKIPUNKNOWN", "FAPPEND", "REQ", "AND", "OR", "NOT", "LT", + "LE", "GT", "GE", "EQ", "NEQ", "IN", "CONTAINS", "ICONTAINS", "STARTSWITH", + "ENDSWITH", "PMATCH", "EXISTS", 
"LBRACK", "RBRACK", "LPAREN", "RPAREN", + "LISTSEP", "DECL", "DEF", "SEVERITY", "SFSEVERITY", "FSEVERITY", "ID", + "NUMBER", "PATH", "STRING", "TAG", "WS", "NL", "COMMENT", "ANY", +} + +var lexerRuleNames = []string{ + "RULE", "FILTER", "DROP", "MACRO", "LIST", "NAME", "ITEMS", "COND", "DESC", + "ACTIONS", "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", "WARNEVTTYPE", + "SKIPUNKNOWN", "FAPPEND", "REQ", "AND", "OR", "NOT", "LT", "LE", "GT", + "GE", "EQ", "NEQ", "IN", "CONTAINS", "ICONTAINS", "STARTSWITH", "ENDSWITH", + "PMATCH", "EXISTS", "LBRACK", "RBRACK", "LPAREN", "RPAREN", "LISTSEP", + "DECL", "DEF", "SEVERITY", "SFSEVERITY", "FSEVERITY", "ID", "NUMBER", "PATH", + "STRING", "TAG", "STRLIT", "ESC", "WS", "NL", "COMMENT", "ANY", "A", "B", + "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", + "R", "S", "T", "U", "V", "W", "X", "Y", "Z", +} + +type SfplLexer struct { + *antlr.BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +// NewSfplLexer produces a new lexer instance for the optional input antlr.CharStream. +// +// The *SfplLexer instance produced may be reused by calling the SetInputStream method. +// The initial lexer configuration is expensive to construct, and the object is not thread-safe; +// however, if used within a Golang sync.Pool, the construction cost amortizes well and the +// objects can be used in a thread-safe manner. +func NewSfplLexer(input antlr.CharStream) *SfplLexer { + l := new(SfplLexer) + lexerDeserializer := antlr.NewATNDeserializer(nil) + lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) + lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState)) + for index, ds := range lexerAtn.DecisionToState { + lexerDecisionToDFA[index] = antlr.NewDFA(ds, index) + } + l.BaseLexer = antlr.NewBaseLexer(input) + l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache()) + + l.channelNames = lexerChannelNames + l.modeNames = lexerModeNames + l.RuleNames = lexerRuleNames + l.LiteralNames = lexerLiteralNames + l.SymbolicNames = lexerSymbolicNames + l.GrammarFileName = "Sfpl.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// SfplLexer tokens. 
+const ( + SfplLexerRULE = 1 + SfplLexerFILTER = 2 + SfplLexerDROP = 3 + SfplLexerMACRO = 4 + SfplLexerLIST = 5 + SfplLexerNAME = 6 + SfplLexerITEMS = 7 + SfplLexerCOND = 8 + SfplLexerDESC = 9 + SfplLexerACTIONS = 10 + SfplLexerOUTPUT = 11 + SfplLexerPRIORITY = 12 + SfplLexerTAGS = 13 + SfplLexerPREFILTER = 14 + SfplLexerENABLED = 15 + SfplLexerWARNEVTTYPE = 16 + SfplLexerSKIPUNKNOWN = 17 + SfplLexerFAPPEND = 18 + SfplLexerREQ = 19 + SfplLexerAND = 20 + SfplLexerOR = 21 + SfplLexerNOT = 22 + SfplLexerLT = 23 + SfplLexerLE = 24 + SfplLexerGT = 25 + SfplLexerGE = 26 + SfplLexerEQ = 27 + SfplLexerNEQ = 28 + SfplLexerIN = 29 + SfplLexerCONTAINS = 30 + SfplLexerICONTAINS = 31 + SfplLexerSTARTSWITH = 32 + SfplLexerENDSWITH = 33 + SfplLexerPMATCH = 34 + SfplLexerEXISTS = 35 + SfplLexerLBRACK = 36 + SfplLexerRBRACK = 37 + SfplLexerLPAREN = 38 + SfplLexerRPAREN = 39 + SfplLexerLISTSEP = 40 + SfplLexerDECL = 41 + SfplLexerDEF = 42 + SfplLexerSEVERITY = 43 + SfplLexerSFSEVERITY = 44 + SfplLexerFSEVERITY = 45 + SfplLexerID = 46 + SfplLexerNUMBER = 47 + SfplLexerPATH = 48 + SfplLexerSTRING = 49 + SfplLexerTAG = 50 + SfplLexerWS = 51 + SfplLexerNL = 52 + SfplLexerCOMMENT = 53 + SfplLexerANY = 54 +) diff --git a/core/policyengine/lang/parser/sfpl_listener.go b/core/policyengine/policy/falco/lang/parser/sfpl_listener.go similarity index 76% rename from core/policyengine/lang/parser/sfpl_listener.go rename to core/policyengine/policy/falco/lang/parser/sfpl_listener.go index 4806ed8a..cab60144 100644 --- a/core/policyengine/lang/parser/sfpl_listener.go +++ b/core/policyengine/policy/falco/lang/parser/sfpl_listener.go @@ -1,4 +1,4 @@ -// Code generated from Sfpl.g4 by ANTLR 4.8. DO NOT EDIT. +// Code generated from Sfpl.g4 by ANTLR 4.9.2. DO NOT EDIT. package parser // Sfpl import "github.com/antlr/antlr4/runtime/Go/antlr" @@ -10,18 +10,33 @@ type SfplListener interface { // EnterPolicy is called when entering the policy production. EnterPolicy(c *PolicyContext) + // EnterDefs is called when entering the defs production. + EnterDefs(c *DefsContext) + // EnterPrule is called when entering the prule production. EnterPrule(c *PruleContext) + // EnterSrule is called when entering the srule production. + EnterSrule(c *SruleContext) + // EnterPfilter is called when entering the pfilter production. EnterPfilter(c *PfilterContext) + // EnterSfilter is called when entering the sfilter production. + EnterSfilter(c *SfilterContext) + + // EnterDrop_keyword is called when entering the drop_keyword production. + EnterDrop_keyword(c *Drop_keywordContext) + // EnterPmacro is called when entering the pmacro production. EnterPmacro(c *PmacroContext) // EnterPlist is called when entering the plist production. EnterPlist(c *PlistContext) + // EnterPreq is called when entering the preq production. + EnterPreq(c *PreqContext) + // EnterExpression is called when entering the expression production. EnterExpression(c *ExpressionContext) @@ -37,6 +52,9 @@ type SfplListener interface { // EnterItems is called when entering the items production. EnterItems(c *ItemsContext) + // EnterActions is called when entering the actions production. + EnterActions(c *ActionsContext) + // EnterTags is called when entering the tags production. EnterTags(c *TagsContext) @@ -55,6 +73,9 @@ type SfplListener interface { // EnterSkipunknown is called when entering the skipunknown production. EnterSkipunknown(c *SkipunknownContext) + // EnterFappend is called when entering the fappend production. 
+ EnterFappend(c *FappendContext) + // EnterVariable is called when entering the variable production. EnterVariable(c *VariableContext) @@ -73,18 +94,33 @@ type SfplListener interface { // ExitPolicy is called when exiting the policy production. ExitPolicy(c *PolicyContext) + // ExitDefs is called when exiting the defs production. + ExitDefs(c *DefsContext) + // ExitPrule is called when exiting the prule production. ExitPrule(c *PruleContext) + // ExitSrule is called when exiting the srule production. + ExitSrule(c *SruleContext) + // ExitPfilter is called when exiting the pfilter production. ExitPfilter(c *PfilterContext) + // ExitSfilter is called when exiting the sfilter production. + ExitSfilter(c *SfilterContext) + + // ExitDrop_keyword is called when exiting the drop_keyword production. + ExitDrop_keyword(c *Drop_keywordContext) + // ExitPmacro is called when exiting the pmacro production. ExitPmacro(c *PmacroContext) // ExitPlist is called when exiting the plist production. ExitPlist(c *PlistContext) + // ExitPreq is called when exiting the preq production. + ExitPreq(c *PreqContext) + // ExitExpression is called when exiting the expression production. ExitExpression(c *ExpressionContext) @@ -100,6 +136,9 @@ type SfplListener interface { // ExitItems is called when exiting the items production. ExitItems(c *ItemsContext) + // ExitActions is called when exiting the actions production. + ExitActions(c *ActionsContext) + // ExitTags is called when exiting the tags production. ExitTags(c *TagsContext) @@ -118,6 +157,9 @@ type SfplListener interface { // ExitSkipunknown is called when exiting the skipunknown production. ExitSkipunknown(c *SkipunknownContext) + // ExitFappend is called when exiting the fappend production. + ExitFappend(c *FappendContext) + // ExitVariable is called when exiting the variable production. ExitVariable(c *VariableContext) diff --git a/core/policyengine/lang/parser/sfpl_parser.go b/core/policyengine/policy/falco/lang/parser/sfpl_parser.go similarity index 56% rename from core/policyengine/lang/parser/sfpl_parser.go rename to core/policyengine/policy/falco/lang/parser/sfpl_parser.go index b7ac3d18..92081ca3 100644 --- a/core/policyengine/lang/parser/sfpl_parser.go +++ b/core/policyengine/policy/falco/lang/parser/sfpl_parser.go @@ -1,4 +1,4 @@ -// Code generated from Sfpl.g4 by ANTLR 4.8. DO NOT EDIT. +// Code generated from Sfpl.g4 by ANTLR 4.9.2. DO NOT EDIT. 
package parser // Sfpl import ( @@ -15,143 +15,200 @@ var _ = reflect.Copy var _ = strconv.Itoa var parserATN = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 52, 222, + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 56, 338, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, - 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 3, 2, 3, 2, - 3, 2, 3, 2, 6, 2, 49, 10, 2, 13, 2, 14, 2, 50, 3, 2, 3, 2, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 86, 10, 3, 12, 3, 14, 3, 89, 11, 3, - 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 101, - 10, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, - 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 8, 7, 8, 124, - 10, 8, 12, 8, 14, 8, 127, 11, 8, 3, 9, 3, 9, 3, 9, 7, 9, 132, 10, 9, 12, - 9, 14, 9, 135, 11, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, - 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 152, 10, - 10, 3, 10, 3, 10, 3, 10, 5, 10, 157, 10, 10, 7, 10, 159, 10, 10, 12, 10, - 14, 10, 162, 11, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 170, - 10, 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 176, 10, 11, 12, 11, 14, 11, - 179, 11, 11, 5, 11, 181, 10, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, - 12, 7, 12, 189, 10, 12, 12, 12, 14, 12, 192, 11, 12, 5, 12, 194, 10, 12, - 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, - 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 6, 20, 214, 10, 20, - 13, 20, 14, 20, 215, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 2, 2, 23, 2, 4, - 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, - 2, 6, 3, 2, 11, 12, 4, 2, 28, 28, 32, 32, 5, 2, 22, 22, 24, 24, 44, 48, - 4, 2, 22, 27, 29, 31, 2, 225, 2, 48, 3, 2, 2, 2, 4, 54, 3, 2, 2, 2, 6, - 90, 3, 2, 2, 2, 8, 102, 3, 2, 2, 2, 10, 110, 3, 2, 2, 2, 12, 118, 3, 2, - 2, 2, 14, 120, 3, 2, 2, 2, 16, 128, 3, 2, 2, 2, 18, 169, 3, 2, 2, 2, 20, - 171, 3, 2, 2, 2, 22, 184, 3, 2, 2, 2, 24, 197, 3, 2, 2, 2, 26, 199, 3, - 2, 2, 2, 28, 201, 3, 2, 2, 2, 30, 203, 3, 2, 2, 2, 32, 205, 3, 2, 2, 2, - 34, 207, 3, 2, 2, 2, 36, 209, 3, 2, 2, 2, 38, 213, 3, 2, 2, 2, 40, 217, - 3, 2, 2, 2, 42, 219, 3, 2, 2, 2, 44, 49, 5, 4, 3, 2, 45, 49, 5, 6, 4, 2, - 46, 49, 5, 8, 5, 2, 47, 49, 5, 10, 6, 2, 48, 44, 3, 2, 2, 2, 48, 45, 3, - 2, 2, 2, 48, 46, 3, 2, 2, 2, 48, 47, 3, 2, 2, 2, 49, 50, 3, 2, 2, 2, 50, - 48, 3, 2, 2, 2, 50, 51, 3, 2, 2, 2, 51, 52, 3, 2, 2, 2, 52, 53, 7, 2, 2, - 3, 53, 3, 3, 2, 2, 2, 54, 55, 7, 39, 2, 2, 55, 56, 7, 3, 2, 2, 56, 57, - 7, 40, 2, 2, 57, 58, 5, 38, 20, 2, 58, 59, 7, 10, 2, 2, 59, 60, 7, 40, - 2, 2, 60, 61, 5, 38, 20, 2, 61, 62, 7, 9, 2, 2, 62, 63, 7, 40, 2, 2, 63, - 64, 5, 12, 7, 2, 64, 65, 9, 2, 2, 2, 65, 66, 7, 40, 2, 2, 66, 67, 5, 38, - 20, 2, 67, 68, 7, 13, 2, 2, 68, 69, 7, 40, 2, 2, 69, 87, 5, 26, 14, 2, - 70, 71, 7, 14, 2, 2, 71, 72, 7, 40, 2, 2, 72, 86, 5, 22, 12, 2, 73, 74, - 7, 15, 2, 2, 74, 75, 7, 40, 2, 2, 75, 86, 5, 24, 13, 2, 76, 77, 7, 16, - 2, 2, 77, 78, 7, 40, 2, 2, 78, 86, 5, 28, 15, 2, 79, 80, 7, 17, 2, 2, 80, - 81, 7, 40, 2, 2, 81, 86, 5, 30, 16, 2, 82, 83, 7, 18, 2, 2, 83, 84, 7, - 40, 2, 2, 84, 86, 5, 32, 17, 2, 85, 70, 3, 2, 2, 2, 85, 73, 3, 2, 2, 2, - 85, 76, 3, 2, 2, 2, 85, 
79, 3, 2, 2, 2, 85, 82, 3, 2, 2, 2, 86, 89, 3, - 2, 2, 2, 87, 85, 3, 2, 2, 2, 87, 88, 3, 2, 2, 2, 88, 5, 3, 2, 2, 2, 89, - 87, 3, 2, 2, 2, 90, 91, 7, 39, 2, 2, 91, 92, 7, 4, 2, 2, 92, 93, 7, 40, - 2, 2, 93, 94, 7, 44, 2, 2, 94, 95, 7, 9, 2, 2, 95, 96, 7, 40, 2, 2, 96, - 100, 5, 12, 7, 2, 97, 98, 7, 16, 2, 2, 98, 99, 7, 40, 2, 2, 99, 101, 5, - 28, 15, 2, 100, 97, 3, 2, 2, 2, 100, 101, 3, 2, 2, 2, 101, 7, 3, 2, 2, - 2, 102, 103, 7, 39, 2, 2, 103, 104, 7, 5, 2, 2, 104, 105, 7, 40, 2, 2, - 105, 106, 7, 44, 2, 2, 106, 107, 7, 9, 2, 2, 107, 108, 7, 40, 2, 2, 108, - 109, 5, 12, 7, 2, 109, 9, 3, 2, 2, 2, 110, 111, 7, 39, 2, 2, 111, 112, - 7, 6, 2, 2, 112, 113, 7, 40, 2, 2, 113, 114, 7, 44, 2, 2, 114, 115, 7, - 8, 2, 2, 115, 116, 7, 40, 2, 2, 116, 117, 5, 20, 11, 2, 117, 11, 3, 2, - 2, 2, 118, 119, 5, 14, 8, 2, 119, 13, 3, 2, 2, 2, 120, 125, 5, 16, 9, 2, - 121, 122, 7, 20, 2, 2, 122, 124, 5, 16, 9, 2, 123, 121, 3, 2, 2, 2, 124, - 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126, 15, 3, - 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 133, 5, 18, 10, 2, 129, 130, 7, 19, - 2, 2, 130, 132, 5, 18, 10, 2, 131, 129, 3, 2, 2, 2, 132, 135, 3, 2, 2, - 2, 133, 131, 3, 2, 2, 2, 133, 134, 3, 2, 2, 2, 134, 17, 3, 2, 2, 2, 135, - 133, 3, 2, 2, 2, 136, 170, 5, 34, 18, 2, 137, 138, 7, 21, 2, 2, 138, 170, - 5, 18, 10, 2, 139, 140, 5, 36, 19, 2, 140, 141, 5, 42, 22, 2, 141, 170, - 3, 2, 2, 2, 142, 143, 5, 36, 19, 2, 143, 144, 5, 40, 21, 2, 144, 145, 5, - 36, 19, 2, 145, 170, 3, 2, 2, 2, 146, 147, 5, 36, 19, 2, 147, 148, 9, 3, - 2, 2, 148, 151, 7, 36, 2, 2, 149, 152, 5, 36, 19, 2, 150, 152, 5, 20, 11, - 2, 151, 149, 3, 2, 2, 2, 151, 150, 3, 2, 2, 2, 152, 160, 3, 2, 2, 2, 153, - 156, 7, 38, 2, 2, 154, 157, 5, 36, 19, 2, 155, 157, 5, 20, 11, 2, 156, - 154, 3, 2, 2, 2, 156, 155, 3, 2, 2, 2, 157, 159, 3, 2, 2, 2, 158, 153, - 3, 2, 2, 2, 159, 162, 3, 2, 2, 2, 160, 158, 3, 2, 2, 2, 160, 161, 3, 2, - 2, 2, 161, 163, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 163, 164, 7, 37, 2, 2, - 164, 170, 3, 2, 2, 2, 165, 166, 7, 36, 2, 2, 166, 167, 5, 12, 7, 2, 167, - 168, 7, 37, 2, 2, 168, 170, 3, 2, 2, 2, 169, 136, 3, 2, 2, 2, 169, 137, - 3, 2, 2, 2, 169, 139, 3, 2, 2, 2, 169, 142, 3, 2, 2, 2, 169, 146, 3, 2, - 2, 2, 169, 165, 3, 2, 2, 2, 170, 19, 3, 2, 2, 2, 171, 180, 7, 34, 2, 2, - 172, 177, 5, 36, 19, 2, 173, 174, 7, 38, 2, 2, 174, 176, 5, 36, 19, 2, - 175, 173, 3, 2, 2, 2, 176, 179, 3, 2, 2, 2, 177, 175, 3, 2, 2, 2, 177, - 178, 3, 2, 2, 2, 178, 181, 3, 2, 2, 2, 179, 177, 3, 2, 2, 2, 180, 172, - 3, 2, 2, 2, 180, 181, 3, 2, 2, 2, 181, 182, 3, 2, 2, 2, 182, 183, 7, 35, - 2, 2, 183, 21, 3, 2, 2, 2, 184, 193, 7, 34, 2, 2, 185, 190, 5, 36, 19, - 2, 186, 187, 7, 38, 2, 2, 187, 189, 5, 36, 19, 2, 188, 186, 3, 2, 2, 2, - 189, 192, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, - 194, 3, 2, 2, 2, 192, 190, 3, 2, 2, 2, 193, 185, 3, 2, 2, 2, 193, 194, - 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 196, 7, 35, 2, 2, 196, 23, 3, 2, - 2, 2, 197, 198, 5, 20, 11, 2, 198, 25, 3, 2, 2, 2, 199, 200, 7, 41, 2, - 2, 200, 27, 3, 2, 2, 2, 201, 202, 5, 36, 19, 2, 202, 29, 3, 2, 2, 2, 203, - 204, 5, 36, 19, 2, 204, 31, 3, 2, 2, 2, 205, 206, 5, 36, 19, 2, 206, 33, - 3, 2, 2, 2, 207, 208, 7, 44, 2, 2, 208, 35, 3, 2, 2, 2, 209, 210, 9, 4, - 2, 2, 210, 37, 3, 2, 2, 2, 211, 212, 6, 20, 2, 2, 212, 214, 11, 2, 2, 2, - 213, 211, 3, 2, 2, 2, 214, 215, 3, 2, 2, 2, 215, 213, 3, 2, 2, 2, 215, - 216, 3, 2, 2, 2, 216, 39, 3, 2, 2, 2, 217, 218, 9, 5, 2, 2, 218, 41, 3, - 2, 2, 2, 219, 220, 7, 33, 2, 2, 220, 43, 3, 2, 2, 2, 18, 48, 50, 85, 87, - 100, 125, 133, 151, 
156, 160, 169, 177, 180, 190, 193, 215, -} -var deserializer = antlr.NewATNDeserializer(nil) -var deserializedATN = deserializer.DeserializeFromUInt16(parserATN) - + 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, + 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, + 29, 9, 29, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 6, 2, 64, 10, 2, 13, 2, 14, 2, + 65, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 75, 10, 3, 12, 3, 14, + 3, 78, 11, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, + 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, + 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, + 4, 3, 4, 3, 4, 7, 4, 116, 10, 4, 12, 4, 14, 4, 119, 11, 4, 3, 5, 3, 5, + 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, + 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, + 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 155, 10, 5, 12, 5, + 14, 5, 158, 11, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, + 3, 6, 5, 6, 170, 10, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, + 3, 7, 3, 7, 5, 7, 182, 10, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, + 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 196, 10, 9, 3, 10, 3, 10, 3, 10, 3, + 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, + 3, 12, 3, 13, 3, 13, 3, 13, 7, 13, 216, 10, 13, 12, 13, 14, 13, 219, 11, + 13, 3, 14, 3, 14, 3, 14, 7, 14, 224, 10, 14, 12, 14, 14, 14, 227, 11, 14, + 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, + 15, 3, 15, 3, 15, 3, 15, 3, 15, 5, 15, 244, 10, 15, 3, 15, 3, 15, 3, 15, + 5, 15, 249, 10, 15, 7, 15, 251, 10, 15, 12, 15, 14, 15, 254, 11, 15, 3, + 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 5, 15, 262, 10, 15, 3, 16, 3, 16, + 3, 16, 3, 16, 7, 16, 268, 10, 16, 12, 16, 14, 16, 271, 11, 16, 5, 16, 273, + 10, 16, 3, 16, 5, 16, 276, 10, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, + 17, 7, 17, 284, 10, 17, 12, 17, 14, 17, 287, 11, 17, 5, 17, 289, 10, 17, + 3, 17, 5, 17, 292, 10, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 7, + 18, 300, 10, 18, 12, 18, 14, 18, 303, 11, 18, 5, 18, 305, 10, 18, 3, 18, + 5, 18, 308, 10, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, + 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, + 3, 27, 3, 27, 6, 27, 330, 10, 27, 13, 27, 14, 27, 331, 3, 28, 3, 28, 3, + 29, 3, 29, 3, 29, 2, 2, 30, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 2, 6, 3, + 2, 4, 5, 4, 2, 31, 31, 36, 36, 5, 2, 25, 25, 27, 27, 48, 52, 4, 2, 25, + 30, 32, 35, 2, 358, 2, 63, 3, 2, 2, 2, 4, 76, 3, 2, 2, 2, 6, 81, 3, 2, + 2, 2, 8, 120, 3, 2, 2, 2, 10, 159, 3, 2, 2, 2, 12, 171, 3, 2, 2, 2, 14, + 183, 3, 2, 2, 2, 16, 185, 3, 2, 2, 2, 18, 197, 3, 2, 2, 2, 20, 205, 3, + 2, 2, 2, 22, 210, 3, 2, 2, 2, 24, 212, 3, 2, 2, 2, 26, 220, 3, 2, 2, 2, + 28, 261, 3, 2, 2, 2, 30, 263, 3, 2, 2, 2, 32, 279, 3, 2, 2, 2, 34, 295, + 3, 2, 2, 2, 36, 311, 3, 2, 2, 2, 38, 313, 3, 2, 2, 2, 40, 315, 3, 2, 2, + 2, 42, 317, 3, 2, 2, 2, 44, 319, 3, 2, 2, 2, 46, 321, 3, 2, 2, 2, 48, 323, + 3, 2, 2, 2, 50, 325, 3, 2, 2, 2, 52, 329, 3, 2, 2, 2, 54, 333, 3, 2, 2, + 2, 56, 335, 3, 2, 2, 2, 58, 64, 5, 6, 4, 2, 59, 64, 5, 10, 6, 2, 60, 64, + 5, 16, 9, 2, 61, 64, 5, 18, 10, 2, 62, 64, 5, 20, 11, 2, 63, 58, 3, 2, + 2, 2, 63, 59, 3, 2, 2, 2, 63, 60, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 62, + 3, 2, 2, 2, 64, 65, 3, 2, 2, 2, 65, 63, 3, 2, 2, 2, 65, 66, 3, 2, 2, 2, + 66, 67, 3, 2, 2, 2, 67, 68, 7, 2, 
2, 3, 68, 3, 3, 2, 2, 2, 69, 75, 5, 8, + 5, 2, 70, 75, 5, 12, 7, 2, 71, 75, 5, 16, 9, 2, 72, 75, 5, 18, 10, 2, 73, + 75, 5, 20, 11, 2, 74, 69, 3, 2, 2, 2, 74, 70, 3, 2, 2, 2, 74, 71, 3, 2, + 2, 2, 74, 72, 3, 2, 2, 2, 74, 73, 3, 2, 2, 2, 75, 78, 3, 2, 2, 2, 76, 74, + 3, 2, 2, 2, 76, 77, 3, 2, 2, 2, 77, 79, 3, 2, 2, 2, 78, 76, 3, 2, 2, 2, + 79, 80, 7, 2, 2, 3, 80, 5, 3, 2, 2, 2, 81, 82, 7, 43, 2, 2, 82, 83, 7, + 3, 2, 2, 83, 84, 7, 44, 2, 2, 84, 85, 5, 52, 27, 2, 85, 86, 7, 11, 2, 2, + 86, 87, 7, 44, 2, 2, 87, 88, 5, 52, 27, 2, 88, 89, 7, 10, 2, 2, 89, 90, + 7, 44, 2, 2, 90, 117, 5, 22, 12, 2, 91, 92, 7, 13, 2, 2, 92, 93, 7, 44, + 2, 2, 93, 116, 5, 52, 27, 2, 94, 95, 7, 12, 2, 2, 95, 96, 7, 44, 2, 2, + 96, 116, 5, 32, 17, 2, 97, 98, 7, 14, 2, 2, 98, 99, 7, 44, 2, 2, 99, 116, + 5, 38, 20, 2, 100, 101, 7, 15, 2, 2, 101, 102, 7, 44, 2, 2, 102, 116, 5, + 34, 18, 2, 103, 104, 7, 16, 2, 2, 104, 105, 7, 44, 2, 2, 105, 116, 5, 36, + 19, 2, 106, 107, 7, 17, 2, 2, 107, 108, 7, 44, 2, 2, 108, 116, 5, 40, 21, + 2, 109, 110, 7, 18, 2, 2, 110, 111, 7, 44, 2, 2, 111, 116, 5, 42, 22, 2, + 112, 113, 7, 19, 2, 2, 113, 114, 7, 44, 2, 2, 114, 116, 5, 44, 23, 2, 115, + 91, 3, 2, 2, 2, 115, 94, 3, 2, 2, 2, 115, 97, 3, 2, 2, 2, 115, 100, 3, + 2, 2, 2, 115, 103, 3, 2, 2, 2, 115, 106, 3, 2, 2, 2, 115, 109, 3, 2, 2, + 2, 115, 112, 3, 2, 2, 2, 116, 119, 3, 2, 2, 2, 117, 115, 3, 2, 2, 2, 117, + 118, 3, 2, 2, 2, 118, 7, 3, 2, 2, 2, 119, 117, 3, 2, 2, 2, 120, 121, 7, + 43, 2, 2, 121, 122, 7, 3, 2, 2, 122, 123, 7, 44, 2, 2, 123, 124, 5, 52, + 27, 2, 124, 125, 7, 11, 2, 2, 125, 126, 7, 44, 2, 2, 126, 127, 5, 52, 27, + 2, 127, 128, 7, 10, 2, 2, 128, 129, 7, 44, 2, 2, 129, 156, 5, 22, 12, 2, + 130, 131, 7, 13, 2, 2, 131, 132, 7, 44, 2, 2, 132, 155, 5, 52, 27, 2, 133, + 134, 7, 12, 2, 2, 134, 135, 7, 44, 2, 2, 135, 155, 5, 32, 17, 2, 136, 137, + 7, 14, 2, 2, 137, 138, 7, 44, 2, 2, 138, 155, 5, 38, 20, 2, 139, 140, 7, + 15, 2, 2, 140, 141, 7, 44, 2, 2, 141, 155, 5, 34, 18, 2, 142, 143, 7, 16, + 2, 2, 143, 144, 7, 44, 2, 2, 144, 155, 5, 36, 19, 2, 145, 146, 7, 17, 2, + 2, 146, 147, 7, 44, 2, 2, 147, 155, 5, 40, 21, 2, 148, 149, 7, 18, 2, 2, + 149, 150, 7, 44, 2, 2, 150, 155, 5, 42, 22, 2, 151, 152, 7, 19, 2, 2, 152, + 153, 7, 44, 2, 2, 153, 155, 5, 44, 23, 2, 154, 130, 3, 2, 2, 2, 154, 133, + 3, 2, 2, 2, 154, 136, 3, 2, 2, 2, 154, 139, 3, 2, 2, 2, 154, 142, 3, 2, + 2, 2, 154, 145, 3, 2, 2, 2, 154, 148, 3, 2, 2, 2, 154, 151, 3, 2, 2, 2, + 155, 158, 3, 2, 2, 2, 156, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, + 9, 3, 2, 2, 2, 158, 156, 3, 2, 2, 2, 159, 160, 7, 43, 2, 2, 160, 161, 5, + 14, 8, 2, 161, 162, 7, 44, 2, 2, 162, 163, 7, 48, 2, 2, 163, 164, 7, 10, + 2, 2, 164, 165, 7, 44, 2, 2, 165, 169, 5, 22, 12, 2, 166, 167, 7, 17, 2, + 2, 167, 168, 7, 44, 2, 2, 168, 170, 5, 40, 21, 2, 169, 166, 3, 2, 2, 2, + 169, 170, 3, 2, 2, 2, 170, 11, 3, 2, 2, 2, 171, 172, 7, 43, 2, 2, 172, + 173, 5, 14, 8, 2, 173, 174, 7, 44, 2, 2, 174, 175, 7, 48, 2, 2, 175, 176, + 7, 10, 2, 2, 176, 177, 7, 44, 2, 2, 177, 181, 5, 22, 12, 2, 178, 179, 7, + 17, 2, 2, 179, 180, 7, 44, 2, 2, 180, 182, 5, 40, 21, 2, 181, 178, 3, 2, + 2, 2, 181, 182, 3, 2, 2, 2, 182, 13, 3, 2, 2, 2, 183, 184, 9, 2, 2, 2, + 184, 15, 3, 2, 2, 2, 185, 186, 7, 43, 2, 2, 186, 187, 7, 6, 2, 2, 187, + 188, 7, 44, 2, 2, 188, 189, 7, 48, 2, 2, 189, 190, 7, 10, 2, 2, 190, 191, + 7, 44, 2, 2, 191, 195, 5, 22, 12, 2, 192, 193, 7, 20, 2, 2, 193, 194, 7, + 44, 2, 2, 194, 196, 5, 46, 24, 2, 195, 192, 3, 2, 2, 2, 195, 196, 3, 2, + 2, 2, 196, 17, 3, 2, 2, 2, 197, 198, 7, 43, 2, 2, 198, 199, 7, 7, 2, 
2, + 199, 200, 7, 44, 2, 2, 200, 201, 7, 48, 2, 2, 201, 202, 7, 9, 2, 2, 202, + 203, 7, 44, 2, 2, 203, 204, 5, 30, 16, 2, 204, 19, 3, 2, 2, 2, 205, 206, + 7, 43, 2, 2, 206, 207, 7, 21, 2, 2, 207, 208, 7, 44, 2, 2, 208, 209, 5, + 50, 26, 2, 209, 21, 3, 2, 2, 2, 210, 211, 5, 24, 13, 2, 211, 23, 3, 2, + 2, 2, 212, 217, 5, 26, 14, 2, 213, 214, 7, 23, 2, 2, 214, 216, 5, 26, 14, + 2, 215, 213, 3, 2, 2, 2, 216, 219, 3, 2, 2, 2, 217, 215, 3, 2, 2, 2, 217, + 218, 3, 2, 2, 2, 218, 25, 3, 2, 2, 2, 219, 217, 3, 2, 2, 2, 220, 225, 5, + 28, 15, 2, 221, 222, 7, 22, 2, 2, 222, 224, 5, 28, 15, 2, 223, 221, 3, + 2, 2, 2, 224, 227, 3, 2, 2, 2, 225, 223, 3, 2, 2, 2, 225, 226, 3, 2, 2, + 2, 226, 27, 3, 2, 2, 2, 227, 225, 3, 2, 2, 2, 228, 262, 5, 48, 25, 2, 229, + 230, 7, 24, 2, 2, 230, 262, 5, 28, 15, 2, 231, 232, 5, 50, 26, 2, 232, + 233, 5, 56, 29, 2, 233, 262, 3, 2, 2, 2, 234, 235, 5, 50, 26, 2, 235, 236, + 5, 54, 28, 2, 236, 237, 5, 50, 26, 2, 237, 262, 3, 2, 2, 2, 238, 239, 5, + 50, 26, 2, 239, 240, 9, 3, 2, 2, 240, 243, 7, 40, 2, 2, 241, 244, 5, 50, + 26, 2, 242, 244, 5, 30, 16, 2, 243, 241, 3, 2, 2, 2, 243, 242, 3, 2, 2, + 2, 244, 252, 3, 2, 2, 2, 245, 248, 7, 42, 2, 2, 246, 249, 5, 50, 26, 2, + 247, 249, 5, 30, 16, 2, 248, 246, 3, 2, 2, 2, 248, 247, 3, 2, 2, 2, 249, + 251, 3, 2, 2, 2, 250, 245, 3, 2, 2, 2, 251, 254, 3, 2, 2, 2, 252, 250, + 3, 2, 2, 2, 252, 253, 3, 2, 2, 2, 253, 255, 3, 2, 2, 2, 254, 252, 3, 2, + 2, 2, 255, 256, 7, 41, 2, 2, 256, 262, 3, 2, 2, 2, 257, 258, 7, 40, 2, + 2, 258, 259, 5, 22, 12, 2, 259, 260, 7, 41, 2, 2, 260, 262, 3, 2, 2, 2, + 261, 228, 3, 2, 2, 2, 261, 229, 3, 2, 2, 2, 261, 231, 3, 2, 2, 2, 261, + 234, 3, 2, 2, 2, 261, 238, 3, 2, 2, 2, 261, 257, 3, 2, 2, 2, 262, 29, 3, + 2, 2, 2, 263, 272, 7, 38, 2, 2, 264, 269, 5, 50, 26, 2, 265, 266, 7, 42, + 2, 2, 266, 268, 5, 50, 26, 2, 267, 265, 3, 2, 2, 2, 268, 271, 3, 2, 2, + 2, 269, 267, 3, 2, 2, 2, 269, 270, 3, 2, 2, 2, 270, 273, 3, 2, 2, 2, 271, + 269, 3, 2, 2, 2, 272, 264, 3, 2, 2, 2, 272, 273, 3, 2, 2, 2, 273, 275, + 3, 2, 2, 2, 274, 276, 7, 42, 2, 2, 275, 274, 3, 2, 2, 2, 275, 276, 3, 2, + 2, 2, 276, 277, 3, 2, 2, 2, 277, 278, 7, 39, 2, 2, 278, 31, 3, 2, 2, 2, + 279, 288, 7, 38, 2, 2, 280, 285, 5, 50, 26, 2, 281, 282, 7, 42, 2, 2, 282, + 284, 5, 50, 26, 2, 283, 281, 3, 2, 2, 2, 284, 287, 3, 2, 2, 2, 285, 283, + 3, 2, 2, 2, 285, 286, 3, 2, 2, 2, 286, 289, 3, 2, 2, 2, 287, 285, 3, 2, + 2, 2, 288, 280, 3, 2, 2, 2, 288, 289, 3, 2, 2, 2, 289, 291, 3, 2, 2, 2, + 290, 292, 7, 42, 2, 2, 291, 290, 3, 2, 2, 2, 291, 292, 3, 2, 2, 2, 292, + 293, 3, 2, 2, 2, 293, 294, 7, 39, 2, 2, 294, 33, 3, 2, 2, 2, 295, 304, + 7, 38, 2, 2, 296, 301, 5, 50, 26, 2, 297, 298, 7, 42, 2, 2, 298, 300, 5, + 50, 26, 2, 299, 297, 3, 2, 2, 2, 300, 303, 3, 2, 2, 2, 301, 299, 3, 2, + 2, 2, 301, 302, 3, 2, 2, 2, 302, 305, 3, 2, 2, 2, 303, 301, 3, 2, 2, 2, + 304, 296, 3, 2, 2, 2, 304, 305, 3, 2, 2, 2, 305, 307, 3, 2, 2, 2, 306, + 308, 7, 42, 2, 2, 307, 306, 3, 2, 2, 2, 307, 308, 3, 2, 2, 2, 308, 309, + 3, 2, 2, 2, 309, 310, 7, 39, 2, 2, 310, 35, 3, 2, 2, 2, 311, 312, 5, 30, + 16, 2, 312, 37, 3, 2, 2, 2, 313, 314, 7, 45, 2, 2, 314, 39, 3, 2, 2, 2, + 315, 316, 5, 50, 26, 2, 316, 41, 3, 2, 2, 2, 317, 318, 5, 50, 26, 2, 318, + 43, 3, 2, 2, 2, 319, 320, 5, 50, 26, 2, 320, 45, 3, 2, 2, 2, 321, 322, + 5, 50, 26, 2, 322, 47, 3, 2, 2, 2, 323, 324, 7, 48, 2, 2, 324, 49, 3, 2, + 2, 2, 325, 326, 9, 4, 2, 2, 326, 51, 3, 2, 2, 2, 327, 328, 6, 27, 2, 2, + 328, 330, 11, 2, 2, 2, 329, 327, 3, 2, 2, 2, 330, 331, 3, 2, 2, 2, 331, + 329, 3, 2, 2, 2, 331, 332, 3, 2, 2, 2, 332, 53, 3, 2, 
2, 2, 333, 334, 9, + 5, 2, 2, 334, 55, 3, 2, 2, 2, 335, 336, 7, 37, 2, 2, 336, 57, 3, 2, 2, + 2, 29, 63, 65, 74, 76, 115, 117, 154, 156, 169, 181, 195, 217, 225, 243, + 248, 252, 261, 269, 272, 275, 285, 288, 291, 301, 304, 307, 331, +} var literalNames = []string{ - "", "'rule'", "'filter'", "'macro'", "'list'", "'name'", "'items'", "'condition'", - "'desc'", "'action'", "'output'", "'priority'", "'tags'", "'prefilter'", - "'enabled'", "'warn_evttypes'", "'skip-if-unknown-filter'", "'and'", "'or'", - "'not'", "'<'", "'<='", "'>'", "'>='", "'='", "'!='", "'in'", "'contains'", - "'icontains'", "'startswith'", "'pmatch'", "'exists'", "'['", "']'", "'('", + "", "'rule'", "'filter'", "'drop'", "'macro'", "'list'", "'name'", "'items'", + "'condition'", "'desc'", "'actions'", "'output'", "'priority'", "'tags'", + "'prefilter'", "'enabled'", "'warn_evttypes'", "'skip-if-unknown-filter'", + "'append'", "'required_engine_version'", "'and'", "'or'", "'not'", "'<'", + "'<='", "'>'", "'>='", "'='", "'!='", "'in'", "'contains'", "'icontains'", + "'startswith'", "'endswith'", "'pmatch'", "'exists'", "'['", "']'", "'('", "')'", "','", "'-'", } var symbolicNames = []string{ - "", "RULE", "FILTER", "MACRO", "LIST", "NAME", "ITEMS", "COND", "DESC", - "ACTION", "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", "WARNEVTTYPE", - "SKIPUNKNOWN", "AND", "OR", "NOT", "LT", "LE", "GT", "GE", "EQ", "NEQ", - "IN", "CONTAINS", "ICONTAINS", "STARTSWITH", "PMATCH", "EXISTS", "LBRACK", - "RBRACK", "LPAREN", "RPAREN", "LISTSEP", "DECL", "DEF", "SEVERITY", "SFSEVERITY", - "FSEVERITY", "ID", "NUMBER", "PATH", "STRING", "TAG", "WS", "NL", "COMMENT", - "ANY", + "", "RULE", "FILTER", "DROP", "MACRO", "LIST", "NAME", "ITEMS", "COND", + "DESC", "ACTIONS", "OUTPUT", "PRIORITY", "TAGS", "PREFILTER", "ENABLED", + "WARNEVTTYPE", "SKIPUNKNOWN", "FAPPEND", "REQ", "AND", "OR", "NOT", "LT", + "LE", "GT", "GE", "EQ", "NEQ", "IN", "CONTAINS", "ICONTAINS", "STARTSWITH", + "ENDSWITH", "PMATCH", "EXISTS", "LBRACK", "RBRACK", "LPAREN", "RPAREN", + "LISTSEP", "DECL", "DEF", "SEVERITY", "SFSEVERITY", "FSEVERITY", "ID", + "NUMBER", "PATH", "STRING", "TAG", "WS", "NL", "COMMENT", "ANY", } var ruleNames = []string{ - "policy", "prule", "pfilter", "pmacro", "plist", "expression", "or_expression", - "and_expression", "term", "items", "tags", "prefilter", "severity", "enabled", - "warnevttype", "skipunknown", "variable", "atom", "text", "binary_operator", + "policy", "defs", "prule", "srule", "pfilter", "sfilter", "drop_keyword", + "pmacro", "plist", "preq", "expression", "or_expression", "and_expression", + "term", "items", "actions", "tags", "prefilter", "severity", "enabled", + "warnevttype", "skipunknown", "fappend", "variable", "atom", "text", "binary_operator", "unary_operator", } -var decisionToDFA = make([]*antlr.DFA, len(deserializedATN.DecisionToState)) - -func init() { - for index, ds := range deserializedATN.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(ds, index) - } -} type SfplParser struct { *antlr.BaseParser } +// NewSfplParser produces a new parser instance for the optional input antlr.TokenStream. +// +// The *SfplParser instance produced may be reused by calling the SetInputStream method. +// The initial parser configuration is expensive to construct, and the object is not thread-safe; +// however, if used within a Golang sync.Pool, the construction cost amortizes well and the +// objects can be used in a thread-safe manner. 
func NewSfplParser(input antlr.TokenStream) *SfplParser { this := new(SfplParser) - + deserializer := antlr.NewATNDeserializer(nil) + deserializedATN := deserializer.DeserializeFromUInt16(parserATN) + decisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState)) + for index, ds := range deserializedATN.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(ds, index) + } this.BaseParser = antlr.NewBaseParser(input) this.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache()) @@ -168,79 +225,90 @@ const ( SfplParserEOF = antlr.TokenEOF SfplParserRULE = 1 SfplParserFILTER = 2 - SfplParserMACRO = 3 - SfplParserLIST = 4 - SfplParserNAME = 5 - SfplParserITEMS = 6 - SfplParserCOND = 7 - SfplParserDESC = 8 - SfplParserACTION = 9 - SfplParserOUTPUT = 10 - SfplParserPRIORITY = 11 - SfplParserTAGS = 12 - SfplParserPREFILTER = 13 - SfplParserENABLED = 14 - SfplParserWARNEVTTYPE = 15 - SfplParserSKIPUNKNOWN = 16 - SfplParserAND = 17 - SfplParserOR = 18 - SfplParserNOT = 19 - SfplParserLT = 20 - SfplParserLE = 21 - SfplParserGT = 22 - SfplParserGE = 23 - SfplParserEQ = 24 - SfplParserNEQ = 25 - SfplParserIN = 26 - SfplParserCONTAINS = 27 - SfplParserICONTAINS = 28 - SfplParserSTARTSWITH = 29 - SfplParserPMATCH = 30 - SfplParserEXISTS = 31 - SfplParserLBRACK = 32 - SfplParserRBRACK = 33 - SfplParserLPAREN = 34 - SfplParserRPAREN = 35 - SfplParserLISTSEP = 36 - SfplParserDECL = 37 - SfplParserDEF = 38 - SfplParserSEVERITY = 39 - SfplParserSFSEVERITY = 40 - SfplParserFSEVERITY = 41 - SfplParserID = 42 - SfplParserNUMBER = 43 - SfplParserPATH = 44 - SfplParserSTRING = 45 - SfplParserTAG = 46 - SfplParserWS = 47 - SfplParserNL = 48 - SfplParserCOMMENT = 49 - SfplParserANY = 50 + SfplParserDROP = 3 + SfplParserMACRO = 4 + SfplParserLIST = 5 + SfplParserNAME = 6 + SfplParserITEMS = 7 + SfplParserCOND = 8 + SfplParserDESC = 9 + SfplParserACTIONS = 10 + SfplParserOUTPUT = 11 + SfplParserPRIORITY = 12 + SfplParserTAGS = 13 + SfplParserPREFILTER = 14 + SfplParserENABLED = 15 + SfplParserWARNEVTTYPE = 16 + SfplParserSKIPUNKNOWN = 17 + SfplParserFAPPEND = 18 + SfplParserREQ = 19 + SfplParserAND = 20 + SfplParserOR = 21 + SfplParserNOT = 22 + SfplParserLT = 23 + SfplParserLE = 24 + SfplParserGT = 25 + SfplParserGE = 26 + SfplParserEQ = 27 + SfplParserNEQ = 28 + SfplParserIN = 29 + SfplParserCONTAINS = 30 + SfplParserICONTAINS = 31 + SfplParserSTARTSWITH = 32 + SfplParserENDSWITH = 33 + SfplParserPMATCH = 34 + SfplParserEXISTS = 35 + SfplParserLBRACK = 36 + SfplParserRBRACK = 37 + SfplParserLPAREN = 38 + SfplParserRPAREN = 39 + SfplParserLISTSEP = 40 + SfplParserDECL = 41 + SfplParserDEF = 42 + SfplParserSEVERITY = 43 + SfplParserSFSEVERITY = 44 + SfplParserFSEVERITY = 45 + SfplParserID = 46 + SfplParserNUMBER = 47 + SfplParserPATH = 48 + SfplParserSTRING = 49 + SfplParserTAG = 50 + SfplParserWS = 51 + SfplParserNL = 52 + SfplParserCOMMENT = 53 + SfplParserANY = 54 ) // SfplParser rules. 
const ( SfplParserRULE_policy = 0 - SfplParserRULE_prule = 1 - SfplParserRULE_pfilter = 2 - SfplParserRULE_pmacro = 3 - SfplParserRULE_plist = 4 - SfplParserRULE_expression = 5 - SfplParserRULE_or_expression = 6 - SfplParserRULE_and_expression = 7 - SfplParserRULE_term = 8 - SfplParserRULE_items = 9 - SfplParserRULE_tags = 10 - SfplParserRULE_prefilter = 11 - SfplParserRULE_severity = 12 - SfplParserRULE_enabled = 13 - SfplParserRULE_warnevttype = 14 - SfplParserRULE_skipunknown = 15 - SfplParserRULE_variable = 16 - SfplParserRULE_atom = 17 - SfplParserRULE_text = 18 - SfplParserRULE_binary_operator = 19 - SfplParserRULE_unary_operator = 20 + SfplParserRULE_defs = 1 + SfplParserRULE_prule = 2 + SfplParserRULE_srule = 3 + SfplParserRULE_pfilter = 4 + SfplParserRULE_sfilter = 5 + SfplParserRULE_drop_keyword = 6 + SfplParserRULE_pmacro = 7 + SfplParserRULE_plist = 8 + SfplParserRULE_preq = 9 + SfplParserRULE_expression = 10 + SfplParserRULE_or_expression = 11 + SfplParserRULE_and_expression = 12 + SfplParserRULE_term = 13 + SfplParserRULE_items = 14 + SfplParserRULE_actions = 15 + SfplParserRULE_tags = 16 + SfplParserRULE_prefilter = 17 + SfplParserRULE_severity = 18 + SfplParserRULE_enabled = 19 + SfplParserRULE_warnevttype = 20 + SfplParserRULE_skipunknown = 21 + SfplParserRULE_fappend = 22 + SfplParserRULE_variable = 23 + SfplParserRULE_atom = 24 + SfplParserRULE_text = 25 + SfplParserRULE_binary_operator = 26 + SfplParserRULE_unary_operator = 27 ) // IPolicyContext is an interface to support dynamic dispatch. @@ -377,6 +445,29 @@ func (s *PolicyContext) Plist(i int) IPlistContext { return t.(IPlistContext) } +func (s *PolicyContext) AllPreq() []IPreqContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPreqContext)(nil)).Elem()) + var tst = make([]IPreqContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IPreqContext) + } + } + + return tst +} + +func (s *PolicyContext) Preq(i int) IPreqContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IPreqContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IPreqContext) +} + func (s *PolicyContext) GetRuleContext() antlr.RuleContext { return s } @@ -429,357 +520,248 @@ func (p *SfplParser) Policy() (localctx IPolicyContext) { }() p.EnterOuterAlt(localctx, 1) - p.SetState(46) + p.SetState(61) p.GetErrorHandler().Sync(p) _la = p.GetTokenStream().LA(1) for ok := true; ok; ok = _la == SfplParserDECL { - p.SetState(46) + p.SetState(61) p.GetErrorHandler().Sync(p) switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 0, p.GetParserRuleContext()) { case 1: { - p.SetState(42) + p.SetState(56) p.Prule() } case 2: { - p.SetState(43) + p.SetState(57) p.Pfilter() } case 3: { - p.SetState(44) + p.SetState(58) p.Pmacro() } case 4: { - p.SetState(45) + p.SetState(59) p.Plist() } + case 5: + { + p.SetState(60) + p.Preq() + } + } - p.SetState(48) + p.SetState(63) p.GetErrorHandler().Sync(p) _la = p.GetTokenStream().LA(1) } { - p.SetState(50) + p.SetState(65) p.Match(SfplParserEOF) } return localctx } -// IPruleContext is an interface to support dynamic dispatch. -type IPruleContext interface { +// IDefsContext is an interface to support dynamic dispatch. +type IDefsContext interface { antlr.ParserRuleContext // GetParser returns the parser. GetParser() antlr.Parser - // IsPruleContext differentiates from other interfaces. - IsPruleContext() + // IsDefsContext differentiates from other interfaces. 
+ IsDefsContext() } -type PruleContext struct { +type DefsContext struct { *antlr.BaseParserRuleContext parser antlr.Parser } -func NewEmptyPruleContext() *PruleContext { - var p = new(PruleContext) +func NewEmptyDefsContext() *DefsContext { + var p = new(DefsContext) p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SfplParserRULE_prule + p.RuleIndex = SfplParserRULE_defs return p } -func (*PruleContext) IsPruleContext() {} +func (*DefsContext) IsDefsContext() {} -func NewPruleContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PruleContext { - var p = new(PruleContext) +func NewDefsContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *DefsContext { + var p = new(DefsContext) p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) p.parser = parser - p.RuleIndex = SfplParserRULE_prule + p.RuleIndex = SfplParserRULE_defs return p } -func (s *PruleContext) GetParser() antlr.Parser { return s.parser } - -func (s *PruleContext) DECL() antlr.TerminalNode { - return s.GetToken(SfplParserDECL, 0) -} - -func (s *PruleContext) RULE() antlr.TerminalNode { - return s.GetToken(SfplParserRULE, 0) -} - -func (s *PruleContext) AllDEF() []antlr.TerminalNode { - return s.GetTokens(SfplParserDEF) -} - -func (s *PruleContext) DEF(i int) antlr.TerminalNode { - return s.GetToken(SfplParserDEF, i) -} - -func (s *PruleContext) AllText() []ITextContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ITextContext)(nil)).Elem()) - var tst = make([]ITextContext, len(ts)) - - for i, t := range ts { - if t != nil { - tst[i] = t.(ITextContext) - } - } - - return tst -} - -func (s *PruleContext) Text(i int) ITextContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*ITextContext)(nil)).Elem(), i) - - if t == nil { - return nil - } - - return t.(ITextContext) -} - -func (s *PruleContext) DESC() antlr.TerminalNode { - return s.GetToken(SfplParserDESC, 0) -} - -func (s *PruleContext) COND() antlr.TerminalNode { - return s.GetToken(SfplParserCOND, 0) -} - -func (s *PruleContext) Expression() IExpressionContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0) - - if t == nil { - return nil - } - - return t.(IExpressionContext) -} - -func (s *PruleContext) PRIORITY() antlr.TerminalNode { - return s.GetToken(SfplParserPRIORITY, 0) -} - -func (s *PruleContext) Severity() ISeverityContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*ISeverityContext)(nil)).Elem(), 0) - - if t == nil { - return nil - } - - return t.(ISeverityContext) -} - -func (s *PruleContext) ACTION() antlr.TerminalNode { - return s.GetToken(SfplParserACTION, 0) -} - -func (s *PruleContext) OUTPUT() antlr.TerminalNode { - return s.GetToken(SfplParserOUTPUT, 0) -} - -func (s *PruleContext) AllTAGS() []antlr.TerminalNode { - return s.GetTokens(SfplParserTAGS) -} +func (s *DefsContext) GetParser() antlr.Parser { return s.parser } -func (s *PruleContext) TAGS(i int) antlr.TerminalNode { - return s.GetToken(SfplParserTAGS, i) +func (s *DefsContext) EOF() antlr.TerminalNode { + return s.GetToken(SfplParserEOF, 0) } -func (s *PruleContext) AllTags() []ITagsContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ITagsContext)(nil)).Elem()) - var tst = make([]ITagsContext, len(ts)) +func (s *DefsContext) AllSrule() []ISruleContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ISruleContext)(nil)).Elem()) + var tst = make([]ISruleContext, len(ts)) for i, t := range ts { if t != nil { - tst[i] = 
t.(ITagsContext) + tst[i] = t.(ISruleContext) } } return tst } -func (s *PruleContext) Tags(i int) ITagsContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*ITagsContext)(nil)).Elem(), i) +func (s *DefsContext) Srule(i int) ISruleContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ISruleContext)(nil)).Elem(), i) if t == nil { return nil } - return t.(ITagsContext) -} - -func (s *PruleContext) AllPREFILTER() []antlr.TerminalNode { - return s.GetTokens(SfplParserPREFILTER) -} - -func (s *PruleContext) PREFILTER(i int) antlr.TerminalNode { - return s.GetToken(SfplParserPREFILTER, i) + return t.(ISruleContext) } -func (s *PruleContext) AllPrefilter() []IPrefilterContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPrefilterContext)(nil)).Elem()) - var tst = make([]IPrefilterContext, len(ts)) +func (s *DefsContext) AllSfilter() []ISfilterContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ISfilterContext)(nil)).Elem()) + var tst = make([]ISfilterContext, len(ts)) for i, t := range ts { if t != nil { - tst[i] = t.(IPrefilterContext) + tst[i] = t.(ISfilterContext) } } return tst } -func (s *PruleContext) Prefilter(i int) IPrefilterContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IPrefilterContext)(nil)).Elem(), i) +func (s *DefsContext) Sfilter(i int) ISfilterContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ISfilterContext)(nil)).Elem(), i) if t == nil { return nil } - return t.(IPrefilterContext) -} - -func (s *PruleContext) AllENABLED() []antlr.TerminalNode { - return s.GetTokens(SfplParserENABLED) -} - -func (s *PruleContext) ENABLED(i int) antlr.TerminalNode { - return s.GetToken(SfplParserENABLED, i) + return t.(ISfilterContext) } -func (s *PruleContext) AllEnabled() []IEnabledContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IEnabledContext)(nil)).Elem()) - var tst = make([]IEnabledContext, len(ts)) +func (s *DefsContext) AllPmacro() []IPmacroContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPmacroContext)(nil)).Elem()) + var tst = make([]IPmacroContext, len(ts)) for i, t := range ts { if t != nil { - tst[i] = t.(IEnabledContext) + tst[i] = t.(IPmacroContext) } } return tst } -func (s *PruleContext) Enabled(i int) IEnabledContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IEnabledContext)(nil)).Elem(), i) +func (s *DefsContext) Pmacro(i int) IPmacroContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IPmacroContext)(nil)).Elem(), i) if t == nil { return nil } - return t.(IEnabledContext) -} - -func (s *PruleContext) AllWARNEVTTYPE() []antlr.TerminalNode { - return s.GetTokens(SfplParserWARNEVTTYPE) -} - -func (s *PruleContext) WARNEVTTYPE(i int) antlr.TerminalNode { - return s.GetToken(SfplParserWARNEVTTYPE, i) + return t.(IPmacroContext) } -func (s *PruleContext) AllWarnevttype() []IWarnevttypeContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IWarnevttypeContext)(nil)).Elem()) - var tst = make([]IWarnevttypeContext, len(ts)) +func (s *DefsContext) AllPlist() []IPlistContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPlistContext)(nil)).Elem()) + var tst = make([]IPlistContext, len(ts)) for i, t := range ts { if t != nil { - tst[i] = t.(IWarnevttypeContext) + tst[i] = t.(IPlistContext) } } return tst } -func (s *PruleContext) Warnevttype(i int) IWarnevttypeContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IWarnevttypeContext)(nil)).Elem(), i) +func (s *DefsContext) Plist(i int) IPlistContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IPlistContext)(nil)).Elem(), i) 
if t == nil { return nil } - return t.(IWarnevttypeContext) -} - -func (s *PruleContext) AllSKIPUNKNOWN() []antlr.TerminalNode { - return s.GetTokens(SfplParserSKIPUNKNOWN) -} - -func (s *PruleContext) SKIPUNKNOWN(i int) antlr.TerminalNode { - return s.GetToken(SfplParserSKIPUNKNOWN, i) + return t.(IPlistContext) } -func (s *PruleContext) AllSkipunknown() []ISkipunknownContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ISkipunknownContext)(nil)).Elem()) - var tst = make([]ISkipunknownContext, len(ts)) +func (s *DefsContext) AllPreq() []IPreqContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPreqContext)(nil)).Elem()) + var tst = make([]IPreqContext, len(ts)) for i, t := range ts { if t != nil { - tst[i] = t.(ISkipunknownContext) + tst[i] = t.(IPreqContext) } } return tst } -func (s *PruleContext) Skipunknown(i int) ISkipunknownContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*ISkipunknownContext)(nil)).Elem(), i) +func (s *DefsContext) Preq(i int) IPreqContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IPreqContext)(nil)).Elem(), i) if t == nil { return nil } - return t.(ISkipunknownContext) + return t.(IPreqContext) } -func (s *PruleContext) GetRuleContext() antlr.RuleContext { +func (s *DefsContext) GetRuleContext() antlr.RuleContext { return s } -func (s *PruleContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { +func (s *DefsContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { return antlr.TreesStringTree(s, ruleNames, recog) } -func (s *PruleContext) EnterRule(listener antlr.ParseTreeListener) { +func (s *DefsContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(SfplListener); ok { - listenerT.EnterPrule(s) + listenerT.EnterDefs(s) } } -func (s *PruleContext) ExitRule(listener antlr.ParseTreeListener) { +func (s *DefsContext) ExitRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(SfplListener); ok { - listenerT.ExitPrule(s) + listenerT.ExitDefs(s) } } -func (s *PruleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { +func (s *DefsContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { switch t := visitor.(type) { case SfplVisitor: - return t.VisitPrule(s) + return t.VisitDefs(s) default: return t.VisitChildren(s) } } -func (p *SfplParser) Prule() (localctx IPruleContext) { - localctx = NewPruleContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 2, SfplParserRULE_prule) +func (p *SfplParser) Defs() (localctx IDefsContext) { + localctx = NewDefsContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 2, SfplParserRULE_defs) var _la int defer func() { @@ -799,231 +781,1714 @@ func (p *SfplParser) Prule() (localctx IPruleContext) { }() p.EnterOuterAlt(localctx, 1) - { - p.SetState(52) - p.Match(SfplParserDECL) - } - { - p.SetState(53) - p.Match(SfplParserRULE) - } - { - p.SetState(54) - p.Match(SfplParserDEF) + p.SetState(74) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SfplParserDECL { + p.SetState(72) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 2, p.GetParserRuleContext()) { + case 1: + { + p.SetState(67) + p.Srule() + } + + case 2: + { + p.SetState(68) + p.Sfilter() + } + + case 3: + { + p.SetState(69) + p.Pmacro() + } + + case 4: + { + p.SetState(70) + p.Plist() + } + + case 5: + { + p.SetState(71) + p.Preq() + } + + } + + p.SetState(76) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) } { - 
p.SetState(55) - p.Text() + p.SetState(77) + p.Match(SfplParserEOF) } - { - p.SetState(56) - p.Match(SfplParserDESC) + + return localctx +} + +// IPruleContext is an interface to support dynamic dispatch. +type IPruleContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsPruleContext differentiates from other interfaces. + IsPruleContext() +} + +type PruleContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPruleContext() *PruleContext { + var p = new(PruleContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SfplParserRULE_prule + return p +} + +func (*PruleContext) IsPruleContext() {} + +func NewPruleContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PruleContext { + var p = new(PruleContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SfplParserRULE_prule + + return p +} + +func (s *PruleContext) GetParser() antlr.Parser { return s.parser } + +func (s *PruleContext) DECL() antlr.TerminalNode { + return s.GetToken(SfplParserDECL, 0) +} + +func (s *PruleContext) RULE() antlr.TerminalNode { + return s.GetToken(SfplParserRULE, 0) +} + +func (s *PruleContext) AllDEF() []antlr.TerminalNode { + return s.GetTokens(SfplParserDEF) +} + +func (s *PruleContext) DEF(i int) antlr.TerminalNode { + return s.GetToken(SfplParserDEF, i) +} + +func (s *PruleContext) AllText() []ITextContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ITextContext)(nil)).Elem()) + var tst = make([]ITextContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(ITextContext) + } + } + + return tst +} + +func (s *PruleContext) Text(i int) ITextContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ITextContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(ITextContext) +} + +func (s *PruleContext) DESC() antlr.TerminalNode { + return s.GetToken(SfplParserDESC, 0) +} + +func (s *PruleContext) COND() antlr.TerminalNode { + return s.GetToken(SfplParserCOND, 0) +} + +func (s *PruleContext) Expression() IExpressionContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IExpressionContext) +} + +func (s *PruleContext) AllOUTPUT() []antlr.TerminalNode { + return s.GetTokens(SfplParserOUTPUT) +} + +func (s *PruleContext) OUTPUT(i int) antlr.TerminalNode { + return s.GetToken(SfplParserOUTPUT, i) +} + +func (s *PruleContext) AllACTIONS() []antlr.TerminalNode { + return s.GetTokens(SfplParserACTIONS) +} + +func (s *PruleContext) ACTIONS(i int) antlr.TerminalNode { + return s.GetToken(SfplParserACTIONS, i) +} + +func (s *PruleContext) AllActions() []IActionsContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IActionsContext)(nil)).Elem()) + var tst = make([]IActionsContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IActionsContext) + } + } + + return tst +} + +func (s *PruleContext) Actions(i int) IActionsContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IActionsContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IActionsContext) +} + +func (s *PruleContext) AllPRIORITY() []antlr.TerminalNode { + return s.GetTokens(SfplParserPRIORITY) +} + +func (s *PruleContext) PRIORITY(i int) antlr.TerminalNode { + return s.GetToken(SfplParserPRIORITY, i) +} + +func (s *PruleContext) AllSeverity() 
[]ISeverityContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ISeverityContext)(nil)).Elem()) + var tst = make([]ISeverityContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(ISeverityContext) + } + } + + return tst +} + +func (s *PruleContext) Severity(i int) ISeverityContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ISeverityContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(ISeverityContext) +} + +func (s *PruleContext) AllTAGS() []antlr.TerminalNode { + return s.GetTokens(SfplParserTAGS) +} + +func (s *PruleContext) TAGS(i int) antlr.TerminalNode { + return s.GetToken(SfplParserTAGS, i) +} + +func (s *PruleContext) AllTags() []ITagsContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ITagsContext)(nil)).Elem()) + var tst = make([]ITagsContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(ITagsContext) + } + } + + return tst +} + +func (s *PruleContext) Tags(i int) ITagsContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ITagsContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(ITagsContext) +} + +func (s *PruleContext) AllPREFILTER() []antlr.TerminalNode { + return s.GetTokens(SfplParserPREFILTER) +} + +func (s *PruleContext) PREFILTER(i int) antlr.TerminalNode { + return s.GetToken(SfplParserPREFILTER, i) +} + +func (s *PruleContext) AllPrefilter() []IPrefilterContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IPrefilterContext)(nil)).Elem()) + var tst = make([]IPrefilterContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IPrefilterContext) + } + } + + return tst +} + +func (s *PruleContext) Prefilter(i int) IPrefilterContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IPrefilterContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IPrefilterContext) +} + +func (s *PruleContext) AllENABLED() []antlr.TerminalNode { + return s.GetTokens(SfplParserENABLED) +} + +func (s *PruleContext) ENABLED(i int) antlr.TerminalNode { + return s.GetToken(SfplParserENABLED, i) +} + +func (s *PruleContext) AllEnabled() []IEnabledContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IEnabledContext)(nil)).Elem()) + var tst = make([]IEnabledContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IEnabledContext) + } + } + + return tst +} + +func (s *PruleContext) Enabled(i int) IEnabledContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IEnabledContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IEnabledContext) +} + +func (s *PruleContext) AllWARNEVTTYPE() []antlr.TerminalNode { + return s.GetTokens(SfplParserWARNEVTTYPE) +} + +func (s *PruleContext) WARNEVTTYPE(i int) antlr.TerminalNode { + return s.GetToken(SfplParserWARNEVTTYPE, i) +} + +func (s *PruleContext) AllWarnevttype() []IWarnevttypeContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IWarnevttypeContext)(nil)).Elem()) + var tst = make([]IWarnevttypeContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(IWarnevttypeContext) + } + } + + return tst +} + +func (s *PruleContext) Warnevttype(i int) IWarnevttypeContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IWarnevttypeContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(IWarnevttypeContext) +} + +func (s *PruleContext) AllSKIPUNKNOWN() []antlr.TerminalNode { + return s.GetTokens(SfplParserSKIPUNKNOWN) +} + +func (s *PruleContext) SKIPUNKNOWN(i int) antlr.TerminalNode { + return 
s.GetToken(SfplParserSKIPUNKNOWN, i) +} + +func (s *PruleContext) AllSkipunknown() []ISkipunknownContext { + var ts = s.GetTypedRuleContexts(reflect.TypeOf((*ISkipunknownContext)(nil)).Elem()) + var tst = make([]ISkipunknownContext, len(ts)) + + for i, t := range ts { + if t != nil { + tst[i] = t.(ISkipunknownContext) + } + } + + return tst +} + +func (s *PruleContext) Skipunknown(i int) ISkipunknownContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ISkipunknownContext)(nil)).Elem(), i) + + if t == nil { + return nil + } + + return t.(ISkipunknownContext) +} + +func (s *PruleContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *PruleContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *PruleContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SfplListener); ok { + listenerT.EnterPrule(s) + } +} + +func (s *PruleContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SfplListener); ok { + listenerT.ExitPrule(s) + } +} + +func (s *PruleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case SfplVisitor: + return t.VisitPrule(s) + + default: + return t.VisitChildren(s) + } +} + +func (p *SfplParser) Prule() (localctx IPruleContext) { + localctx = NewPruleContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 4, SfplParserRULE_prule) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(79) + p.Match(SfplParserDECL) + } + { + p.SetState(80) + p.Match(SfplParserRULE) + } + { + p.SetState(81) + p.Match(SfplParserDEF) + } + { + p.SetState(82) + p.Text() + } + { + p.SetState(83) + p.Match(SfplParserDESC) + } + { + p.SetState(84) + p.Match(SfplParserDEF) + } + { + p.SetState(85) + p.Text() + } + { + p.SetState(86) + p.Match(SfplParserCOND) + } + { + p.SetState(87) + p.Match(SfplParserDEF) + } + { + p.SetState(88) + p.Expression() + } + p.SetState(115) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ((_la)&-(0x1f+1)) == 0 && ((1< +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package policy implements input policy translation for the rules engine. +package policy + +import "github.com/sysflow-telemetry/sf-apis/go/logger" + +// Predicate defines the type of a functional predicate. +type Predicate[R any] func(R) bool + +// Criterion defines an interface for functional predicate operations. +type Criterion[R any] struct { + Pred Predicate[R] +} + +// Eval evaluates a functional predicate. +func (c Criterion[R]) Eval(r R) bool { + return c.Pred(r) +} + +// And computes the conjunction of two functional predicates. 
+func (c Criterion[R]) And(cr Criterion[R]) Criterion[R] { + var p Predicate[R] = func(r R) bool { return c.Eval(r) && cr.Eval(r) } + return Criterion[R]{p} +} + +// Or computes the disjunction of two functional predicates. +func (c Criterion[R]) Or(cr Criterion[R]) Criterion[R] { + var p Predicate[R] = func(r R) bool { return c.Eval(r) || cr.Eval(r) } + return Criterion[R]{p} +} + +// Not computes the negation of the functional predicate. +func (c Criterion[R]) Not() Criterion[R] { + var p Predicate[R] = func(r R) bool { return !c.Eval(r) } + return Criterion[R]{p} +} + +// True defines a functional predicate that always returns true. +func True[R any]() Criterion[R] { return Criterion[R]{Pred: func(r R) bool { return true }} } + +// False defines a functional predicate that always returns false. +func False[R any]() Criterion[R] { return Criterion[R]{Pred: func(r R) bool { return false }} } + +// All derives the conjunctive clause of all predicates in a slice of predicates. +func All[R any](criteria []Criterion[R]) Criterion[R] { + all := True[R]() + for _, c := range criteria { + all = all.And(c) + } + return all +} + +// Any derives the disjunctive clause of all predicates in a slice of predicates. +func Any[R any](criteria []Criterion[R]) Criterion[R] { + any := False[R]() + for _, c := range criteria { + any = any.Or(c) + } + return any +} + +// First accepts a (Criterion[R], error) tuple and returns the Criterion[R], stopping execution if err is not nil. +func First[R any, T Criterion[R]](pred T, err error) T { + if err != nil { + logger.Error.Fatalln("Caught error during predicate compilation: ", err) + } + return pred +} diff --git a/core/policyengine/policy/predicates_test.go b/core/policyengine/policy/predicates_test.go new file mode 100644 index 00000000..5af0b7cb --- /dev/null +++ b/core/policyengine/policy/predicates_test.go @@ -0,0 +1,62 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package policy implements input policy translation for the rules engine.
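// Illustrative usage sketch (an assumption, not part of this changeset): composing
// criteria with the combinators defined above. The record type int and the name
// examplePredicates are hypothetical; in the engine, R is the backend record type.
func examplePredicates() bool {
	isPositive := Criterion[int]{Pred: func(r int) bool { return r > 0 }}
	isEven := Criterion[int]{Pred: func(r int) bool { return r%2 == 0 }}

	// (positive AND even) OR (NOT positive)
	c := All([]Criterion[int]{isPositive, isEven}).Or(isPositive.Not())

	// Evaluates to true for 4 and -3, and to false for 3.
	return c.Eval(4) && c.Eval(-3) && !c.Eval(3)
}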
+package policy + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNot(t *testing.T) { + c := False[any]() + assert.Equal(t, true, c.Not().Eval(nil)) +} + +func TestAnd(t *testing.T) { + c := False[any]() + assert.Equal(t, false, c.And(c).Eval(nil)) + assert.Equal(t, false, c.And(c.Not()).Eval(nil)) + assert.Equal(t, false, c.Not().And(c).Eval(nil)) + assert.Equal(t, true, c.Not().And(c.Not()).Eval(nil)) +} + +func TestOr(t *testing.T) { + c := False[any]() + assert.Equal(t, false, c.Or(c).Eval(nil)) + assert.Equal(t, true, c.Or(c.Not()).Eval(nil)) + assert.Equal(t, true, c.Not().Or(c).Eval(nil)) + assert.Equal(t, true, c.Not().Or(c.Not()).Eval(nil)) +} + +func TestAll(t *testing.T) { + assert.Equal(t, true, All([]Criterion[any]{True[any](), True[any]()}).Eval(nil)) + assert.Equal(t, false, All([]Criterion[any]{True[any](), False[any]()}).Eval(nil)) + assert.Equal(t, false, All([]Criterion[any]{False[any](), True[any]()}).Eval(nil)) + assert.Equal(t, false, All([]Criterion[any]{False[any](), False[any]()}).Eval(nil)) +} + +func TestAny(t *testing.T) { + assert.Equal(t, true, Any([]Criterion[any]{True[any](), True[any]()}).Eval(nil)) + assert.Equal(t, true, Any([]Criterion[any]{True[any](), False[any]()}).Eval(nil)) + assert.Equal(t, true, Any([]Criterion[any]{False[any](), True[any]()}).Eval(nil)) + assert.Equal(t, false, Any([]Criterion[any]{False[any](), False[any]()}).Eval(nil)) +} diff --git a/core/policyengine/policy/sigma/compiler.go b/core/policyengine/policy/sigma/compiler.go new file mode 100644 index 00000000..8e7f752a --- /dev/null +++ b/core/policyengine/policy/sigma/compiler.go @@ -0,0 +1,298 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sigma implements a frontend for Sigma rules engine. +package sigma + +import ( + "os" + "path" + "strings" + + "github.com/bradleyjkemp/sigma-go" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" +) + +type PolicyCompiler[R any] struct { + // Operations + ops source.Operations[R] + + // Transformer + transformer *Transformer + + // Compiled rule objects + rules []policy.Rule[R] + + // Intermediate rule and rule config objects parsed by the Sigma parser + sigmaRules []sigma.Rule + sigmaConfig sigma.Config + + // Sigma config path + configPath string +} + +// NewPolicyCompiler constructs a new compiler instance. +func NewPolicyCompiler[R any](ops source.Operations[R], configPath string) policy.PolicyCompiler[R] { + pc := new(PolicyCompiler[R]) + pc.ops = ops + pc.transformer = NewTransformer() + pc.rules = make([]policy.Rule[R], 0) + pc.configPath = configPath + return pc +} + +// Compile parses and interprets an input policy defined in path. 
+func (pc *PolicyCompiler[R]) compile(rulePaths []string, configPath string) error { + // Read Sigma rules + for _, path := range rulePaths { + contents, err := os.ReadFile(path) + if err != nil { + return err + } + rule, err := sigma.ParseRule(contents) + if err != nil { + logger.Error.Printf("Could not parse input rule ") + continue + } + pc.sigmaRules = append(pc.sigmaRules, rule) + } + + // Read Sigma config + if p, err := os.Stat(configPath); err == nil && !p.IsDir() { + contents, err := os.ReadFile(configPath) + if err != nil { + return err + } + pc.sigmaConfig, err = sigma.ParseConfig(contents) + if err != nil { + return err + } + } + + // Translate the sigma rules into criterion objects + for _, rule := range pc.sigmaRules { + for _, conditions := range rule.Detection.Conditions { + logger.Trace.Println("Parsing rule ", rule.ID, rule.Title) + r := policy.Rule[R]{ + Name: rule.ID, + Desc: rule.Description, + Condition: pc.visitSearchExpression(conditions.Search, rule.Detection.Searches), + Actions: nil, + Tags: pc.getTags(rule), + Priority: pc.getPriority(rule), + Prefilter: nil, + Enabled: true, + } + pc.rules = append(pc.rules, r) + } + } + + return nil +} + +// Compile parses a set of input policies defined in paths. +func (pc *PolicyCompiler[R]) Compile(paths ...string) ([]policy.Rule[R], []policy.Filter[R], error) { + if err := pc.compile(paths, pc.configPath); err != nil { + return nil, nil, err + } + return pc.rules, nil, nil +} + +func (pc *PolicyCompiler[R]) getTags(rule sigma.Rule) []policy.EnrichmentTag { + tags := make([]policy.EnrichmentTag, len(rule.Tags)) + for i, v := range rule.Tags { + tags[i] = v + } + return tags +} + +func (pc *PolicyCompiler[R]) getPriority(rule sigma.Rule) policy.Priority { + switch strings.ToLower(rule.Level) { + case policy.Informational.String(): + return policy.Informational + case policy.Low.String(): + return policy.Low + case policy.Medium.String(): + return policy.Medium + case policy.High.String(): + return policy.High + case policy.Critical.String(): + return policy.Critical + } + return policy.Informational +} + +func (pc *PolicyCompiler[R]) visitSearchExpression(condition sigma.SearchExpr, searches map[string]sigma.Search) policy.Criterion[R] { + + switch c := condition.(type) { + + case sigma.SearchIdentifier: + if search, ok := searches[c.Name]; ok { + return pc.visitSearch(search) + } + return policy.False[R]() + + case sigma.And: + var preds []policy.Criterion[R] + for _, expr := range c { + preds = append(preds, pc.visitSearchExpression(expr, searches)) + } + return policy.All(preds) + + case sigma.Or: + var preds []policy.Criterion[R] + for _, expr := range c { + preds = append(preds, pc.visitSearchExpression(expr, searches)) + } + return policy.Any(preds) + + case sigma.Not: + return pc.visitSearchExpression(c.Expr, searches).Not() + + case sigma.OneOfThem: + var preds []policy.Criterion[R] + for _, search := range searches { + preds = append(preds, pc.visitSearch(search)) + } + return policy.Any(preds) + + case sigma.OneOfPattern: + var preds []policy.Criterion[R] + for name, search := range searches { + matchesPattern, _ := path.Match(c.Pattern, name) + if matchesPattern { + preds = append(preds, pc.visitSearch(search)) + } + } + return policy.Any(preds) + + case sigma.AllOfThem: + var preds []policy.Criterion[R] + for _, search := range searches { + preds = append(preds, pc.visitSearch(search)) + } + return policy.All(preds) + + case sigma.AllOfPattern: + var preds []policy.Criterion[R] + for name, search := range 
searches { + matchesPattern, _ := path.Match(c.Pattern, name) + if matchesPattern { + preds = append(preds, pc.visitSearch(search)) + } + } + return policy.All(preds) + } + return policy.False[R]() +} + +func (pc *PolicyCompiler[R]) visitSearch(search sigma.Search) policy.Criterion[R] { + + if len(search.Keywords) > 0 { + logger.Warn.Println("Keyword search is not supported. Use field selectors instead.") + return policy.False[R]() + } + + var matcherPreds []policy.Criterion[R] + for _, eventMatcher := range search.EventMatchers { + for _, fieldMatcher := range eventMatcher { + var fieldPreds policy.Criterion[R] + allValuesMustMatch := false + var transformers []TransformerFlags + var comparators []FieldModifier + for _, modifier := range fieldMatcher.Modifiers { + m := FieldModifier(modifier) + if m == All { + allValuesMustMatch = true + } + if m.IsTransformer() { + transformers = append(transformers, TransformersMap[m]...) + } + if m.IsComparator() { + comparators = append(comparators, m) + } + } + var valuePreds []policy.Criterion[R] + for _, value := range fieldMatcher.Values { + if len(transformers) > 0 { + var tPreds []policy.Criterion[R] + for _, t := range transformers { + values, _ := pc.transformer.Transform(value, t) + for _, v := range values { + tPreds = append(tPreds, pc.visitTerm(comparators, fieldMatcher.Field, v)) + } + } + valuePreds = append(valuePreds, policy.Any(tPreds)) + } else { + valuePreds = append(valuePreds, pc.visitTerm(comparators, fieldMatcher.Field, value)) + } + } + if allValuesMustMatch { + fieldPreds = policy.All(valuePreds) + } else { + fieldPreds = policy.Any(valuePreds) + } + matcherPreds = append(matcherPreds, fieldPreds) + } + } + return policy.All(matcherPreds) +} + +func (pc *PolicyCompiler[R]) visitTerm(ops []FieldModifier, attr string, value string) policy.Criterion[R] { + var opPreds []policy.Criterion[R] + + // apply field mappings + if pc.sigmaConfig.FieldMappings != nil { + if mattr, ok := pc.sigmaConfig.FieldMappings[attr]; ok { + attr = mattr.TargetNames[0] + } + } + + // build predicate expression + if len(ops) == 0 { + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.IEq))) + } else { + for _, op := range ops { + switch op { + case Contains: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.IContains))) + case StartsWith: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.IStartswith))) + case EndsWith: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.IEndswith))) + case RegExp: + opPreds = append(opPreds, policy.First(pc.ops.RegExp(attr, value))) + case Lt: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.Lt))) + case Lte: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.LEq))) + case Gt: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.Gt))) + case Gte: + opPreds = append(opPreds, policy.First(pc.ops.Compare(attr, value, source.GEq))) + default: + logger.Error.Printf("Unsupported operator %s", op) + } + } + } + + return policy.All(opPreds) +} diff --git a/core/policyengine/policy/sigma/compiler_test.go b/core/policyengine/policy/sigma/compiler_test.go new file mode 100644 index 00000000..e423b5dc --- /dev/null +++ b/core/policyengine/policy/sigma/compiler_test.go @@ -0,0 +1,48 @@ +// +// Copyright (C) 2023 IBM Corporation. 
+// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sigma implements a frontend for Sigma rules engine. +package sigma_test + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/sysflow-telemetry/sf-apis/go/ioutils" + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/sigma" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +var configPath string = "../../../../resources/policies/sigma/config/sysflow.yml" +var rulesPath string = "../../../../resources/policies/sigma/rules/linux/process_creation" + +func TestMain(m *testing.M) { + logger.InitLoggers(logger.TRACE) + os.Exit(m.Run()) +} + +func TestCompiler(t *testing.T) { + pc := sigma.NewPolicyCompiler(flatrecord.NewOperations(), configPath) + paths, err := ioutils.ListRecursiveFilePaths(rulesPath, ".yml") + assert.NoError(t, err) + _, _, err = pc.Compile(paths...) + assert.NoError(t, err) +} diff --git a/core/policyengine/policy/sigma/modifiers.go b/core/policyengine/policy/sigma/modifiers.go new file mode 100644 index 00000000..21f99258 --- /dev/null +++ b/core/policyengine/policy/sigma/modifiers.go @@ -0,0 +1,83 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sigma implements a frontend for Sigma rules engine. +package sigma + +// FieldModifier type. +type FieldModifier string + +// Sigma field modifiers. 
+const ( + // conjunctive modifier + All FieldModifier = "all" + + // comparators + Contains FieldModifier = "contains" + EndsWith FieldModifier = "endswith" + StartsWith FieldModifier = "startswith" + Lt FieldModifier = "lt" + Lte FieldModifier = "lte" + Gt FieldModifier = "gt" + Gte FieldModifier = "gte" + + // transformers + Base64 FieldModifier = "base64" + Base64Offset FieldModifier = "base64Offset" + UTF16 FieldModifier = "utf16" + UTF16LE FieldModifier = "utf16le" + UTF16BE FieldModifier = "utf16be" + Wide FieldModifier = "wide" + WinDash FieldModifier = "windash" + RegExp FieldModifier = "re" + CIDR FieldModifier = "cidr" +) + +var exists = struct{}{} + +var ComparatorsMap = map[FieldModifier]struct{}{ + Contains: exists, + EndsWith: exists, + StartsWith: exists, + Lt: exists, + Lte: exists, + Gt: exists, + Gte: exists, +} + +var TransformersMap = map[FieldModifier][]TransformerFlags{ + Base64: {Base64Flag}, + Base64Offset: {Base64OffsetFlag}, + UTF16: {NoFlags}, + UTF16LE: {NoFlags}, + UTF16BE: {NoFlags}, + Wide: {NoFlags}, + WinDash: {NoFlags, WinDashFlag}, + CIDR: {CIDRFlag}, +} + +func (s FieldModifier) IsComparator() bool { + _, ok := ComparatorsMap[s] + return ok +} + +func (s FieldModifier) IsTransformer() bool { + _, ok := TransformersMap[s] + return ok +} diff --git a/core/policyengine/policy/sigma/transformer.go b/core/policyengine/policy/sigma/transformer.go new file mode 100644 index 00000000..410e2f99 --- /dev/null +++ b/core/policyengine/policy/sigma/transformer.go @@ -0,0 +1,102 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sigma implements a frontend for Sigma rules engine. +package sigma + +import ( + "encoding/base64" + "strings" +) + +// TransformerFlags defines a bitmap for transformer options. +type TransformerFlags uint16 + +// NoFlags represents the zero value for transformer flags. +const NoFlags TransformerFlags = 0 + +// Transformer flags. +const ( + Base64Flag TransformerFlags = 1 << iota // Base64 flag + Base64OffsetFlag // Base64 offset flag + WinDashFlag // WinDash flag + CIDRFlag // CIDR flag +) + +// Set sets the bitmap flag. +func (s TransformerFlags) Set(flag TransformerFlags) TransformerFlags { return s | flag } + +// Clear unsets the bitmap flag. +func (s TransformerFlags) Clear(flag TransformerFlags) TransformerFlags { return s &^ flag } + +// Has checks if flag is set in the bitmap. +func (s TransformerFlags) Has(flag TransformerFlags) bool { return s&flag != NoFlags } + +// Transformer defines a set of functions that transform source attribute values. 
+type Transformer struct{} + +func NewTransformer() *Transformer { + return &Transformer{} +} + +func (s *Transformer) Transform(src string, flags TransformerFlags) (dst []string, err error) { + if flags == NoFlags { + return []string{src}, nil + } + if flags.Has(WinDashFlag) { + return s.Transform(s.windash(src), flags.Clear(WinDashFlag)) + } + if flags.Has(CIDRFlag) { + for _, cidr := range s.cidr(src) { + var r []string + r, err = s.Transform(cidr, flags.Clear(CIDRFlag)) + if err != nil { + return + } + dst = append(dst, r...) + } + return + } + if flags.Has(Base64Flag) { + dst = append(dst, s.base64(src, 0)) + return + } + if flags.Has(Base64OffsetFlag) { + dst = append(dst, s.base64(src, 0)) + dst = append(dst, s.base64(src, 1)) + dst = append(dst, s.base64(src, 2)) + return + } + return []string{src}, nil +} + +func (s *Transformer) base64(src string, offset int) string { + if offset > 0 { + panic("Missing implementation for base64 offsets") + } + return base64.StdEncoding.EncodeToString([]byte(src)) +} + +func (s *Transformer) windash(src string) string { + return strings.ReplaceAll(src, "-", "/") +} + +func (s *Transformer) cidr(src string) []string { + panic("Missing implementation for CIDR transformer") +} diff --git a/core/policyengine/policy/sigma/transformer_test.go b/core/policyengine/policy/sigma/transformer_test.go new file mode 100644 index 00000000..7a6b39ef --- /dev/null +++ b/core/policyengine/policy/sigma/transformer_test.go @@ -0,0 +1,51 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sigma implements a frontend for Sigma rules engine. +package sigma + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBase64(t *testing.T) { + transformer := NewTransformer() + msg := "http://" + v, err := transformer.Transform(msg, Base64Flag) + assert.NoError(t, err) + assert.Equal(t, []string{"aHR0cDovLw=="}, v) +} + +func TestWinDash(t *testing.T) { + transformer := NewTransformer() + msg := "my-windows-variant" + v, err := transformer.Transform(msg, WinDashFlag) + assert.NoError(t, err) + assert.Equal(t, []string{"my/windows/variant"}, v) +} + +func TestWinDashBase64(t *testing.T) { + transformer := NewTransformer() + msg := "my-windows-variant" + v, err := transformer.Transform(msg, WinDashFlag.Set(Base64Flag)) + assert.NoError(t, err) + assert.Equal(t, []string{"bXkvd2luZG93cy92YXJpYW50"}, v) +} diff --git a/core/policyengine/policy/types.go b/core/policyengine/policy/types.go new file mode 100644 index 00000000..356e2496 --- /dev/null +++ b/core/policyengine/policy/types.go @@ -0,0 +1,61 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package policy implements input policy translation for the rules engine. +package policy + +// EnrichmentTag denotes the type for enrichment tags. +type EnrichmentTag interface{} + +// Priority denotes the type for rule priority. +type Priority int + +// Priority enumeration. +const ( + Informational Priority = iota + Low + Medium + High + Critical +) + +// String returns the string representation of a priority instance. +func (p Priority) String() string { + return [...]string{"informational", "low", "medium", "high", "critical"}[p] +} + +// Rule type +type Rule[R any] struct { + Name string + Desc string + Condition Criterion[R] + Actions []string + Tags []EnrichmentTag + Priority Priority + Prefilter []string + Enabled bool + IsAlert bool +} + +// Filter type +type Filter[R any] struct { + Name string + Condition Criterion[R] + Enabled bool +} diff --git a/core/policyengine/policyengine.go b/core/policyengine/policyengine.go index fc6494bc..61c3b01b 100644 --- a/core/policyengine/policyengine.go +++ b/core/policyengine/policyengine.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,19 +17,25 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package policyengine implements a plugin for a rules engine for telemetry records. package policyengine import ( "errors" "sync" + "time" "github.com/sysflow-telemetry/sf-apis/go/ioutils" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" - "github.ibm.com/sysflow/sf-processor/core/cache" - "github.ibm.com/sysflow/sf-processor/core/flattener" - "github.ibm.com/sysflow/sf-processor/core/policyengine/engine" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/engine" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/monitor" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/falco" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy/sigma" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/common" ) const ( @@ -38,12 +45,15 @@ const ( // PolicyEngine defines a driver for the Policy Engine plugin. type PolicyEngine struct { - pi engine.PolicyInterpreter - tables *cache.SFTables - outCh chan *engine.Record - filterOnly bool - bypass bool - config engine.Config + pi *engine.PolicyInterpreter[*common.Record] + outCh []chan *common.Record + config engine.Config + policyMonitor monitor.PolicyMonitor[*common.Record] +} + +// NewEventChan creates a new event record channel instance. +func NewEventChan(size int) interface{} { + return &plugins.Channel[*common.Record]{In: make(chan *common.Record, size)} } // NewPolicyEngine constructs a new Policy Engine plugin. 
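// Illustrative sketch (an assumption, not part of this changeset): how a compiler
// frontend assembles a policy.Rule from a Criterion. The record type string, the
// field values, and the name exampleRule are hypothetical; the Falco and Sigma
// compilers build equivalent rules over the backend record type. Assumes the
// "strings" package is imported.
func exampleRule() policy.Rule[string] {
	cond := policy.Criterion[string]{Pred: func(p string) bool { return strings.HasPrefix(p, "/tmp/") }}
	return policy.Rule[string]{
		Name:      "toy_tmp_path_rule",
		Desc:      "Matches records whose path starts with /tmp/",
		Condition: cond,
		Tags:      []policy.EnrichmentTag{"example"},
		Priority:  policy.Medium,
		Enabled:   true,
	}
}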
@@ -56,11 +66,6 @@ func (s *PolicyEngine) GetName() string { return pluginName } -// NewEventChan creates a new event record channel instance. -func NewEventChan(size int) interface{} { - return &engine.RecordChannel{In: make(chan *engine.Record, size)} -} - // Register registers plugin to plugin cache. func (s *PolicyEngine) Register(pc plugins.SFPluginCache) { pc.AddProcessor(pluginName, NewPolicyEngine) @@ -68,46 +73,94 @@ func (s *PolicyEngine) Register(pc plugins.SFPluginCache) { } // Init initializes the plugin. -func (s *PolicyEngine) Init(conf map[string]string) error { - config, err := engine.CreateConfig(conf) - if err != nil { - return err - } - s.config = config - s.pi = engine.NewPolicyInterpreter(s.config) - s.tables = cache.GetInstance() - if s.config.Mode == engine.FilterMode { - logger.Trace.Println("Setting policy engine in filter mode") - s.filterOnly = true - } else if s.config.Mode == engine.BypassMode { - logger.Trace.Println("Setting policy engine in bypass mode") - s.bypass = true - return nil +func (s *PolicyEngine) Init(conf map[string]interface{}) (err error) { + s.config, _ = engine.CreateConfig(conf) // no err check, assuming defaults + + if s.config.Mode == engine.EnrichMode { + logger.Trace.Println("Setting policy engine in 'enrich' mode") + if s.config.PoliciesPath == sfgo.Zeros.String { + return + } + } else { + logger.Trace.Println("Setting policy engine in 'alert' mode") + if s.config.PoliciesPath == sfgo.Zeros.String { + return errors.New("configuration attribute 'policies' missing from policy engine plugin settings") + } } - logger.Trace.Println("Loading policies from: ", config.PoliciesPath) - paths, err := ioutils.ListFilePaths(config.PoliciesPath, ".yaml") - if err == nil { - if len(paths) == 0 { - return errors.New("No policy files with extension .yaml found in path: " + config.PoliciesPath) + + if s.config.Monitor == engine.NoneType { + s.pi, err = s.createPolicyInterpreter() + if err != nil { + logger.Error.Printf("Unable to compile local policies from directory %s, %v", s.config.PoliciesPath, err) + return } - return s.pi.Compile(paths...) + + // start workers + s.pi.StartWorkers() + } else { + s.policyMonitor, err = monitor.NewPolicyMonitor(s.config, s.createPolicyInterpreter, s.out) + if err != nil { + logger.Error.Printf("Unable to load policy monitor %s, %v", s.config.Monitor.String(), err) + return + } + select { + case s.pi = <-s.policyMonitor.GetInterpreterChan(): + logger.Info.Printf("Loaded policy engine from policy monitor %s.", s.config.Monitor.String()) + s.pi.StartWorkers() + default: + logger.Error.Printf("No policy engine available for plugin. Please check error logs for details.") + return errors.New("no policy engine available for plugin") + } + s.policyMonitor.StartMonitor() } - return errors.New("Error while listing policies: " + err.Error()) + return } // Process implements the main loop of the plugin. -func (s *PolicyEngine) Process(ch interface{}, wg *sync.WaitGroup) { - in := ch.(*flattener.FlatChannel).In +// Records are processed concurrently. The number of concurrent threads is controlled by s.config.Concurrency. 
+func (s *PolicyEngine) Process(ch []interface{}, wg *sync.WaitGroup) { + if len(ch) != 1 { + logger.Error.Println("Policy Engine only supports a single input channel at this time") + return + } + in := ch[0].(*common.Channel).In defer wg.Done() logger.Trace.Println("Starting policy engine with capacity: ", cap(in)) - out := func(r *engine.Record) { s.outCh <- r } + + // set start and expiration time for checking for new policy interpreter + start := time.Now() + expiration := start.Add(s.config.MonitorInterval) + + lastPerfTs := time.Now() for { if fc, ok := <-in; ok { - if s.bypass { - out(engine.NewRecord(*fc, s.tables)) - } else { - s.pi.ProcessAsync(true, s.filterOnly, engine.NewRecord(*fc, s.tables), out) + if s.pi == nil { + s.bypassPolicyEngine(fc) + continue } + if s.policyMonitor != nil { + now := time.Now() + // check if another policy interpreter has been compiled (only happens when there are changes to the policy directory) + if now.After(expiration) { + select { + case pi := <-s.policyMonitor.GetInterpreterChan(): + logger.Info.Println("Updated policy interpreter in main policy engine thread.") + // stop workers from old policy interpreter before assigning new one + s.pi.StopWorkers() + pi.StartWorkers() + s.pi = pi + default: + } + expiration = now.Add(s.config.MonitorInterval) + } + } + // Log the number of queued input elements + if logger.IsEnabled(logger.Perf) && time.Since(lastPerfTs) > 15*time.Second { + logger.Perf.Printf("Policy engine input channel queue: %d", len(in)) + lastPerfTs = time.Now() + } + // Process record in interpreter's worker pool + s.processAsync(fc) } else { logger.Trace.Println("Input channel closed. Shutting down.") break @@ -115,15 +168,67 @@ func (s *PolicyEngine) Process(ch interface{}, wg *sync.WaitGroup) { } } +// Creates a policy interpreter from configuration. +func (s *PolicyEngine) createPolicyInterpreter() (*engine.PolicyInterpreter[*common.Record], error) { + dir := s.config.PoliciesPath + + // check policies + logger.Info.Println("Loading policies from: ", dir) + paths, err := ioutils.ListRecursiveFilePaths(dir, ".yaml", ".yml") + if err != nil { + return nil, err + } + if len(paths) == 0 { + return nil, errors.New("no policy files with extension .yaml or .yml found in path: " + dir) + } + + // build interpreter + logger.Info.Printf("Creating %s policy interpreter", s.config.Language.String()) + var pc policy.PolicyCompiler[*common.Record] + if s.config.Language == engine.Falco { + pc = falco.NewPolicyCompiler(common.NewOperations()) + } else { + pc = sigma.NewPolicyCompiler(common.NewOperations(), s.config.ConfigPath) + } + pf := common.NewPrefilter() + ctx := common.NewContextualizer() + pi := engine.NewPolicyInterpreter(s.config, pc, pf, ctx, s.out) + + // compile policies + err = pi.Compile(paths...) + if err != nil { + return nil, err + } + + return pi, nil +} + +// out sends a record to every output channel in the plugin. +func (s *PolicyEngine) out(r *common.Record) { + for _, c := range s.outCh { + c <- r + } +} + // SetOutChan sets the output channel of the plugin. -func (s *PolicyEngine) SetOutChan(ch interface{}) { - s.outCh = (ch.(*engine.RecordChannel)).In +func (s *PolicyEngine) SetOutChan(ch []interface{}) { + for _, c := range ch { + s.outCh = append(s.outCh, (c.(*plugins.Channel[*common.Record])).In) + } } // Cleanup clean up the plugin resources. 
func (s *PolicyEngine) Cleanup() { logger.Trace.Println("Exiting ", pluginName) + if s.pi != nil { + s.pi.StopWorkers() + } if s.outCh != nil { - close(s.outCh) + for _, c := range s.outCh { + close(c) + } + } + if s.policyMonitor != nil { + s.policyMonitor.StopMonitor() } } diff --git a/core/policyengine/policyengine_flatrecord.go b/core/policyengine/policyengine_flatrecord.go new file mode 100644 index 00000000..50f74bfc --- /dev/null +++ b/core/policyengine/policyengine_flatrecord.go @@ -0,0 +1,42 @@ +//go:build flatrecord +// +build flatrecord + +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// Andreas Schade +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package policyengine implements a plugin for a rules engine for telemetry records. +package policyengine + +import ( + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +// bypassPolicyEngine passes a record onto the exporter if there is no policy engine available. +// note any record transformations can be done here. +func (s *PolicyEngine) bypassPolicyEngine(rec *sfgo.FlatRecord) { + s.out(flatrecord.NewRecord(rec)) +} + +// processAsync processes a record in the policy engine. +// note any record transformations can be done here. +func (s *PolicyEngine) processAsync(rec *sfgo.FlatRecord) { + s.pi.ProcessAsync(flatrecord.NewRecord(rec)) +} diff --git a/plugins/example/go.mod b/core/policyengine/source/channel.go similarity index 70% rename from plugins/example/go.mod rename to core/policyengine/source/channel.go index f057769a..1c5bea76 100644 --- a/plugins/example/go.mod +++ b/core/policyengine/source/channel.go @@ -1,5 +1,5 @@ // -// Copyright (C) 2020 IBM Corporation. +// Copyright (C) 2023 IBM Corporation. // // Authors: // Frederico Araujo @@ -16,12 +16,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -module github.ibm.com/sysflow/sf-processor/plugins/example -go 1.14 +// Package source implements a backend for policy compilers. +package source -require ( - github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96 - github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027030609-879f8d66a4f0 -) +// RecordChannel type +// type RecordChannel[R any] struct { +// In chan R +// } diff --git a/core/policyengine/source/common/types_flatrecord.go b/core/policyengine/source/common/types_flatrecord.go new file mode 100644 index 00000000..4a7bec9d --- /dev/null +++ b/core/policyengine/source/common/types_flatrecord.go @@ -0,0 +1,53 @@ +//go:build flatrecord +// +build flatrecord + +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common binds the policy engine to a specific data source (backend).
+package common
+
+import (
+    "github.com/sysflow-telemetry/sf-apis/go/plugins"
+    "github.com/sysflow-telemetry/sf-apis/go/sfgo"
+    "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord"
+)
+
+// Record is the record type associated with the data source (backend) that will
+// be used with the rules compiler. The policy engine is defined over a
+// "common.Record" type. We currently use type aliasing to swap in different
+// data sources; support for multiple sources is planned. All types defined
+// here are specific to the SysFlow flatrecord backend.
+type Record = flatrecord.Record
+
+// Channel is the backend-specific channel wrapper object used to send data
+// to the policy engine.
+type Channel = plugins.Channel[*sfgo.FlatRecord]
+
+// NewOperations specifies a constructor for the backend-specific operations
+// object used with the policy engine.
+var NewOperations = flatrecord.NewOperations
+
+// NewPrefilter specifies a constructor for the backend-specific prefilter
+// object used with the policy engine.
+var NewPrefilter = flatrecord.NewPrefilter
+
+// NewContextualizer specifies a constructor for the backend-specific contextualizer
+// object used with the policy engine.
+var NewContextualizer = flatrecord.NewContextualizer
diff --git a/core/policyengine/source/contextualizer.go b/core/policyengine/source/contextualizer.go
new file mode 100644
index 00000000..1b135c75
--- /dev/null
+++ b/core/policyengine/source/contextualizer.go
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2023 IBM Corporation.
+//
+// Authors:
+// Frederico Araujo
+// Teryl Taylor
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package source implements a backend for policy compilers.
+package source
+
+import (
+    "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy"
+)
+
+type Contextualizer[R any] interface {
+    // AddRules adds one or more rules matching a record.
+    AddRules(r R, rules ...policy.Rule[R])
+    // GetRules retrieves the list of stored rules associated with a record.
+    GetRules(r R) []policy.Rule[R]
+    // AddTags adds one or more tags to a record.
+    AddTags(r R, tags ...string)
+    // GetTags retrieves the list of tags associated with a record.
+    GetTags(r R) []string
+}
+
+// DefaultContextualizer is a default contextualizer object.
+type DefaultContextualizer[R any] struct{}
+
+func NewDefaultContextualizer[R any]() Contextualizer[R] {
+    return &DefaultContextualizer[R]{}
+}
+
+// AddRules adds one or more rules matching a record.
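+// The default implementation is a no-op: rules and tags are discarded, and lookups return nil.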
+func (s *DefaultContextualizer[R]) AddRules(r R, rules ...policy.Rule[R]) {} + +// GetRules retrieves the list of stored rules associated with a record. +func (s *DefaultContextualizer[R]) GetRules(r R) []policy.Rule[R] { return nil } + +// Adds one or more tags to a record. +func (s *DefaultContextualizer[R]) AddTags(r R, tags ...string) {} + +// GetTags retrieves the list of tags associated with a record. +func (s *DefaultContextualizer[R]) GetTags(r R) []string { return nil } diff --git a/core/policyengine/engine/constants.go b/core/policyengine/source/flatrecord/constants.go similarity index 67% rename from core/policyengine/engine/constants.go rename to core/policyengine/source/flatrecord/constants.go index e9067b49..d6c90a3a 100644 --- a/core/policyengine/engine/constants.go +++ b/core/policyengine/source/flatrecord/constants.go @@ -16,41 +16,9 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine - -// Parsing constants. -const ( - LISTSEP string = "," - EMPTY string = "" - QUOTE string = "\"" - SPACE string = " " -) - -// SysFlow object types. -const ( - TyP string = "P" - TyF string = "F" - TyC string = "C" - TyH string = "H" - TyPE string = "PE" - TyFE string = "FE" - TyFF string = "FF" - TyNF string = "NF" - TyUnknow string = "" -) -// Falco priority values. -const ( - FPriorityEmergency = "emergency" - FPriorityAlert = "alert" - FPriorityCritical = "critical" - FPriorityError = "error" - FPriorityWarning = "warning" - FPriorityNotice = "notice" - FPriorityInfo = "informational" - FPriorityDebug = "debug" -) +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord // SysFlow attribute names. 
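+// These are the exported attribute names that policy rules can reference; the flatrecord
+// field mapper resolves each of them to a value in the flat record. For example, a rule
+// condition may reference attributes such as sf.proc.exe or sf.pod.namespace (illustrative;
+// see the policy language documentation for the exact condition syntax).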
const ( @@ -122,9 +90,25 @@ const ( SF_CONTAINER_IMAGE string = "sf.container.image" SF_CONTAINER_TYPE string = "sf.container.type" SF_CONTAINER_PRIVILEGED string = "sf.container.privileged" + SF_POD_TS string = "sf.pod.ts" + SF_POD_ID string = "sf.pod.id" + SF_POD_NAME string = "sf.pod.name" + SF_POD_NODENAME string = "sf.pod.nodename" + SF_POD_NAMESPACE string = "sf.pod.namespace" + SF_POD_RESTARTCOUNT string = "sf.pod.restartcnt" + SF_POD_HOSTIP string = "sf.pod.hostip+" + SF_POD_HOSTIP_JSON string = "sf.pod.hostip" + SF_POD_INTERNALIP string = "sf.pod.internalip+" + SF_POD_INTERNALIP_JSON string = "sf.pod.internalip" + SF_POD_SERVICES string = "sf.pod.services+" + SF_POD_SERVICES_JSON string = "sf.pod.services" + SF_K8SE_ACTION string = "sf.ke.action" + SF_K8SE_KIND string = "sf.ke.kind" + SF_K8SE_MESSAGE string = "sf.ke.message" SF_NODE_ID string = "sf.node.id" SF_NODE_IP string = "sf.node.ip" - SF_SCHEMA_VERSION string = "sf.schema" + SF_SCHEMA_VERSION string = "sf.meta.schema" + SF_TRACENAME string = "sf.meta.tracename" ) // extension proc attributes @@ -145,7 +129,7 @@ const ( EXT_PROC_SIGNED_INT = "ext.proc.signed" ) -//extension file attributes +// extension file attributes const ( EXT_FILE_SHA1_HASH_STR = "ext.file.sha1" EXT_FILE_MD5_HASH_STR = "ext.file.md5" @@ -205,62 +189,75 @@ const ( EXT_TARG_PROC_SIGNED_INT = "ext.targetproc.signed" EXT_TARG_PROC_NEW_THREAD_ID_INT = "ext.targetproc.newthreadid" +) - // Non-exported attributes (query-only) for Falco compatibility - FALCO_EVT_TYPE = "evt.type" - FALCO_EVT_RAW_RES = "evt.rawres" - FALCO_EVT_RAW_TIME = "evt.rawtime" - FALCO_EVT_DIR = "evt.dir" - FALCO_EVT_IS_OPEN_READ = "evt.is_open_read" - FALCO_EVT_IS_OPEN_WRITE = "evt.is_open_write" - FALCO_EVT_UID = "evt.arg.uid" - FALCO_FD_TYPECHAR = "fd.typechar" - FALCO_FD_DIRECTORY = "fd.directory" - FALCO_FD_NAME = "fd.name" - FALCO_FD_FILENAME = "fd.filename" - FALCO_FD_PROTO = "fd.proto" - FALCO_FD_LPROTO = "fd.lproto" - FALCO_FD_L4PROTO = "fd.l4proto" - FALCO_FD_RPROTO = "fd.rproto" - FALCO_FD_SPROTO = "fd.sproto" - FALCO_FD_CPROTO = "fd.cproto" - FALCO_FD_SPORT = "fd.sport" - FALCO_FD_DPORT = "fd.dport" - FALCO_FD_SIP = "fd.sip" - FALCO_FD_DIP = "fd.dip" - FALCO_FD_IP = "fd.ip" - FALCO_FD_PORT = "fd.port" - FALCO_FD_NUM = "fd.num" - FALCO_USER_NAME = "user.name" - FALCO_PROC_PID = "proc.pid" - FALCO_PROC_TID = "proc.tid" - FALCO_PROC_GID = "proc.gid" - FALCO_PROC_UID = "proc.uid" - FALCO_PROC_GROUP = "proc.group" - FALCO_PROC_TTY = "proc.tty" - FALCO_PROC_USER = "proc.user" - FALCO_PROC_EXE = "proc.exe" - FALCO_PROC_NAME = "proc.name" - FALCO_PROC_ARGS = "proc.args" - FALCO_PROC_CREATE_TIME = "proc.createtime" - FALCO_PROC_CMDLINE = "proc.cmdline" - FALCO_PROC_ANAME = "proc.aname" - FALCO_PROC_APID = "proc.apid" - FALCO_PROC_PPID = "proc.ppid" - FALCO_PROC_PGID = "proc.pgid" - FALCO_PROC_PUID = "proc.puid" - FALCO_PROC_PGROUP = "proc.pgroup" - FALCO_PROC_PTTY = "proc.ptty" - FALCO_PROC_PUSER = "proc.puser" - FALCO_PROC_PEXE = "proc.pexe" - FALCO_PROC_PARGS = "proc.pargs" - FALCO_PROC_PCREATE_TIME = "proc.pcreatetime" - FALCO_PROC_PNAME = "proc.pname" - FALCO_PROC_PCMDLINE = "proc.pcmdline" - FALCO_CONT_ID = "container.id" - FALCO_CONT_IMAGE_ID = "container.image.id" - FALCO_CONT_IMAGE = "container.image" - FALCO_CONT_NAME = "container.name" - FALCO_CONT_TYPE = "container.type" - FALCO_CONT_PRIVILEGED = "container.privileged" +// Non-exported attributes (query-only) for Falco compatibility +const ( + FALCO_EVT_TYPE = "evt.type" + FALCO_EVT_RAW_RES = "evt.rawres" + 
FALCO_EVT_RAW_TIME = "evt.rawtime" + FALCO_EVT_DIR = "evt.dir" + FALCO_EVT_IS_OPEN_READ = "evt.is_open_read" + FALCO_EVT_IS_OPEN_WRITE = "evt.is_open_write" + FALCO_EVT_UID = "evt.arg.uid" + FALCO_EVT_NAME = "evt.arg.name" + FALCO_EVT_PATH = "evt.arg.path" + FALCO_EVT_NEWPATH = "evt.arg.newpath" + FALCO_EVT_OLDPATH = "evt.arg.oldpath" + FALCO_FD_TYPECHAR = "fd.typechar" + FALCO_FD_DIRECTORY = "fd.directory" + FALCO_FD_NAME = "fd.name" + FALCO_FD_FILENAME = "fd.filename" + FALCO_FD_PROTO = "fd.proto" + FALCO_FD_LPROTO = "fd.lproto" + FALCO_FD_L4PROTO = "fd.l4proto" + FALCO_FD_RPROTO = "fd.rproto" + FALCO_FD_SPROTO = "fd.sproto" + FALCO_FD_CPROTO = "fd.cproto" + FALCO_FD_SPORT = "fd.sport" + FALCO_FD_DPORT = "fd.dport" + FALCO_FD_SIP = "fd.sip" + FALCO_FD_DIP = "fd.dip" + FALCO_FD_IP = "fd.ip" + FALCO_FD_PORT = "fd.port" + FALCO_FD_NUM = "fd.num" + FALCO_USER_NAME = "user.name" + FALCO_PROC_PID = "proc.pid" + FALCO_PROC_TID = "proc.tid" + FALCO_PROC_GID = "proc.gid" + FALCO_PROC_UID = "proc.uid" + FALCO_PROC_GROUP = "proc.group" + FALCO_PROC_TTY = "proc.tty" + FALCO_PROC_USER = "proc.user" + FALCO_PROC_EXE = "proc.exe" + FALCO_PROC_NAME = "proc.name" + FALCO_PROC_ARGS = "proc.args" + FALCO_PROC_CREATE_TIME = "proc.createtime" + FALCO_PROC_CMDLINE = "proc.cmdline" + FALCO_PROC_ANAME = "proc.aname" + FALCO_PROC_APID = "proc.apid" + FALCO_PROC_PPID = "proc.ppid" + FALCO_PROC_PGID = "proc.pgid" + FALCO_PROC_PUID = "proc.puid" + FALCO_PROC_PGROUP = "proc.pgroup" + FALCO_PROC_PTTY = "proc.ptty" + FALCO_PROC_PUSER = "proc.puser" + FALCO_PROC_PEXE = "proc.pexe" + FALCO_PROC_PARGS = "proc.pargs" + FALCO_PROC_PCREATE_TIME = "proc.pcreatetime" + FALCO_PROC_PNAME = "proc.pname" + FALCO_PROC_PCMDLINE = "proc.pcmdline" + FALCO_CONT_ID = "container.id" + FALCO_CONT_IMAGE_ID = "container.image.id" + FALCO_CONT_IMAGE_REPOSITORY = "container.image.repository" + FALCO_CONT_IMAGE = "container.image" + FALCO_CONT_NAME = "container.name" + FALCO_CONT_TYPE = "container.type" + FALCO_CONT_PRIVILEGED = "container.privileged" +) + +// Falco constants +const ( + FALCO_ENTER_EVENT = ">" + FALCO_EXIT_EVENT = "<" ) diff --git a/core/policyengine/source/flatrecord/contextualizer.go b/core/policyengine/source/flatrecord/contextualizer.go new file mode 100644 index 00000000..a5fabe9a --- /dev/null +++ b/core/policyengine/source/flatrecord/contextualizer.go @@ -0,0 +1,48 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord + +import ( + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" +) + +type Contextualizer struct{} + +func NewContextualizer() source.Contextualizer[*Record] { + return &Contextualizer{} +} + +func (s *Contextualizer) AddRules(r *Record, rules ...policy.Rule[*Record]) { + r.Ctx.AddRules(rules...) 
+} + +func (s *Contextualizer) GetRules(r *Record) []policy.Rule[*Record] { + return r.Ctx.GetRules() +} + +func (s *Contextualizer) AddTags(r *Record, tags ...string) { + r.Ctx.AddTags(tags...) +} + +func (s *Contextualizer) GetTags(r *Record) []string { + return r.Ctx.GetTags() +} diff --git a/core/policyengine/engine/fieldmapper.go b/core/policyengine/source/flatrecord/fieldmapper.go similarity index 51% rename from core/policyengine/engine/fieldmapper.go rename to core/policyengine/source/flatrecord/fieldmapper.go index 9243eb07..4f299b6d 100644 --- a/core/policyengine/engine/fieldmapper.go +++ b/core/policyengine/source/flatrecord/fieldmapper.go @@ -16,8 +16,9 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord import ( "fmt" @@ -26,9 +27,11 @@ import ( "strconv" "strings" - "github.com/cespare/xxhash" + "github.com/cespare/xxhash/v2" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/common" + "github.com/tidwall/gjson" ) // FieldMap is a functional type denoting a SysFlow attribute mapper. @@ -47,6 +50,7 @@ const ( MapSpecialInt MappingType = 5 MapSpecialStr MappingType = 6 MapSpecialBool MappingType = 7 + MapArraySvc MappingType = 8 ) // SectionType defines a section type @@ -54,14 +58,17 @@ type SectionType uint8 // Section constants const ( - SectNone SectionType = 0 - SectProc SectionType = 1 - SectPProc SectionType = 2 - SectFile SectionType = 3 - SectNet SectionType = 4 - SectFlow SectionType = 5 - SectCont SectionType = 6 - SectNode SectionType = 7 + SectNone SectionType = 0 + SectProc SectionType = 1 + SectPProc SectionType = 2 + SectFile SectionType = 3 + SectNet SectionType = 4 + SectFlow SectionType = 5 + SectCont SectionType = 6 + SectNode SectionType = 7 + SectMeta SectionType = 8 + SectPod SectionType = 9 + SectK8sEvt SectionType = 10 ) // Attribute ID constants @@ -72,17 +79,23 @@ const ( // FieldEntry is an object that stores metadata for each field in the exported map. type FieldEntry struct { - Map FieldMap - ID sfgo.Attribute - Type MappingType - Source sfgo.Source - Section SectionType - AuxAttr RecAttribute + Map FieldMap + FlatIndex sfgo.Attribute + Type MappingType + Source sfgo.Source + Section SectionType + AuxAttr RecAttribute } // IntFieldMap is a functional type denoting a numerical attribute mapper. type IntFieldMap func(r *Record) int64 +// IntArrayFieldMap is a functional type denoting a numerical array attribute mapper. +type IntArrayFieldMap func(r *Record) *[]int64 + +// SvcArrayFieldMap is a functional type denoting a service array attribute mapper. +type SvcArrayFieldMap func(r *Record) *[]*sfgo.Service + // StrFieldMap is a functional type denoting a string attribute mapper. type StrFieldMap func(r *Record) string @@ -107,23 +120,73 @@ func (m FieldMapper) MapInt(attr string) IntFieldMap { return func(r *Record) int64 { if v, ok := m.Map(attr)(r).(int64); ok { return v - } else if v, err := strconv.ParseInt(attr, 10, 64); err == nil { + } + if v, err := strconv.ParseInt(attr, 10, 64); err == nil { return v } return sfgo.Zeros.Int64 } } +// MapIntArray retrieves a numerical array field map based on a SysFlow attribute. 
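+// It returns nil when the underlying value is not an *[]int64. MapSvcArray below behaves
+// analogously for service (*[]*sfgo.Service) values.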
+func (m FieldMapper) MapIntArray(attr string) IntArrayFieldMap { + return func(r *Record) *[]int64 { + if v, ok := m.Map(attr)(r).(*[]int64); ok { + return v + } + return nil + } +} + +// MapSvcArray retrieves a numerical array field map based on a SysFlow attribute. +func (m FieldMapper) MapSvcArray(attr string) SvcArrayFieldMap { + return func(r *Record) *[]*sfgo.Service { + if v, ok := m.Map(attr)(r).(*[]*sfgo.Service); ok { + return v + } + return nil + } +} + +// cut has been introduced in go 1.18. +func cut(s string, sep string) (before string, after string, found bool) { + separatorIndex := strings.Index(s, sep) + if separatorIndex < 0 { + return + } + return s[:separatorIndex], s[separatorIndex+1:], true +} + // MapStr retrieves a string field map based on a SysFlow attribute. func (m FieldMapper) MapStr(attr string) StrFieldMap { return func(r *Record) string { - if v, ok := m.Map(attr)(r).(string); ok { - return trimBoundingQuotes(v) - } else if v, ok := m.Map(attr)(r).(int64); ok { + baseattr, jsonpath, isPathExp := cut(attr, "[") + if isPathExp { // check if baseattr is field name + _, isPathExp = m.Mappers[baseattr] + } else { + baseattr = attr + } + if isPathExp { // trim ']' + jsonpath = jsonpath[:len(jsonpath)-1] + } + + o := m.Map(baseattr)(r) + if v, ok := o.(string); ok { + if isPathExp && v != "" && jsonpath != "" { + return gjson.Get(v, jsonpath).String() + } + return common.TrimBoundingQuotes(v) + } else if v, ok := o.(int64); ok { + if baseattr == SF_PROC_TTY || baseattr == SF_PROC_ENTRY { + return strconv.FormatBool(v != 0) + } return strconv.FormatInt(v, 10) - } else if v, ok := m.Map(attr)(r).(bool); ok { + } else if v, ok := o.(int32); ok { // sf.pproc.* int fields + return strconv.FormatInt(int64(v), 10) + } else if v, ok := o.(bool); ok { // sf.pproc.tty, sf.pproc.entry field return strconv.FormatBool(v) } + return sfgo.Zeros.String } } @@ -197,84 +260,117 @@ func getMappers() map[string]*FieldEntry { } // getExportedMappers defines all mappers for exported attributes. +// A FieldEntry defines a mapped attribute for the policy engine. 
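+// Entries are grouped by record section (process, parent process, file, network, flow,
+// container, pod, Kubernetes event, node, and metadata).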
+// +// Map: mapping function +// FlatIndex: index in the flat record structure +// Type: mapping function return type; if "MapSpecial*", the function modifies the input data +// Source: source field in the flat record structure func getExportedMappers() map[string]*FieldEntry { return map[string]*FieldEntry{ // SysFlow - SF_TYPE: &FieldEntry{Map: mapRecType(sfgo.SYSFLOW_SRC), ID: sfgo.SF_REC_TYPE, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC}, - SF_OPFLAGS: &FieldEntry{Map: mapOpFlags(sfgo.SYSFLOW_SRC), ID: sfgo.EV_PROC_OPFLAGS_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC}, - SF_RET: &FieldEntry{Map: mapRet(sfgo.SYSFLOW_SRC), ID: sfgo.SF_REC_TYPE, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC}, - SF_TS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.TS_INT), ID: sfgo.TS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC}, - SF_ENDTS: &FieldEntry{Map: mapEndTs(sfgo.SYSFLOW_SRC), ID: sfgo.FL_FILE_ENDTS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC}, - SF_PROC_OID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT, sfgo.PROC_OID_CREATETS_INT), ID: sfgo.PROC_OID_HPID_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_PID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT), ID: sfgo.PROC_OID_HPID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_NAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR), ID: sfgo.PROC_EXE_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_EXE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR), ID: sfgo.PROC_EXE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_ARGS: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_EXEARGS_STR), ID: sfgo.PROC_EXEARGS_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_UID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_UID_INT), ID: sfgo.PROC_UID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_USER: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_USERNAME_STR), ID: sfgo.PROC_USERNAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_TID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.TID_INT), ID: sfgo.TID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_GID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_GID_INT), ID: sfgo.PROC_GID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_GROUP: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_GROUPNAME_STR), ID: sfgo.PROC_GROUPNAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_CREATETS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_CREATETS_INT), ID: sfgo.PROC_OID_CREATETS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_TTY: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_TTY_INT), ID: sfgo.PROC_TTY_INT, Type: MapBoolVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_ENTRY: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_ENTRY_INT), ID: sfgo.PROC_ENTRY_INT, Type: MapBoolVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_CMDLINE: &FieldEntry{Map: mapJoin(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR, sfgo.PROC_EXEARGS_STR), ID: sfgo.PROC_EXE_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, - SF_PROC_ANAME: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAName), ID: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, 
AuxAttr: ProcAName}, - SF_PROC_AEXE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAExe), ID: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcAExe}, - SF_PROC_ACMDLINE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcACmdLine), ID: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcACmdLine}, - SF_PROC_APID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAPID), ID: A_IDS, Type: MapArrayInt, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcAPID}, - SF_PPROC_OID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_HPID_INT, sfgo.PROC_POID_CREATETS_INT), ID: sfgo.PROC_POID_HPID_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, - SF_PPROC_PID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_HPID_INT), ID: sfgo.PROC_POID_HPID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, - SF_PPROC_NAME: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcName), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcName}, - SF_PPROC_EXE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcExe), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcExe}, - SF_PPROC_ARGS: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcArgs), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcArgs}, - SF_PPROC_UID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcUID), ID: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcUID}, - SF_PPROC_USER: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcUser), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcUser}, - SF_PPROC_GID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcGID), ID: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcGID}, - SF_PPROC_GROUP: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcGroup), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcGroup}, - SF_PPROC_CREATETS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_CREATETS_INT), ID: sfgo.PROC_POID_CREATETS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, - SF_PPROC_TTY: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcTTY), ID: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcTTY}, - SF_PPROC_ENTRY: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcEntry), ID: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcEntry}, - SF_PPROC_CMDLINE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcCmdLine), ID: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcCmdLine}, - SF_FILE_NAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), ID: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_PATH: &FieldEntry{Map: mapPath(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), ID: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_SYMLINK: &FieldEntry{Map: mapSymlink(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), ID: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_OID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, 
sfgo.FILE_PATH_STR), ID: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_DIRECTORY: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), ID: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_NEWNAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), ID: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_NEWPATH: &FieldEntry{Map: mapPath(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), ID: sfgo.SEC_FILE_PATH_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_NEWSYMLINK: &FieldEntry{Map: mapSymlink(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), ID: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_NEWOID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), ID: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_NEWDIRECTORY: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), ID: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_TYPE: &FieldEntry{Map: mapFileType(sfgo.SYSFLOW_SRC, sfgo.FILE_RESTYPE_INT), ID: sfgo.FILE_RESTYPE_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_IS_OPEN_WRITE: &FieldEntry{Map: mapIsOpenWrite(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), ID: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapSpecialBool, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_IS_OPEN_READ: &FieldEntry{Map: mapIsOpenRead(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), ID: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapSpecialBool, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_FD: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_FD_INT), ID: sfgo.FL_FILE_FD_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_FILE_OPENFLAGS: &FieldEntry{Map: mapOpenFlags(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), ID: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, - SF_NET_PROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT), ID: sfgo.FL_NETW_PROTO_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_SPORT: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SPORT_INT), ID: sfgo.FL_NETW_SPORT_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_DPORT: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_DPORT_INT), ID: sfgo.FL_NETW_DPORT_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_PORT: &FieldEntry{Map: mapPort(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SPORT_INT, sfgo.FL_NETW_DPORT_INT), ID: sfgo.FL_NETW_SPORT_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_SIP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SIP_INT), ID: sfgo.FL_NETW_SIP_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_DIP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_DIP_INT), ID: sfgo.FL_NETW_DIP_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_NET_IP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SIP_INT, sfgo.FL_NETW_DIP_INT), ID: sfgo.FL_NETW_SIP_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, - SF_FLOW_RBYTES: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMRRECVBYTES_INT, sfgo.FL_NETW_NUMRRECVBYTES_INT), ID: 
sfgo.FL_FILE_NUMRRECVBYTES_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, - SF_FLOW_ROPS: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMRRECVOPS_INT, sfgo.FL_NETW_NUMRRECVOPS_INT), ID: sfgo.FL_FILE_NUMRRECVOPS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, - SF_FLOW_WBYTES: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMWSENDBYTES_INT, sfgo.FL_NETW_NUMWSENDBYTES_INT), ID: sfgo.FL_FILE_NUMWSENDBYTES_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, - SF_FLOW_WOPS: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMWSENDOPS_INT, sfgo.FL_NETW_NUMWSENDOPS_INT), ID: sfgo.FL_FILE_NUMWSENDOPS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, - SF_CONTAINER_ID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_ID_STR), ID: sfgo.CONT_ID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_CONTAINER_NAME: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_NAME_STR), ID: sfgo.CONT_NAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_CONTAINER_IMAGEID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_IMAGEID_STR), ID: sfgo.CONT_IMAGEID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_CONTAINER_IMAGE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_IMAGE_STR), ID: sfgo.CONT_IMAGE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_CONTAINER_TYPE: &FieldEntry{Map: mapContType(sfgo.SYSFLOW_SRC, sfgo.CONT_TYPE_INT), ID: sfgo.CONT_TYPE_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_CONTAINER_PRIVILEGED: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.CONT_PRIVILEGED_INT), ID: sfgo.CONT_PRIVILEGED_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, - SF_NODE_ID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.SFHE_EXPORTER_STR), ID: sfgo.SFHE_EXPORTER_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNode}, - SF_NODE_IP: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.SFHE_IP_STR), ID: sfgo.SFHE_IP_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNode}, - SF_SCHEMA_VERSION: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.SFHE_VERSION_INT), ID: sfgo.SFHE_VERSION_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNode}, + SF_TYPE: &FieldEntry{Map: mapRecType(sfgo.SYSFLOW_SRC), FlatIndex: sfgo.SF_REC_TYPE, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC}, + SF_OPFLAGS: &FieldEntry{Map: mapOpFlags(sfgo.SYSFLOW_SRC), FlatIndex: sfgo.EV_PROC_OPFLAGS_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC}, + SF_RET: &FieldEntry{Map: mapRet(sfgo.SYSFLOW_SRC), FlatIndex: sfgo.SF_REC_TYPE, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC}, + SF_TS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.TS_INT), FlatIndex: sfgo.TS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC}, + SF_ENDTS: &FieldEntry{Map: mapEndTs(sfgo.SYSFLOW_SRC), FlatIndex: sfgo.FL_FILE_ENDTS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC}, + + SF_PROC_OID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT, sfgo.PROC_OID_CREATETS_INT), FlatIndex: sfgo.PROC_OID_HPID_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_PID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT), FlatIndex: sfgo.PROC_OID_HPID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_NAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR), FlatIndex: sfgo.PROC_EXE_STR, Type: MapSpecialStr, Source: 
sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_EXE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR), FlatIndex: sfgo.PROC_EXE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_ARGS: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_EXEARGS_STR), FlatIndex: sfgo.PROC_EXEARGS_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_UID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_UID_INT), FlatIndex: sfgo.PROC_UID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_USER: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_USERNAME_STR), FlatIndex: sfgo.PROC_USERNAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_TID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.TID_INT), FlatIndex: sfgo.TID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_GID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_GID_INT), FlatIndex: sfgo.PROC_GID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_GROUP: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.PROC_GROUPNAME_STR), FlatIndex: sfgo.PROC_GROUPNAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_CREATETS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_CREATETS_INT), FlatIndex: sfgo.PROC_OID_CREATETS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_TTY: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_TTY_INT), FlatIndex: sfgo.PROC_TTY_INT, Type: MapBoolVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_ENTRY: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_ENTRY_INT), FlatIndex: sfgo.PROC_ENTRY_INT, Type: MapBoolVal, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_CMDLINE: &FieldEntry{Map: mapJoin(sfgo.SYSFLOW_SRC, sfgo.PROC_EXE_STR, sfgo.PROC_EXEARGS_STR), FlatIndex: sfgo.PROC_EXE_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc}, + SF_PROC_ANAME: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAName), FlatIndex: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcAName}, + SF_PROC_AEXE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAExe), FlatIndex: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcAExe}, + SF_PROC_ACMDLINE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcACmdLine), FlatIndex: A_IDS, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcACmdLine}, + SF_PROC_APID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, ProcAPID), FlatIndex: A_IDS, Type: MapArrayInt, Source: sfgo.SYSFLOW_SRC, Section: SectProc, AuxAttr: ProcAPID}, + + SF_PPROC_OID: &FieldEntry{Map: mapOID(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_HPID_INT, sfgo.PROC_POID_CREATETS_INT), FlatIndex: sfgo.PROC_POID_HPID_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, + SF_PPROC_PID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_HPID_INT), FlatIndex: sfgo.PROC_POID_HPID_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, + SF_PPROC_NAME: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcName), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcName}, + SF_PPROC_EXE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcExe), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcExe}, + SF_PPROC_ARGS: &FieldEntry{Map: 
mapCachedValue(sfgo.SYSFLOW_SRC, PProcArgs), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcArgs}, + SF_PPROC_UID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcUID), FlatIndex: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcUID}, + SF_PPROC_USER: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcUser), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcUser}, + SF_PPROC_GID: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcGID), FlatIndex: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcGID}, + SF_PPROC_GROUP: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcGroup), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcGroup}, + SF_PPROC_CREATETS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_POID_CREATETS_INT), FlatIndex: sfgo.PROC_POID_CREATETS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPProc}, + SF_PPROC_TTY: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcTTY), FlatIndex: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcTTY}, + SF_PPROC_ENTRY: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcEntry), FlatIndex: PARENT_IDS, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcEntry}, + SF_PPROC_CMDLINE: &FieldEntry{Map: mapCachedValue(sfgo.SYSFLOW_SRC, PProcCmdLine), FlatIndex: PARENT_IDS, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectPProc, AuxAttr: PProcCmdLine}, + + SF_FILE_NAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), FlatIndex: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_PATH: &FieldEntry{Map: mapPath(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), FlatIndex: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_SYMLINK: &FieldEntry{Map: mapSymlink(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), FlatIndex: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_OID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.FILE_OID_STR), FlatIndex: sfgo.FILE_OID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_DIRECTORY: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR), FlatIndex: sfgo.FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_NEWNAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), FlatIndex: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_NEWPATH: &FieldEntry{Map: mapPath(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), FlatIndex: sfgo.SEC_FILE_PATH_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_NEWSYMLINK: &FieldEntry{Map: mapSymlink(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), FlatIndex: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_NEWOID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_OID_STR), FlatIndex: sfgo.SEC_FILE_OID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_NEWDIRECTORY: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.SEC_FILE_PATH_STR), FlatIndex: sfgo.SEC_FILE_PATH_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_TYPE: 
&FieldEntry{Map: mapFileType(sfgo.SYSFLOW_SRC, sfgo.FILE_RESTYPE_INT), FlatIndex: sfgo.FILE_RESTYPE_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_IS_OPEN_WRITE: &FieldEntry{Map: mapIsOpenWrite(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), FlatIndex: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapSpecialBool, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_IS_OPEN_READ: &FieldEntry{Map: mapIsOpenRead(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), FlatIndex: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapSpecialBool, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_FD: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_FD_INT), FlatIndex: sfgo.FL_FILE_FD_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + SF_FILE_OPENFLAGS: &FieldEntry{Map: mapOpenFlags(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT), FlatIndex: sfgo.FL_FILE_OPENFLAGS_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectFile}, + + SF_NET_PROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT), FlatIndex: sfgo.FL_NETW_PROTO_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_SPORT: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SPORT_INT), FlatIndex: sfgo.FL_NETW_SPORT_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_DPORT: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_DPORT_INT), FlatIndex: sfgo.FL_NETW_DPORT_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_PORT: &FieldEntry{Map: mapPort(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SPORT_INT, sfgo.FL_NETW_DPORT_INT), FlatIndex: sfgo.FL_NETW_SPORT_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_SIP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SIP_INT), FlatIndex: sfgo.FL_NETW_SIP_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_DIP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_DIP_INT), FlatIndex: sfgo.FL_NETW_DIP_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + SF_NET_IP: &FieldEntry{Map: mapIP(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_SIP_INT, sfgo.FL_NETW_DIP_INT), FlatIndex: sfgo.FL_NETW_SIP_INT, Type: MapArrayStr, Source: sfgo.SYSFLOW_SRC, Section: SectNet}, + + SF_FLOW_RBYTES: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMRRECVBYTES_INT, sfgo.FL_NETW_NUMRRECVBYTES_INT), FlatIndex: sfgo.FL_FILE_NUMRRECVBYTES_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, + SF_FLOW_ROPS: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMRRECVOPS_INT, sfgo.FL_NETW_NUMRRECVOPS_INT), FlatIndex: sfgo.FL_FILE_NUMRRECVOPS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, + SF_FLOW_WBYTES: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMWSENDBYTES_INT, sfgo.FL_NETW_NUMWSENDBYTES_INT), FlatIndex: sfgo.FL_FILE_NUMWSENDBYTES_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, + SF_FLOW_WOPS: &FieldEntry{Map: mapSum(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_NUMWSENDOPS_INT, sfgo.FL_NETW_NUMWSENDOPS_INT), FlatIndex: sfgo.FL_FILE_NUMWSENDOPS_INT, Type: MapSpecialInt, Source: sfgo.SYSFLOW_SRC, Section: SectFlow}, + + SF_CONTAINER_ID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_ID_STR), FlatIndex: sfgo.CONT_ID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + SF_CONTAINER_NAME: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_NAME_STR), FlatIndex: sfgo.CONT_NAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + 
SF_CONTAINER_IMAGEID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_IMAGEID_STR), FlatIndex: sfgo.CONT_IMAGEID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + SF_CONTAINER_IMAGE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.CONT_IMAGE_STR), FlatIndex: sfgo.CONT_IMAGE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + SF_CONTAINER_TYPE: &FieldEntry{Map: mapContType(sfgo.SYSFLOW_SRC, sfgo.CONT_TYPE_INT), FlatIndex: sfgo.CONT_TYPE_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + SF_CONTAINER_PRIVILEGED: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.CONT_PRIVILEGED_INT), FlatIndex: sfgo.CONT_PRIVILEGED_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectCont}, + + SF_POD_TS: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.POD_TS_INT), FlatIndex: sfgo.POD_TS_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_ID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_ID_STR), FlatIndex: sfgo.POD_ID_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_NAME: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_NAME_STR), FlatIndex: sfgo.POD_NAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_NODENAME: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_NODENAME_STR), FlatIndex: sfgo.POD_NODENAME_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_NAMESPACE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_NAMESPACE_STR), FlatIndex: sfgo.POD_NAMESPACE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_RESTARTCOUNT: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.POD_RESTARTCOUNT_INT), FlatIndex: sfgo.POD_RESTARTCOUNT_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_HOSTIP_JSON: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_HOSTIP_STR), FlatIndex: sfgo.POD_HOSTIP_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNone}, + SF_POD_HOSTIP: &FieldEntry{Map: mapIntArray(sfgo.SYSFLOW_SRC, sfgo.POD_HOSTIP_ANY), FlatIndex: sfgo.POD_HOSTIP_ANY, Type: MapArrayInt, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_INTERNALIP_JSON: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_INTERNALIP_STR), FlatIndex: sfgo.POD_INTERNALIP_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNone}, + SF_POD_INTERNALIP: &FieldEntry{Map: mapIntArray(sfgo.SYSFLOW_SRC, sfgo.POD_INTERNALIP_ANY), FlatIndex: sfgo.POD_INTERNALIP_ANY, Type: MapArrayInt, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + SF_POD_SERVICES_JSON: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.POD_SERVICES_STR), FlatIndex: sfgo.POD_SERVICES_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNone}, + SF_POD_SERVICES: &FieldEntry{Map: mapSvcArray(sfgo.SYSFLOW_SRC, sfgo.POD_SERVICES_ANY), FlatIndex: sfgo.POD_SERVICES_ANY, Type: MapArraySvc, Source: sfgo.SYSFLOW_SRC, Section: SectPod}, + + SF_K8SE_ACTION: &FieldEntry{Map: mapAction(sfgo.SYSFLOW_SRC, sfgo.K8SE_ACTION_INT), FlatIndex: sfgo.K8SE_ACTION_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectK8sEvt}, + SF_K8SE_KIND: &FieldEntry{Map: mapKind(sfgo.SYSFLOW_SRC, sfgo.K8SE_KIND_INT), FlatIndex: sfgo.K8SE_KIND_INT, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectK8sEvt}, + SF_K8SE_MESSAGE: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.K8SE_MESSAGE_STR), FlatIndex: sfgo.K8SE_MESSAGE_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectK8sEvt}, + + SF_NODE_ID: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, 
sfgo.SFHE_EXPORTER_STR), FlatIndex: sfgo.SFHE_EXPORTER_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNode}, + SF_NODE_IP: &FieldEntry{Map: mapStr(sfgo.SYSFLOW_SRC, sfgo.SFHE_IP_STR), FlatIndex: sfgo.SFHE_IP_STR, Type: MapStrVal, Source: sfgo.SYSFLOW_SRC, Section: SectNode}, + + SF_SCHEMA_VERSION: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.SFHE_VERSION_INT), FlatIndex: sfgo.SFHE_VERSION_INT, Type: MapIntVal, Source: sfgo.SYSFLOW_SRC, Section: SectMeta}, + SF_TRACENAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.SFHE_FILENAME_STR), FlatIndex: sfgo.SFHE_FILENAME_STR, Type: MapSpecialStr, Source: sfgo.SYSFLOW_SRC, Section: SectMeta}, } } // getExtendedMappers defines all mappers for extended attributes. +// nolint func getExtendedMappers() map[string]FieldMap { return map[string]FieldMap{ //Ext processes @@ -355,17 +451,17 @@ func getNonExportedMappers() map[string]*FieldEntry { FALCO_EVT_TYPE: &FieldEntry{Map: mapOpFlags(sfgo.SYSFLOW_SRC)}, FALCO_EVT_RAW_RES: &FieldEntry{Map: mapRecType(sfgo.SYSFLOW_SRC)}, FALCO_EVT_RAW_TIME: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.TS_INT)}, - FALCO_EVT_DIR: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT)}, + FALCO_EVT_DIR: &FieldEntry{Map: mapConsts(FALCO_ENTER_EVENT, FALCO_EXIT_EVENT)}, FALCO_EVT_IS_OPEN_READ: &FieldEntry{Map: mapIsOpenRead(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT)}, FALCO_EVT_IS_OPEN_WRITE: &FieldEntry{Map: mapIsOpenWrite(sfgo.SYSFLOW_SRC, sfgo.FL_FILE_OPENFLAGS_INT)}, FALCO_EVT_UID: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_UID_INT)}, FALCO_FD_TYPECHAR: &FieldEntry{Map: mapFileType(sfgo.SYSFLOW_SRC, sfgo.FILE_RESTYPE_INT)}, - FALCO_FD_DIRECTORY: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT)}, - FALCO_FD_NAME: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT)}, - FALCO_FD_FILENAME: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.PROC_OID_HPID_INT)}, - FALCO_FD_PROTO: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, - FALCO_FD_LPROTO: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, - FALCO_FD_L4PROTO: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, + FALCO_FD_DIRECTORY: &FieldEntry{Map: mapDir(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, + FALCO_FD_NAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, + FALCO_FD_FILENAME: &FieldEntry{Map: mapName(sfgo.SYSFLOW_SRC, sfgo.FILE_PATH_STR)}, + FALCO_FD_PROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, + FALCO_FD_LPROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, + FALCO_FD_L4PROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, FALCO_FD_RPROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, FALCO_FD_SPROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, FALCO_FD_CPROTO: &FieldEntry{Map: mapInt(sfgo.SYSFLOW_SRC, sfgo.FL_NETW_PROTO_INT)}, @@ -419,6 +515,14 @@ func mapInt(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { return r.GetInt(attr, src) } } +func mapIntArray(src sfgo.Source, attr sfgo.Attribute) FieldMap { + return func(r *Record) interface{} { return r.GetIntArray(attr, src) } +} + +func mapSvcArray(src sfgo.Source, attr sfgo.Attribute) FieldMap { + return func(r *Record) interface{} { return r.GetSvcArray(attr, src) } +} + func mapSum(src sfgo.Source, attrs ...sfgo.Attribute) FieldMap { return func(r *Record) interface{} { var sum int64 = 0 @@ -433,7 +537,7 @@ func mapJoin(src 
sfgo.Source, attrs ...sfgo.Attribute) FieldMap { return func(r *Record) interface{} { var join string = r.GetStr(attrs[0], src) for _, attr := range attrs[1:] { - join += SPACE + r.GetStr(attr, src) + join += common.SPACE + r.GetStr(attr, src) } return join } @@ -441,15 +545,25 @@ func mapJoin(src sfgo.Source, attrs ...sfgo.Attribute) FieldMap { func mapRecType(src sfgo.Source) FieldMap { return func(r *Record) interface{} { - return GetRecType(r, src) + rtype, _ := sfgo.ParseRecordType(r.GetInt(sfgo.SF_REC_TYPE, src)) + return rtype.String() } } func mapOpFlags(src sfgo.Source) FieldMap { return func(r *Record) interface{} { opflags := r.GetInt(sfgo.EV_PROC_OPFLAGS_INT, src) - rtype := mapRecType(src)(r).(string) - return strings.Join(sfgo.GetOpFlags(int32(opflags), rtype), LISTSEP) + rtype, _ := sfgo.ParseRecordType(r.GetInt(sfgo.SF_REC_TYPE, src)) + return strings.Join(sfgo.GetOpFlags(int32(opflags), rtype), common.LISTSEP) + } +} + +// nolint +func mapEvtType(src sfgo.Source) FieldMap { + return func(r *Record) interface{} { + opflags := r.GetInt(sfgo.EV_PROC_OPFLAGS_INT, src) + rtype, _ := sfgo.ParseRecordType(r.GetInt(sfgo.SF_REC_TYPE, src)) + return strings.Join(sfgo.GetEvtTypes(int32(opflags), rtype), common.LISTSEP) } } @@ -473,18 +587,18 @@ func mapEndTs(src sfgo.Source) FieldMap { return r.GetInt(sfgo.FL_FILE_ENDTS_INT, src) case sfgo.NET_FLOW: return r.GetInt(sfgo.FL_NETW_ENDTS_INT, src) + case sfgo.K8S_EVT: + return r.GetInt(sfgo.TS_INT, src) default: return sfgo.Zeros.Int64 } } } +// nolint func mapEntry(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { - if r.GetInt(attr, src) == 1 { - return true - } - return false + return r.GetInt(attr, src) == 1 } } @@ -500,6 +614,13 @@ func mapDir(src sfgo.Source, attr sfgo.Attribute) FieldMap { } } +// nolint +func mapRepo(src sfgo.Source, attr sfgo.Attribute) FieldMap { + return func(r *Record) interface{} { + return strings.Split(r.GetStr(attr, src), ":")[0] + } +} + func mapPath(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { path, _ := parseSymPath(src, attr, r) @@ -522,28 +643,23 @@ func mapFileType(src sfgo.Source, attr sfgo.Attribute) FieldMap { func mapIsOpenWrite(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { - if sfgo.IsOpenWrite(r.GetInt(attr, src)) { - return true - } - return false + return sfgo.IsOpenWrite(r.GetInt(attr, src)) } } func mapIsOpenRead(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { - if sfgo.IsOpenRead(r.GetInt(attr, src)) { - return true - } - return false + return sfgo.IsOpenRead(r.GetInt(attr, src)) } } func mapOpenFlags(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { - return strings.Join(sfgo.GetOpenFlags(r.GetInt(attr, src)), LISTSEP) + return strings.Join(sfgo.GetOpenFlags(r.GetInt(attr, src)), common.LISTSEP) } } +// nolint func mapProto(src sfgo.Source, attr sfgo.Attribute) FieldMap { return func(r *Record) interface{} { return sfgo.GetProto(r.GetInt(attr, src)) @@ -556,8 +672,7 @@ func mapPort(src sfgo.Source, attrs ...sfgo.Attribute) FieldMap { for _, attr := range attrs { ports = append(ports, strconv.FormatInt(r.GetInt(attr, src), 10)) } - // logger.Info.Println(ports) - return strings.Join(ports, LISTSEP) + return strings.Join(ports, common.LISTSEP) } } @@ -567,8 +682,7 @@ func mapIP(src sfgo.Source, attrs ...sfgo.Attribute) FieldMap { for _, attr := range attrs { ips = append(ips, 
sfgo.GetIPStr(int32(r.GetInt(attr, src)))) } - // logger.Info.Println(ips) - return strings.Join(ips, LISTSEP) + return strings.Join(ips, common.LISTSEP) } } @@ -578,6 +692,18 @@ func mapContType(src sfgo.Source, attr sfgo.Attribute) FieldMap { } } +func mapAction(src sfgo.Source, attr sfgo.Attribute) FieldMap { + return func(r *Record) interface{} { + return sfgo.K8sAction(r.GetInt(attr, src)).String() + } +} + +func mapKind(src sfgo.Source, attr sfgo.Attribute) FieldMap { + return func(r *Record) interface{} { + return sfgo.K8sComponent(r.GetInt(attr, src)).String() + } +} + func mapCachedValue(src sfgo.Source, attr RecAttribute) FieldMap { return func(r *Record) interface{} { oid := sfgo.OID{CreateTS: r.GetInt(sfgo.PROC_OID_CREATETS_INT, src), Hpid: r.GetInt(sfgo.PROC_OID_HPID_INT, src)} @@ -595,6 +721,13 @@ func mapOID(src sfgo.Source, attrs ...sfgo.Attribute) FieldMap { } } +func mapConsts(consts ...string) FieldMap { + return func(r *Record) interface{} { + return strings.Join(consts, common.LISTSEP) + } +} + +// nolint func mapNa(attr string) FieldMap { return func(r *Record) interface{} { logger.Warn.Println("Attribute not supported ", attr) diff --git a/core/policyengine/source/flatrecord/operations.go b/core/policyengine/source/flatrecord/operations.go new file mode 100644 index 00000000..cea63e9c --- /dev/null +++ b/core/policyengine/source/flatrecord/operations.go @@ -0,0 +1,136 @@ +// +// Copyright (C) 2022 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord + +import ( + "reflect" + "regexp" + "strings" + + "github.com/pkg/errors" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/common" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" +) + +type Operations struct { + strOps source.StrOps + intOps source.IntOps[int64] +} + +func NewOperations() source.Operations[*Record] { + return &Operations{strOps: source.StrOps{}, intOps: source.IntOps[int64]{}} +} + +// Exists creates a criterion for an existential predicate. +func (op *Operations) Exists(attr string) (policy.Criterion[*Record], error) { + m := Mapper.Map(attr) + p := func(r *Record) bool { return !reflect.ValueOf(m(r)).IsZero() } + return policy.Criterion[*Record]{Pred: p}, nil +} + +// Compare creates a criterion for a binary predicate. +func (op *Operations) Compare(lattr string, rattr string, operator source.Operator) (policy.Criterion[*Record], error) { + switch operator { + case source.Lt, source.LEq, source.Gt, source.GEq: + return op.compareInt(lattr, rattr, operator) + } + return op.compareStr(lattr, rattr, operator) +} + +// compareStr creates a criterion for a binary predicate over strings. 
+func (op *Operations) compareStr(lattr string, rattr string, operator source.Operator) (policy.Criterion[*Record], error) { + ml := Mapper.MapStr(lattr) + mr := Mapper.MapStr(rattr) + o, _ := op.strOps.OpFunc(operator) + p := func(r *Record) bool { return compareStr(ml(r), mr(r), o) } + return policy.Criterion[*Record]{Pred: p}, nil +} + +// compareInt creates a criterion for a binary predicate over integers. +func (op *Operations) compareInt(lattr string, rattr string, operator source.Operator) (policy.Criterion[*Record], error) { + ml := Mapper.MapInt(lattr) + mr := Mapper.MapInt(rattr) + o, _ := op.intOps.OpFunc(operator) + p := func(r *Record) bool { return compareInt(ml(r), mr(r), o) } + return policy.Criterion[*Record]{Pred: p}, nil +} + +// FoldAny creates a disjunctive criterion for a binary predicate over a list of strings. +func (op *Operations) FoldAny(attr string, list []string, operator source.Operator) (policy.Criterion[*Record], error) { + m := Mapper.MapStr(attr) + o, _ := op.strOps.OpFunc(operator) + p := func(r *Record) bool { + for _, v := range list { + if compareStr(m(r), v, o) { + return true + } + } + return false + } + return policy.Criterion[*Record]{Pred: p}, nil +} + +// FoldAll creates a conjunctive criterion for a binary predicate over a list of strings. +func (op *Operations) FoldAll(attr string, list []string, operator source.Operator) (policy.Criterion[*Record], error) { + m := Mapper.MapStr(attr) + o, _ := op.strOps.OpFunc(operator) + p := func(r *Record) bool { + for _, v := range list { + if !compareStr(m(r), v, o) { + return false + } + } + return true + } + return policy.Criterion[*Record]{Pred: p}, nil +} + +// RegExp creates a criterion for a regular-expression predicate. +func (op *Operations) RegExp(attr string, re string) (policy.Criterion[*Record], error) { + m := Mapper.MapStr(attr) + if regexp, err := regexp.Compile(re); err == nil { + p := func(r *Record) bool { + return regexp.FindString(m(r)) != "" + } + return policy.Criterion[*Record]{Pred: p}, nil + } + return policy.False[*Record](), errors.Errorf("could not compile regular expression %s", re) +} + +// compareStr compares two string values based on an operator. +func compareStr(l string, r string, op source.OpFunc[string]) bool { + lattrs := strings.Split(l, common.LISTSEP) + rattrs := strings.Split(r, common.LISTSEP) + for _, lattr := range lattrs { + for _, rattr := range rattrs { + if op(lattr, rattr) { + return true + } + } + } + return false +} + +// compareInt compares two int64 values based on an operator. +func compareInt(l int64, r int64, op source.OpFunc[int64]) bool { + return op(l, r) +} diff --git a/core/policyengine/source/flatrecord/prefilter.go b/core/policyengine/source/flatrecord/prefilter.go new file mode 100644 index 00000000..7760240c --- /dev/null +++ b/core/policyengine/source/flatrecord/prefilter.go @@ -0,0 +1,46 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord + +import ( + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source" +) + +// Prefilter defines a prefilter object +type Prefilter struct{} + +func NewPrefilter() source.Prefilter[*Record] { + return &Prefilter{} +} + +func (s *Prefilter) IsApplicable(r *Record, rule policy.Rule[*Record]) bool { + if rule.Prefilter == nil || len(rule.Prefilter) == 0 { + return true + } + rtype := Mapper.MapStr(SF_TYPE)(r) + for _, pf := range rule.Prefilter { + if rtype == pf { + return true + } + } + return false +} diff --git a/core/policyengine/engine/types.go b/core/policyengine/source/flatrecord/record.go similarity index 55% rename from core/policyengine/engine/types.go rename to core/policyengine/source/flatrecord/record.go index c8c7b735..5936dd61 100644 --- a/core/policyengine/engine/types.go +++ b/core/policyengine/source/flatrecord/record.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +17,9 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord import ( "path/filepath" @@ -25,97 +27,24 @@ import ( "strings" "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/core/cache" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/common" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" ) -// Action type for enumeration. -type Action int - -// Action enumeration. -const ( - Alert Action = iota - Tag - Hash -) - -// String returns the string representation of an action instance. -func (a Action) String() string { - return [...]string{"alert", "tag", "hash"}[a] -} - -// EnrichmentTag denotes the type for enrichment tags. -type EnrichmentTag interface{} - -// Priority denotes the type for rule priority. -type Priority int - -// Priority enumeration. -const ( - Low Priority = iota - Medium - High -) - -// String returns the string representation of a priority instance. -func (p Priority) String() string { - return [...]string{"low", "medium", "high"}[p] -} - -// Rule type -type Rule struct { - Name string - Desc string - condition Criterion - Actions []Action - Tags []EnrichmentTag - Priority Priority - Prefilter []string - Enabled bool -} - -func (s Rule) isApplicable(r *Record) bool { - if len(s.Prefilter) == 0 { - return true - } - rtype := Mapper.MapStr(SF_TYPE)(r) - for _, pf := range s.Prefilter { - if rtype == pf { - return true - } - } - return false -} - -// Filter type -type Filter struct { - Name string - condition Criterion - Enabled bool -} - // Record type type Record struct { - Fr sfgo.FlatRecord - Cr *cache.SFTables - Ptree map[sfgo.OID][]*sfgo.Process - Ctx Context + Fr *sfgo.FlatRecord + Ctx Context } // NewRecord creates a new Record isntance. 
-func NewRecord(fr sfgo.FlatRecord, cr *cache.SFTables) *Record { +func NewRecord(fr *sfgo.FlatRecord) *Record { var r = new(Record) r.Fr = fr - r.Cr = cr - r.Ptree = make(map[sfgo.OID][]*sfgo.Process) - r.Ctx = make(Context, 3) + r.Ctx = make(Context, 4) return r } -// RecordChannel type -type RecordChannel struct { - In chan *Record -} - // RecAttribute denotes a record attribute enumeration. type RecAttribute int8 @@ -147,125 +76,123 @@ func (r Record) GetInt(attr sfgo.Attribute, src sfgo.Source) int64 { return sfgo.Zeros.Int64 } -// GetStr returns a string value from internal flat record. -func (r Record) GetStr(attr sfgo.Attribute, src sfgo.Source) string { +// GetIntArray returns an integer array ptr value from internal flat record. +func (r Record) GetIntArray(attr sfgo.Attribute, src sfgo.Source) *[]int64 { for idx, s := range r.Fr.Sources { if s == src { - return r.Fr.Strs[idx][attr] + if v, ok := r.Fr.Anys[idx][attr].(*[]int64); ok { + return v + } + return nil } } - return sfgo.Zeros.String -} - -// GetProc returns a process object by ID. -func (r Record) GetProc(ID sfgo.OID) *sfgo.Process { - return r.Cr.GetProc(ID) + return nil } -func (r Record) getProcProv(ID sfgo.OID) []*sfgo.Process { - var ptree = make([]*sfgo.Process, 0) - if p := r.Cr.GetProc(ID); p != nil && p.Poid != nil && p.Poid.UnionType == sfgo.UnionNullOIDTypeEnumOID { - return append(append(ptree, p), r.getProcProv(*p.Poid.OID)...) +// GetSvcArray returns a service array ptr value from internal flat record. +func (r Record) GetSvcArray(attr sfgo.Attribute, src sfgo.Source) *[]*sfgo.Service { + for idx, s := range r.Fr.Sources { + if s == src { + if v, ok := r.Fr.Anys[idx][attr].(*[]*sfgo.Service); ok { + return v + } + return nil + } } - return ptree + return nil } -// MemoizePtree caches the processes hierachy given the ID. -func (r Record) MemoizePtree(ID sfgo.OID) []*sfgo.Process { - if ptree, ok := r.Ptree[ID]; ok { - return ptree +// GetStr returns a string value from internal flat record. +func (r Record) GetStr(attr sfgo.Attribute, src sfgo.Source) string { + for idx, s := range r.Fr.Sources { + if s == src { + return r.Fr.Strs[idx][attr] + } } - r.Ptree[ID] = r.getProcProv(ID) - return r.Ptree[ID] + return sfgo.Zeros.String } // GetCachedValue returns the value of attr from cache for process ID. 
func (r Record) GetCachedValue(ID sfgo.OID, attr RecAttribute) interface{} { - if ptree := r.MemoizePtree(ID); ptree != nil { + if ptree := r.Fr.Ptree; ptree != nil { switch attr { case PProcName: if len(ptree) > 1 { return filepath.Base(ptree[1].Exe) } - break case PProcExe: if len(ptree) > 1 { return ptree[1].Exe } - break case PProcArgs: if len(ptree) > 1 { return ptree[1].ExeArgs } - break case PProcUID: if len(ptree) > 1 { return ptree[1].Uid } - break case PProcUser: if len(ptree) > 1 { return ptree[1].UserName } - break case PProcGID: if len(ptree) > 1 { return ptree[1].Gid } - break case PProcGroup: if len(ptree) > 1 { return ptree[1].GroupName } - break case PProcTTY: if len(ptree) > 1 { return ptree[1].Tty } - break case PProcEntry: if len(ptree) > 1 { return ptree[1].Entry } - break case PProcCmdLine: if len(ptree) > 1 { if len(ptree[1].ExeArgs) > 0 { - return ptree[1].Exe + SPACE + ptree[1].ExeArgs + return ptree[1].Exe + common.SPACE + ptree[1].ExeArgs } return ptree[1].Exe } - break case ProcAName: var s []string for _, p := range ptree { s = append(s, filepath.Base(p.Exe)) } - return strings.Join(s, LISTSEP) + return strings.Join(s, common.LISTSEP) case ProcAExe: var s []string for _, p := range ptree { s = append(s, p.Exe) } - return strings.Join(s, LISTSEP) + return strings.Join(s, common.LISTSEP) case ProcACmdLine: var s []string for _, p := range ptree { if len(p.ExeArgs) > 0 { - s = append(s, p.Exe+SPACE+p.ExeArgs) + s = append(s, p.Exe+common.SPACE+p.ExeArgs) } else { s = append(s, p.Exe) } } - return strings.Join(s, LISTSEP) + return strings.Join(s, common.LISTSEP) case ProcAPID: var s []string for _, p := range ptree { s = append(s, strconv.FormatInt(p.Oid.Hpid, 10)) } - return strings.Join(s, LISTSEP) + return strings.Join(s, common.LISTSEP) } } + switch attr { + case PProcUID, PProcGID, PProcTTY, PProcEntry: + return sfgo.Zeros.Int64 + } return sfgo.Zeros.String } @@ -277,23 +204,37 @@ type contextKey int // ContextKey enum const ( - ruleCtxKey contextKey = iota + alertCtxKey contextKey = iota + ruleCtxKey tagCtxKey hashCtxKey ) -// AddRule stores add a rule instance to the set of rules matching a record. -func (s Context) AddRule(r Rule) { +func (s Context) IsAlert() bool { + if s[alertCtxKey] != nil { + return s[alertCtxKey].(bool) + } + return false +} + +func (s Context) SetAlert(isAlert bool) { + s[alertCtxKey] = isAlert +} + +// AddRule adds a rule instance to the set of rules matching a record. +func (s Context) AddRules(rules ...policy.Rule[*Record]) { if s[ruleCtxKey] == nil { - s[ruleCtxKey] = make([]Rule, 0) + s[ruleCtxKey] = make([]policy.Rule[*Record], 0) + } + for _, r := range rules { + s[ruleCtxKey] = append(s[ruleCtxKey].([]policy.Rule[*Record]), r) } - s[ruleCtxKey] = append(s[ruleCtxKey].([]Rule), r) } // GetRules retrieves the list of stored rules associated with a record context. -func (s Context) GetRules() []Rule { +func (s Context) GetRules() []policy.Rule[*Record] { if s[ruleCtxKey] != nil { - return s[ruleCtxKey].([]Rule) + return s[ruleCtxKey].([]policy.Rule[*Record]) } return nil } @@ -303,6 +244,16 @@ func (s Context) SetTags(tags []string) { s[tagCtxKey] = tags } +// Adds tags to context object. +func (s Context) AddTags(tags ...string) { + if s[tagCtxKey] == nil { + s[tagCtxKey] = make([]string, 0) + } + for _, tag := range tags { + s[tagCtxKey] = append(s[tagCtxKey].([]string), tag) + } +} + // GetTags retrieves hashes from context object. 
func (s Context) GetTags() []string { if s[tagCtxKey] != nil { @@ -311,24 +262,35 @@ func (s Context) GetTags() []string { return nil } -// SetHashes stores hashes into context object. -func (s Context) SetHashes(h HashSet) { - s[hashCtxKey] = h +func (s Context) GetHash(ht HashType) *HashSet { + if s[hashCtxKey] == nil { + return nil + } + hpa := s[hashCtxKey].([]*HashSet) + return hpa[ht] } -// GetHashes retrieves hashes from context object. -func (s Context) GetHashes() HashSet { - if s[hashCtxKey] != nil { - return s[hashCtxKey].(HashSet) +// Adds a hash set to context object. +func (s Context) SetHashes(ht HashType, hs *HashSet) { + if s[hashCtxKey] == nil { + s[hashCtxKey] = make([]*HashSet, 2) + } + hpa := s[hashCtxKey].([]*HashSet) + + if hpa[ht] == nil { + hpa[ht] = hs } - return HashSet{} } -// HashSet type +type HashType uint + +const ( + HASH_TYPE_PROC HashType = iota + HASH_TYPE_FILE +) + type HashSet struct { - MD5 string - SHA1 string - SHA256 string - Size int - UpdateTs int64 + Md5 string `json:"md5,omitempty"` + Sha1 string `json:"sha1,omitempty"` + Sha256 string `json:"sha256,omitempty"` } diff --git a/core/policyengine/engine/utils.go b/core/policyengine/source/flatrecord/utils.go similarity index 67% rename from core/policyengine/engine/utils.go rename to core/policyengine/source/flatrecord/utils.go index bc8705eb..c736868b 100644 --- a/core/policyengine/engine/utils.go +++ b/core/policyengine/source/flatrecord/utils.go @@ -16,48 +16,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine + +// Package flatrecord implements a flatrecord source for the policy compilers. +package flatrecord import ( "fmt" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" ) -func trimBoundingQuotes(s string) string { - if len(s) > 0 && (s[0] == '"' || s[0] == '\'') { - s = s[1:] - } - if len(s) > 0 && (s[len(s)-1] == '"' || s[len(s)-1] == '\'') { - s = s[:len(s)-1] - } - return s -} - -// GetRecType returns the record type of the record -func GetRecType(r *Record, src sfgo.Source) string { - switch r.GetInt(sfgo.SF_REC_TYPE, src) { - case sfgo.PROC: - return TyP - case sfgo.FILE: - return TyF - case sfgo.CONT: - return TyC - case sfgo.PROC_EVT: - return TyPE - case sfgo.FILE_EVT: - return TyFE - case sfgo.FILE_FLOW: - return TyFF - case sfgo.NET_FLOW: - return TyNF - case sfgo.HEADER: - return TyH - default: - return TyUnknow - } -} - func parseSymPath(idx sfgo.Source, attr sfgo.Attribute, r *Record) (string, string) { orig := r.GetStr(attr, idx) var src, dst uint64 diff --git a/core/policyengine/source/operations.go b/core/policyengine/source/operations.go new file mode 100644 index 00000000..871f4949 --- /dev/null +++ b/core/policyengine/source/operations.go @@ -0,0 +1,37 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package source implements a backend for policy compilers. +package source + +import "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + +// Operations interface defines a set of predicates to satisfy rule operations. +type Operations[R any] interface { + // Exists creates a criterion for an existential predicate. + Exists(attr string) (policy.Criterion[R], error) + // Compare creates a criterion for a binary predicate. + Compare(lattr string, rattr string, op Operator) (policy.Criterion[R], error) + // FoldAny creates a disjunctive criterion for a binary predicate over a list of strings. + FoldAny(attr string, list []string, op Operator) (policy.Criterion[R], error) + // FoldAll creates a conjunctive criterion for a binary predicate over a list of strings. + FoldAll(attr string, list []string, op Operator) (policy.Criterion[R], error) + // RegExp creates a criterion for a regular-expression predicate. + RegExp(attr string, re string) (policy.Criterion[R], error) +} diff --git a/core/policyengine/source/operators.go b/core/policyengine/source/operators.go new file mode 100644 index 00000000..16992332 --- /dev/null +++ b/core/policyengine/source/operators.go @@ -0,0 +1,151 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package source implements a backend for policy compilers. +package source + +import ( + "errors" + "strings" + + "golang.org/x/exp/constraints" +) + +// Operator enum type. +type Operator int32 + +// Operator enums. +const ( + Eq Operator = iota + IEq + Contains + IContains + Startswith + IStartswith + Endswith + IEndswith + Lt + LEq + Gt + GEq +) + +func (s Operator) String() string { + return [...]string{"Eq", "IEq", "Contains", "IContains", "Startswith", "IStartswith", "Endswith", "IEndswith", "Lt", "LEq", "Gt", "GEq"}[s] +} + +// Operator function type. +type OpFunc[T constraints.Ordered | ~bool] func(T, T) bool + +// Operator functions over strings. 
+type StrOps struct{} + +func (StrOps) OpFunc(op Operator) (OpFunc[string], error) { + switch op { + case Eq: + return func(l string, r string) bool { return l == r }, nil + case IEq: + return func(l string, r string) bool { return strings.EqualFold(l, r) }, nil + case Contains: + return func(l string, r string) bool { return strings.Contains(l, r) }, nil + case IContains: + return func(l string, r string) bool { return strings.Contains(strings.ToLower(l), strings.ToLower(r)) }, nil + case Startswith: + return func(l string, r string) bool { return strings.HasPrefix(l, r) }, nil + case IStartswith: + return func(l string, r string) bool { return strings.HasPrefix(strings.ToLower(l), strings.ToLower(r)) }, nil + case Endswith: + return func(l string, r string) bool { return strings.HasSuffix(l, r) }, nil + case IEndswith: + return func(l string, r string) bool { return strings.HasSuffix(strings.ToLower(l), strings.ToLower(r)) }, nil + } + return nil, errors.New("not a string operator") +} + +// Operator functions over booleans. +type BoolOps struct{} + +func (op BoolOps) Eq(l bool, r bool) bool { return l == r } + +// Operator functions over integers. +type IntOps[T constraints.Integer] struct{} + +func (IntOps[T]) OpFunc(op Operator) (OpFunc[T], error) { + switch op { + case Eq: + return func(l T, r T) bool { return l == r }, nil + case Lt: + return func(l T, r T) bool { return l < r }, nil + case LEq: + return func(l T, r T) bool { return l <= r }, nil + case Gt: + return func(l T, r T) bool { return l > r }, nil + case GEq: + return func(l T, r T) bool { return l >= r }, nil + } + return nil, errors.New("not an integer operator") +} + +// Operator function type for slices. +type SliceOpFunc[T constraints.Ordered | ~bool] func([]T, []T) bool + +// Operator functions over slices. +type SliceOps[T constraints.Ordered] struct{} + +func (SliceOps[T]) OpFunc(op Operator) (SliceOpFunc[T], error) { + switch op { + case Eq: + return func(l []T, r []T) bool { + if len(l) != len(r) { + return false + } + for i, avalue := range l { + if avalue != r[i] { + return false + } + } + return true + }, nil + } + return nil, errors.New("not a slice operator") +} + +// Operator for byte slices. +type ByteSliceOps SliceOps[byte] + +// Operator function type for IPv6 structure - i.e. length 16 byte array. +type IPv6OpFunc[T ~uint8] func([16]uint8, [16]uint8) bool + +// Operator functions over IPv6. +type IPv6Ops struct{} + +func (IPv6Ops) OpFunc(op Operator) (IPv6OpFunc[uint8], error) { + switch op { + case Eq: + return func(l [16]uint8, r [16]uint8) bool { + for i, avalue := range l { + if avalue != r[i] { + return false + } + } + return true + }, nil + } + return nil, errors.New("not an array operator") +} diff --git a/core/policyengine/source/prefilter.go b/core/policyengine/source/prefilter.go new file mode 100644 index 00000000..2709690b --- /dev/null +++ b/core/policyengine/source/prefilter.go @@ -0,0 +1,39 @@ +// +// Copyright (C) 2023 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package source implements a backend for policy compilers. +package source + +import "github.com/sysflow-telemetry/sf-processor/core/policyengine/policy" + +// Prefilter interface +type Prefilter[R any] interface { + IsApplicable(r R, rule policy.Rule[R]) bool +} + +// DefaultPrefilter defines a prefilter object to be used as a default prefilter. +type DefaultPrefilter[R any] struct{} + +func NewDefaultPrefilter[R any]() Prefilter[R] { + return &DefaultPrefilter[R]{} +} + +func (s *DefaultPrefilter[R]) IsApplicable(r R, rule policy.Rule[R]) bool { + return true +} diff --git a/core/processor/handlercache.go b/core/processor/handlercache.go new file mode 100644 index 00000000..cb048edf --- /dev/null +++ b/core/processor/handlercache.go @@ -0,0 +1,114 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package processor implements a processor plugin. +package processor + +import ( + "errors" + "fmt" + "os" + "plugin" + "sync" + + "github.com/sysflow-telemetry/sf-apis/go/plugins" + "github.com/sysflow-telemetry/sf-processor/core/flattener" +) + +var sHCInstance *HandlerCache +var sHCOnce sync.Once + +const ( + cHandlerSym string = "Handler" // variable required to load handler dynamically from shared object. + cHandlerName string = "handler" + cHandlerLibPath string = "handlerlibpath" +) + +// HandlerCache defines a data strucure for managing handlers. +type HandlerCache struct { + hdlFuncMap map[string]interface{} + pluginCache plugins.SFPluginCache +} + +// newHandlerCache creates a new HandlerCache instance. +func newHandlerCache(pc plugins.SFPluginCache) *HandlerCache { + hdl := &HandlerCache{ + hdlFuncMap: make(map[string]interface{}), + pluginCache: pc} + hdl.init() + return hdl +} + +// GetHandlerCacheInstance returns HandlerCache singleton instance +func GetHandlerCacheInstance(pc plugins.SFPluginCache) *HandlerCache { + sHCOnce.Do(func() { + sHCInstance = newHandlerCache(pc) + }) + return sHCInstance +} + +// initializes plugin cache. +func (p *HandlerCache) init() { + flat := &flattener.Flattener{} + flat.RegisterHandler(p) + flat.RegisterChannel(p.pluginCache) +} + +// LoadHandler loads dynamic handlers to handler cache from dir path. 
+func (p *HandlerCache) loadHandler(name string, path string) (plugins.SFHandler, error) { + var plug *plugin.Plugin + dynPlugin := path + "/" + name + ".so" + if _, err := os.Stat(dynPlugin); err == nil { + if plug, err = plugin.Open(dynPlugin); err != nil { + return nil, err + } + sym, err := plug.Lookup(cHandlerSym) + if err != nil { + return nil, err + } + if hdlr, ok := sym.(plugins.SFHandler); ok { + hdlr.RegisterHandler(p) + hdlr.RegisterChannel(p.pluginCache) + return hdlr, nil + } + } else { + return nil, errors.New("error trying load plugin at: " + dynPlugin) + } + return nil, fmt.Errorf("unable to dynamicly load Handler '%s' from library %s", name, dynPlugin) +} + +// AddHandler adds a handler method to the handler cache +func (p *HandlerCache) AddHandler(name string, factory interface{}) { + p.hdlFuncMap[name] = factory +} + +// GetHandler retrieves a cached plugin handler by name. +func (p *HandlerCache) GetHandler(conf map[string]interface{}) (plugins.SFHandler, error) { + if name, ok := conf[cHandlerName].(string); ok { + if val, ok := p.hdlFuncMap[name]; ok { + funct := val.(func() plugins.SFHandler) + return funct(), nil + } + if path, o := conf[cHandlerLibPath].(string); o { + return p.loadHandler(name, path) + } + return nil, fmt.Errorf("handler '%s' not found in built-in handlers, and no attribute 'handlerlib' for dynamic library defined", name) + } + return nil, fmt.Errorf("attribute 'handler' missing from sysflow processor's configuration") +} diff --git a/core/processor/processor.go b/core/processor/processor.go index 3e0e0ec6..d4ba630a 100644 --- a/core/processor/processor.go +++ b/core/processor/processor.go @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,37 +17,36 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package processor implements a processor plugin. package processor import ( - "strings" "sync" + "github.com/pkg/errors" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/core/cache" - "github.ibm.com/sysflow/sf-processor/core/flattener" ) const ( - pluginName string = "sysflowreader" - channelName string = "sysflowchan" + pluginName string = "sysflowprocessor" + channelName string = "ctxchan" ) // SysFlowProcessor defines the main processor class. type SysFlowProcessor struct { - hdr *sfgo.SFHeader - hdl plugins.SFHandler - tables *cache.SFTables + hdl plugins.SFHandler } +var sPluginCache plugins.SFPluginCache +var sPCOnce sync.Once + // NewSysFlowProcessor creates a new SysFlowProcessor instance. -func NewSysFlowProcessor(hdl plugins.SFHandler) plugins.SFProcessor { +func NewSysFlowProcessor() plugins.SFProcessor { logger.Trace.Println("Calling NewSysFlowProc") p := new(SysFlowProcessor) - p.hdl = hdl return p } @@ -55,36 +55,49 @@ func (s *SysFlowProcessor) GetName() string { return pluginName } -// NewSysFlowChan creates a new processor channel instance. -func NewSysFlowChan(size int) interface{} { - return &plugins.SFChannel{In: make(chan *sfgo.SysFlow, size)} +// NewCtxSysFlowChan creates a new processor channel instance. 
+func NewCtxSysFlowChan(size int) interface{} { + return &plugins.Channel[*plugins.CtxSysFlow]{In: make(chan *plugins.CtxSysFlow, size)} } // Register registers plugin to plugin cache. func (s *SysFlowProcessor) Register(pc plugins.SFPluginCache) { pc.AddProcessor(pluginName, NewSysFlowProcessor) - pc.AddChannel(channelName, NewSysFlowChan) - (&flattener.Flattener{}).Register(pc) + pc.AddChannel(channelName, NewCtxSysFlowChan) + sPCOnce.Do(func() { + sPluginCache = pc + }) } // Init initializes the processor with a configuration map. -func (s *SysFlowProcessor) Init(conf map[string]string) error { - s.tables = cache.GetInstance() +func (s *SysFlowProcessor) Init(conf map[string]interface{}) (err error) { + hdlCache := GetHandlerCacheInstance(sPluginCache) + s.hdl, err = hdlCache.GetHandler(conf) + if err != nil { + return errors.Wrap(err, "couldn't obtain the processor handler from cache") + } + if err = s.hdl.Init(conf); err != nil { + return errors.Wrap(err, "couldn't initialize processor handler") + } return nil } // SetOutChan sets the output channel of the plugin. -func (s *SysFlowProcessor) SetOutChan(ch interface{}) { +func (s *SysFlowProcessor) SetOutChan(ch []interface{}) { s.hdl.SetOutChan(ch) } // Process implements the main processor method of the plugin. -func (s *SysFlowProcessor) Process(ch interface{}, wg *sync.WaitGroup) { +func (s *SysFlowProcessor) Process(ch []interface{}, wg *sync.WaitGroup) { entEnabled := s.hdl.IsEntityEnabled() - cha := ch.(*plugins.SFChannel) + if len(ch) != 1 { + logger.Error.Println("SysFlow Processor only supports a single input channel at this time") + return + } + cha := ch[0].(*plugins.Channel[*plugins.CtxSysFlow]) record := cha.In defer wg.Done() - logger.Trace.Println("Starting SysFlow processing...") + logger.Trace.Println("Starting SysFlow Processor...") for { sf, ok := <-record if !ok { @@ -93,53 +106,42 @@ func (s *SysFlowProcessor) Process(ch interface{}, wg *sync.WaitGroup) { } switch sf.Rec.UnionType { case sfgo.SF_HEADER: - hdr := sf.Rec.SFHeader - s.hdr = hdr - s.tables.Reset() if entEnabled { - s.hdl.HandleHeader(s.hdr) + s.hdl.HandleHeader(sf, sf.Header) } case sfgo.SF_CONT: - cont := sf.Rec.Container - s.tables.SetCont(cont.Id, cont) if entEnabled { - s.hdl.HandleContainer(s.hdr, cont) + s.hdl.HandleContainer(sf, sf.Container) + } + case sfgo.SF_POD: + if entEnabled { + s.hdl.HandlePod(sf, sf.Pod) + } + case sfgo.SF_K8S_EVT: + if entEnabled { + s.hdl.HandleK8sEvt(sf, sf.K8sEvent) } case sfgo.SF_PROCESS: - proc := sf.Rec.Process - proc.Exe = strings.TrimSpace(proc.Exe) - proc.ExeArgs = strings.TrimSpace(proc.ExeArgs) - s.tables.SetProc(*proc.Oid, proc) if entEnabled { - cont := s.getContFromProc(proc) - s.hdl.HandleProcess(s.hdr, cont, proc) + s.hdl.HandleProcess(sf, sf.Process) } case sfgo.SF_FILE: - file := sf.Rec.File - s.tables.SetFile(file.Oid, file) if entEnabled { - cont := s.getContFromFile(file) - s.hdl.HandleFile(s.hdr, cont, file) + s.hdl.HandleFile(sf, sf.File) } case sfgo.SF_PROC_EVT: pe := sf.Rec.ProcessEvent - cont, proc := s.getContAndProc(pe.ProcOID) - s.hdl.HandleProcEvt(s.hdr, cont, proc, pe) + s.hdl.HandleProcEvt(sf, pe) case sfgo.SF_NET_FLOW: nf := sf.Rec.NetworkFlow - cont, proc := s.getContAndProc(nf.ProcOID) - s.hdl.HandleNetFlow(s.hdr, cont, proc, nf) + s.hdl.HandleNetFlow(sf, nf) case sfgo.SF_FILE_FLOW: ff := sf.Rec.FileFlow - cont, proc := s.getContAndProc(ff.ProcOID) - file := s.getFile(ff.FileOID) - s.hdl.HandleFileFlow(s.hdr, cont, proc, file, ff) + s.hdl.HandleFileFlow(sf, ff) case sfgo.SF_FILE_EVT: 
fe := sf.Rec.FileEvent - cont, proc := s.getContAndProc(fe.ProcOID) - file := s.getFile(fe.FileOID) - file2 := s.getOptFile(fe.NewFileOID) - s.hdl.HandleFileEvt(s.hdr, cont, proc, file, file2, fe) + s.hdl.HandleFileEvt(sf, fe) + case sfgo.SF_PROC_FLOW: case sfgo.SF_NET_EVT: default: logger.Warn.Println("Error unsupported SysFlow Type: ", sf.Rec.UnionType) @@ -152,52 +154,3 @@ func (s *SysFlowProcessor) Cleanup() { logger.Trace.Println("Exiting ", pluginName) s.hdl.Cleanup() } - -func (s *SysFlowProcessor) getContFromProc(proc *sfgo.Process) *sfgo.Container { - if proc.ContainerId != nil && proc.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { - if c := s.tables.GetCont(proc.ContainerId.String); c != nil { - return c - } - logger.Warn.Println("No container object for ID: ", proc.ContainerId.String) - } - return nil -} - -func (s *SysFlowProcessor) getContAndProc(oid *sfgo.OID) (*sfgo.Container, *sfgo.Process) { - if p := s.tables.GetProc(*oid); p != nil { - if p.ContainerId != nil && p.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { - if c := s.tables.GetCont(p.ContainerId.String); c != nil { - return c, p - } - logger.Warn.Println("No container object for ID: ", p.ContainerId.String) - } - return nil, p - } - logger.Error.Println("No process object for ID: ", *oid) - return nil, nil -} - -func (s *SysFlowProcessor) getFile(foid sfgo.FOID) *sfgo.File { - if f := s.tables.GetFile(foid); f != nil { - return f - } - logger.Error.Println("No file object for FOID: ", foid) - return nil -} - -func (s *SysFlowProcessor) getOptFile(unf *sfgo.UnionNullFOID) *sfgo.File { - if unf != nil && unf.UnionType == sfgo.UnionNullFOIDTypeEnumFOID { - return s.getFile(unf.FOID) - } - return nil -} - -func (s *SysFlowProcessor) getContFromFile(file *sfgo.File) *sfgo.Container { - if file != nil && file.ContainerId.UnionType == sfgo.UnionNullStringTypeEnumString { - if c := s.tables.GetCont(file.ContainerId.String); c != nil { - return c - } - logger.Warn.Println("Not container object for ID: ", file.ContainerId.String) - } - return nil -} diff --git a/core/processor/reader.go b/core/processor/reader.go new file mode 100644 index 00000000..df9196d5 --- /dev/null +++ b/core/processor/reader.go @@ -0,0 +1,228 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// Andreas Schade +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package processor implements a processor plugin. +package processor + +import ( + "strings" + "sync" + + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-apis/go/plugins" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/cache" +) + +const ( + readerPluginName string = "sysflowreader" + readerChannelName string = "sysflowchan" +) + +// SysFlowReader defines the main reader class, which process SysFlow records and builds the cache. +// This plugin should typically be first in the pipeline. 
+type SysFlowReader struct { + SysFlowProcessor + hdr *sfgo.SFHeader + tables *cache.SFTables +} + +// NewSysFlowProcessor creates a new SysFlowProcessor instance. +func NewSysFlowReader() plugins.SFProcessor { + logger.Trace.Println("Calling NewSysFlowReader") + p := new(SysFlowReader) + return p +} + +// GetName returns the plugin name. +func (s *SysFlowReader) GetName() string { + return readerPluginName +} + +// NewSysFlowChan creates a new processor channel instance. +func NewSysFlowChan(size int) interface{} { + return &plugins.Channel[*sfgo.SysFlow]{In: make(chan *sfgo.SysFlow, size)} +} + +// Register registers plugin to plugin cache. +func (s *SysFlowReader) Register(pc plugins.SFPluginCache) { + pc.AddProcessor(readerPluginName, NewSysFlowReader) + pc.AddChannel(readerChannelName, NewSysFlowChan) + sPCOnce.Do(func() { + sPluginCache = pc + }) +} + +// Init initializes the processor with a configuration map. +func (s *SysFlowReader) Init(conf map[string]interface{}) (err error) { + s.tables = cache.NewSFTables() + return s.SysFlowProcessor.Init(conf) +} + +// Process implements the main processor method of the plugin. +func (s *SysFlowReader) Process(ch []interface{}, wg *sync.WaitGroup) { + entEnabled := s.hdl.IsEntityEnabled() + if len(ch) != 1 { + logger.Error.Println("SysFlow Reader only supports a single input channel at this time") + return + } + cha := ch[0].(*plugins.Channel[*sfgo.SysFlow]) + record := cha.In + defer wg.Done() + logger.Trace.Println("Starting SysFlow Reader...") + for { + r, ok := <-record + if !ok { + logger.Trace.Println("SysFlow Reader channel closed. Shutting down.") + break + } + sf := new(plugins.CtxSysFlow) + sf.SysFlow = r + sf.Header = s.hdr + switch sf.Rec.UnionType { + case sfgo.SF_HEADER: + s.hdr = sf.Rec.SFHeader + s.tables.Reset() + if entEnabled { + s.hdl.HandleHeader(sf, s.hdr) + } + case sfgo.SF_CONT: + cont := sf.Rec.Container + s.tables.SetCont(cont.Id, cont) + if entEnabled { + s.hdl.HandleContainer(sf, cont) + } + case sfgo.SF_POD: + pod := sf.Rec.Pod + s.tables.SetPod(pod.Id, pod) + if entEnabled { + s.hdl.HandlePod(sf, pod) + } + case sfgo.SF_K8S_EVT: + ke := sf.Rec.K8sEvent + s.hdl.HandleK8sEvt(sf, ke) + case sfgo.SF_PROCESS: + proc := sf.Rec.Process + proc.Exe = strings.TrimSpace(proc.Exe) + proc.ExeArgs = strings.TrimSpace(proc.ExeArgs) + s.tables.SetProc(*proc.Oid, proc) + if entEnabled { + sf.Process = proc + sf.PTree = s.tables.GetPtree(*proc.Oid) + sf.Container = s.getContFromProc(proc) + sf.Pod = s.getPodFromCont(sf.Container) + s.hdl.HandleProcess(sf, proc) + } + case sfgo.SF_FILE: + sf.File = sf.Rec.File + s.tables.SetFile(sf.File.Oid, sf.File) + if entEnabled { + sf.Container = s.getContFromFile(sf.File) + sf.Pod = s.getPodFromCont(sf.Container) + s.hdl.HandleFile(sf, sf.File) + } + case sfgo.SF_PROC_EVT: + pe := sf.Rec.ProcessEvent + sf.Pod, sf.Container, sf.Process, sf.PTree = s.getPodContAndProc(pe.ProcOID) + s.hdl.HandleProcEvt(sf, pe) + case sfgo.SF_NET_FLOW: + nf := sf.Rec.NetworkFlow + sf.Pod, sf.Container, sf.Process, sf.PTree = s.getPodContAndProc(nf.ProcOID) + s.hdl.HandleNetFlow(sf, nf) + case sfgo.SF_FILE_FLOW: + ff := sf.Rec.FileFlow + sf.Pod, sf.Container, sf.Process, sf.PTree = s.getPodContAndProc(ff.ProcOID) + sf.File = s.getFile(ff.FileOID) + s.hdl.HandleFileFlow(sf, ff) + case sfgo.SF_FILE_EVT: + fe := sf.Rec.FileEvent + sf.Pod, sf.Container, sf.Process, sf.PTree = s.getPodContAndProc(fe.ProcOID) + sf.File = s.getFile(fe.FileOID) + sf.NewFile = s.getOptFile(fe.NewFileOID) + s.hdl.HandleFileEvt(sf, fe) + 
case sfgo.SF_PROC_FLOW: + case sfgo.SF_NET_EVT: + default: + logger.Warn.Printf("Error unsupported SysFlow Type: %d", sf.Rec.UnionType) + } + } +} + +// Cleanup tears down the plugin resources. +func (s *SysFlowReader) Cleanup() { + logger.Trace.Println("Exiting ", readerPluginName) + s.hdl.Cleanup() +} + +func (s *SysFlowReader) getContFromProc(proc *sfgo.Process) *sfgo.Container { + if proc.ContainerId != nil && proc.ContainerId.UnionType == sfgo.ContainerIdUnionTypeEnumString { + if c := s.tables.GetCont(proc.ContainerId.String); c != nil { + return c + } + logger.Warn.Println("No container object for ID: ", proc.ContainerId.String) + } + return nil +} + +func (s *SysFlowReader) getPodFromCont(cont *sfgo.Container) *sfgo.Pod { + if cont != nil && cont.PodId != nil && cont.PodId.UnionType == sfgo.PodIdUnionTypeEnumString { + if pd := s.tables.GetPod(cont.PodId.String); pd != nil { + return pd + } + logger.Warn.Println("No pod object for ID: ", cont.PodId.String) + } + return nil +} + +func (s *SysFlowReader) getPodContAndProc(oid *sfgo.OID) (*sfgo.Pod, *sfgo.Container, *sfgo.Process, []*sfgo.Process) { + if p := s.tables.GetProc(*oid); p != nil { + ptree := s.tables.GetPtree(*oid) + c := s.getContFromProc(p) + pd := s.getPodFromCont(c) + return pd, c, p, ptree + } + logger.Error.Println("No process object for ID: ", *oid) + return nil, nil, nil, nil +} + +func (s *SysFlowReader) getFile(foid sfgo.FOID) *sfgo.File { + if f := s.tables.GetFile(foid); f != nil { + return f + } + logger.Error.Println("No file object for FOID: ", foid) + return nil +} + +func (s *SysFlowReader) getOptFile(unf *sfgo.NewFileOIDUnion) *sfgo.File { + if unf != nil && unf.UnionType == sfgo.NewFileOIDUnionTypeEnumFOID { + return s.getFile(unf.FOID) + } + return nil +} + +func (s *SysFlowReader) getContFromFile(file *sfgo.File) *sfgo.Container { + if file != nil && file.ContainerId.UnionType == sfgo.ContainerIdUnionTypeEnumString { + if c := s.tables.GetCont(file.ContainerId.String); c != nil { + return c + } + logger.Warn.Println("Not container object for ID: ", file.ContainerId.String) + } + return nil +} diff --git a/docs/BUILD.md b/docs/BUILD.md new file mode 100644 index 00000000..add50f92 --- /dev/null +++ b/docs/BUILD.md @@ -0,0 +1,83 @@ +# SysFlow Processor (sf-processor repo) + +The SysFlow processor is a lighweight edge analytics pipeline that can process and enrich SysFlow data. The processor is written in golang, and allows users to build and configure various pipelines using a set of built-in and custom plugins and drivers. Pipeline plugins are producer-consumer objects that follow an interface and pass data to one another through pre-defined channels in a multi-threaded environment. By contrast, a driver represents a data source, which pushes data to the plugins. The processor currently supports two builtin drivers, including one that reads sysflow from a file, and another that reads streaming sysflow over a domain socket. Plugins and drivers are configured using a JSON file. + +A core built-in plugin is a policy engine that can apply logical rules to filter, alert, or semantically label sysflow records using a declarative language based on the [Falco rules syntax](https://falco.org/docs/rules/) with a few added extensions (more on this later). + +Custom plugins and drivers can be implemented as dynamic libraries to tailor analytics to specific user requirements. + +The endpoint of a pipeline configuration is an exporter plugin that sends the processed data to a target. 
The processor supports various types of export plugins for a variety of different targets. + +## Pre-requisites + +The processor has been tested on Ubuntu/RHEL distributions, but should work on any Linux system. + +- Golang version 1.17+ and make (if building from sources) +- Docker, docker-compose (if building with docker) + +## Build + +Clone the processor repository + +```bash +git clone https://github.com/sysflow-telemetry/sf-processor.git +``` + +Build locally, from sources + +```bash +cd sf-processor +make build +``` + +Build with docker + +```bash +cd sf-processor +make docker-build +``` + +## Usage + +For usage information, type: + +```bash +cd driver/ +./sfprocessor -help +``` + +This should yield the following usage statement: + +```bash +Usage: sfprocessor [[-version]|[-driver ] [-log ] [-driverdir ] [-plugdir ] path] +Positional arguments: + path string + Input path +Arguments: + -config string + Path to pipeline configuration file (default "pipeline.json") + -cpuprofile file + Write cpu profile to file + -driver string + Driver name {file|socket|} (default "file") + -driverdir string + Dynamic driver directory (default "../resources/drivers") + -log string + Log level {trace|info|warn|error} (default "info") + -memprofile file + Write memory profile to file + -plugdir string + Dynamic plugins directory (default "../resources/plugins") + -test + Test pipeline configuration + -traceprofile file + Write trace profile to file + -version + Output version information +``` + +The four most important flags are `config`, `driverdir`, `plugdir`, and `driver`. The `config` flag points to a pipeline configuration file, which describes the entire pipeline and settings for the individual settings for the plugins. The `driverdir` and `plugdir` flags specify where any dynamic drivers and plugins shared libraries reside that should be loaded by the processor at runtime. The `driver` flag accepts a label to a pre-configured driver (either built-in or custom) that will be used as the data source to the pipeline. Currently, the pipeline only supports one driver at a time, but we anticipate handling multiple drivers in the future. There are two built-in drivers: + +- _file_: loads a sysflow file reading driver that reads from `path`. +- _socket_: the processor loads a sysflow streaming driver. The driver creates a domain socket named `path` + and acts as a server waiting for a SysFlow collector to attach and send sysflow data. diff --git a/docs/CONFIG.md b/docs/CONFIG.md new file mode 100644 index 00000000..beda6708 --- /dev/null +++ b/docs/CONFIG.md @@ -0,0 +1,191 @@ +## Configuration + +The pipeline configuration below shows how to configure a pipeline that will read a sysflow stream and push records to the policy engine, which will trigger alerts using a set of runtime policies stored in a `yaml` file. 
An example pipeline with this configuration looks as follows: + +```json +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "../resources/policies/runtimeintegrity" + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "syslog", + "proto": "tcp", + "tag": "sysflow", + "host": "localhost", + "port": "514" + } + ] +} +``` + +> **NOTE:** This configuration can be found in: `sf-collector/resources/pipelines/pipeline.syslog.json` + +This pipeline specifies three built-in plugins: + +- [sysflowreader](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/processor/processor.go): is a generic reader plugin that ingests sysflow from the driver, caches entities, and presents sysflow objects to a handler object (i.e., an object that implements the [handler interface](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/handler.go)) for processing. In this case, we are using the [flattener](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/flattener/flattener.go) handler, but custom handlers are possible. +- [policyengine](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/policyengine/policyengine.go): is the policy engine, which takes [flattened](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/sfgo/flatrecord.go) (row-oriented) SysFlow records as input and outputs [records](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/policyengine/engine/types.go), which represent alerts, or filtered sysflow records depending on the policy engine's _mode_ (more on this later). +- [exporter](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/exporter/exporter.go): takes records from the policy engine, and exports them to ElasticSearch, syslog, file, or terminal, in a JSON format or in Elastic Common Schema (ECS) format. Note that custom export plugins can be created to export to other serialization formats and transport protocols. + +Each plugin has a set of general attributes that are present in all plugins, and a set of attributes that are custom to the specific plugins. For more details on the specific attributes in this example, see the pipeline configuration [template](https://github.com/sysflow-telemetry/sf-processor/blob/master/driver/pipeline.template.json) + +The general attributes are as follows: + +- _processor_ (required): the name of the processor plugin to load. Processors must implement the [SFProcessor](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/processor.go) interface; the name is the value that must be returned from the `GetName()` function as defined in the processor object. +- _handler_ (optional): the name of the handler object to be used for the processor. Handlers must implement the [SFHandler](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/handler.go) interface. +- _in_ (required): the input channel (i.e. golang channel) of objects that are passed to the plugin. +- _out_ (optional): the output channel (i.e. golang channel) for objects that are pushed out of the plugin, and into the next plugin in the pipeline sequence. + +Channels are modelled as channel objects that have an `In` attribute representing some golang channel of objects. 
See [SFChannel](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/processor.go) for an example. The syntax for a channel in the pipeline is `[channel name] [channel type]`, where channel type is the label given to the channel type at plugin registration (more on this later), and channel name is a unique identifier for the current channel instance. The name and type of an output channel in one plugin must match the name and type of the input channel of the next plugin in the pipeline sequence. + +> **NOTE:** A plugin has exactly one input channel but it may specify more than one output channel. This allows pipeline definitions that fan out data to more than one receiver plugin, similar to a Unix `tee` command. While there must always be one SysFlow reader acting as the entry point of a pipeline, a pipeline configuration may specify policy engines passing data to different exporters, or a SysFlow reader passing data to different policy engines. Generally, pipelines form a tree rather than a linear structure. + +### Policy engine configuration + +The policy engine (`"processor": "policyengine"`) plugin is driven by a set of rules. These rules are specified in a YAML file which adopts the same syntax as the rules of the [Falco](https://falco.org/docs/rules) project. A policy engine plugin specification may have the following attributes: + +- _policies_ (required for `alert` mode): The path to the YAML rules specification file. More information on rules can be found in the [Policies](POLICIES.md) section. +- _mode_ (optional): The mode of the policy engine. Allowed values are: + - `alert` (default): the policy engine generates rule-based alerts; `alert` is a blocking mode that drops all records that do not match any given rule. If no mode is specified, the policy engine runs in `alert` mode by default. + - `enrich`: enriches records with additional context from the rule. In contrast to `alert`, this is a non-blocking mode which applies tagging and action enrichments to matching records as defined in the policy file. Non-matching records are passed on "as is". +- _monitor_ (optional): Specifies whether changes to the policy file(s) should be monitored and the rules updated in the policy engine. + - `none` (default): no monitor is used. + - `local`: the processor will monitor for changes in the policies path and update its rule set if changes are detected. +- _monitor.interval_ (optional): The interval in seconds for updating policies, if a monitor is used (default: 30 seconds). +- _concurrency_ (optional): The number of concurrent threads for record processing (default: 5). +- _actiondir_ (optional): The path of the directory containing the shared object files for user-defined action plugins. See the section on [User-defined Actions](POLICIES.md#user-defined-actions) for more information. + +> **NOTE:** Prior to release 0.4.0, the _mode_ attribute accepted different values with different semantics. To preserve the behavior of older releases: +> - For old `alert` behavior, use `enrich` mode. +> - For old `filter` behavior, use `enrich` mode and a policy file with filter rules only. +> - For old `bypass` behavior, use `enrich` and drop the _policies_ key from the configuration. + +### Exporter configuration + +An exporter (`"processor": "exporter"`) plugin consists of two modules, an encoder for converting the data to a suitable format, and a transport module for sending the data to the target. Encoders are target-specific, i.e.,
for a particular export target a particular set of encoders may be used. In the exporter configuration, the transport module is specified via the _export_ parameter (required). The encoder is selected via the _format_ parameter (optional). The default format is `json`. + +The following table lists the currently supported exporter modules and the corresponding encoders. Additional encoders and transport modules can be implemented if the need arises. If you plan to [contribute](../CONTRIBUTING.md) or want to get involved in the discussion, please join the SysFlow community. + +| Transport module (_export_) | Target | Encoders (_format_) | +|-----------------------------|----------------------------|---------------------| +| `terminal` | console | `json`, `ecs` | +| `file` | local file | `json`, `ecs` | +| `es` | ElasticSearch service | `ecs` | +| `syslog` | syslog service | `json`, `ecs` | +| `findings` | IBM Findings API | `occurence` | +| `null` | | | + +Some of these combinations require additional configuration as described in the following sections. `null` is used for debugging the processor and doesn't export any data. + +#### File + +If _export_ is set to `file`, an additional parameter _file.path_ allows the specification of the target file. + +#### Syslog + +If the _export_ parameter is set to `syslog`, output to syslog is enabled and the following additional parameters are used: + +- _syslog.proto_ (optional): The protocol used for communicating with the syslog server. Allowed values are `tcp`, `tls` and `udp`. Default is `tcp`. +- _syslog.tag_ (optional): The tag used for each SysFlow record in syslog. Default is `SysFlow`. +- _syslog.source_ (optional): If set, adds a hostname to the syslog header. +- _syslog.host_ (optional): The hostname of the syslog server. Default is `localhost`. +- _syslog.port_ (optional): The port of the syslog server. Default is `514`. + +#### ElasticSearch + +Export to ElasticSearch is enabled by setting the config parameter _export_ to `es`. The only supported _format_ for export to ElasticSearch is `ecs`. + +Data export is done via bulk ingestion. The ingestion can be controlled by some additional parameters which are read when the `es` export target is selected. Required parameters specify the ES target, index and credentials. Optional parameters control some aspects of the behavior of the bulk ingestion and may have an effect on performance. You may need to adapt their values for optimal performance in your environment. + +- _es.addresses_ (required): A comma-separated list of ES endpoints. +- _es.index_ (required): The name of the ES index to ingest into. +- _es.username_ (required): The ES username. +- _es.password_ (required): The password for the specified ES user. +- _buffer_ (optional): The bulk size as the number of records to be ingested at once. Default is `0`; a value of `0` indicates record-by-record ingestion, which may be highly inefficient. +- _es.bulk.numWorkers_ (optional): The number of ingestion workers used in parallel. Default is `0`, which means that the exporter uses as many workers as there are cores in the machine. +- _es.bulk.flashBuffer_ (optional): The size in bytes of the flush buffer for ingestion. It should be large enough to hold one bulk (the number of records specified in _buffer_), otherwise the bulk is broken into smaller chunks. Default is `5e+6`. +- _es.bulk.flushTimeout_ (optional): The flush buffer time threshold. Valid values are golang duration strings. Default is `30s`.
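For illustration only, a hypothetical `exporter` entry for bulk ingestion into Elastic could look like the sketch below; the endpoint, index name, credentials, and buffer size are placeholders to be adapted to your environment, and the `in` channel is assumed to match the policy engine's output channel from the earlier pipeline example.

```json
{
  "processor": "exporter",
  "in": "evt eventchan",
  "export": "es",
  "format": "ecs",
  "es.addresses": "https://localhost:9200",
  "es.index": "sysflow",
  "es.username": "elastic",
  "es.password": "changeme",
  "buffer": "1000"
}
```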
+
+The Elastic exporter does not make any assumptions about the existence or configuration of the index specified in _es.index_. If the index does not exist, Elastic will automatically create it and apply a default dynamic mapping. It may be beneficial to use an explicit mapping for the ECS data generated by the Elastic exporter. For convenience, we provide an [explicit mapping](https://github.com/sysflow-telemetry/sf-processor/blob/master/resources/mappings/ecs_mapping.json) for creating a new tailored index in Elastic. For more information, refer to the [Elastic Mapping](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html) reference.
+
+### Environment variables
+
+It is possible to override any of the custom attributes of a plugin using an environment variable. This is especially useful when operating the processor as a container, where you may have to deploy the processor to multiple nodes and have attributes that change per node. If an environment variable is set, it overrides the setting inside the config file. The environment variables must follow this structure:
+
+- Environment variables must follow the naming schema `<PLUGIN NAME>_<CONFIG ATTRIBUTE NAME>` (all upper case).
+- The plugin name inside the pipeline configuration file must be all lower case.
+
+For example, to set the alert mode inside the policy engine, the following environment variable is set:
+
+```bash
+export POLICYENGINE_MODE=alert
+```
+
+To set the syslog values for the exporter:
+
+```bash
+export EXPORTER_TYPE=telemetry
+export EXPORTER_SOURCE=${HOSTNAME}
+export EXPORTER_EXPORT=syslog
+export EXPORTER_HOST=192.168.2.10
+export EXPORTER_PORT=514
+```
+
+If running as a docker container, environment variables can be passed with the docker run command:
+
+```bash
+docker run \
+-e EXPORTER_TYPE=telemetry \
+-e EXPORTER_SOURCE=${HOSTNAME} \
+-e EXPORTER_EXPORT=syslog \
+-e EXPORTER_HOST=192.168.2.10 \
+-e EXPORTER_PORT=514 \
+...
+```
+
+### Rate limiter configuration (experimental)
+
+The `flattener` handler has a built-in time decay filter that can be enabled to reduce event rates in the processor. The filter uses a time-decay bloom filter based on a semantic hashing of records. This means that the filter should only forward one record matching a semantic hash per time decay period. The semantic hash takes into consideration process, flow and event attributes. To enable rate limiting, modify the `sysflowreader` processor as follows:
+
+```json
+{
+  "processor": "sysflowreader",
+  "handler": "flattener",
+  "in": "sysflow sysflowchan",
+  "out": "flat flattenerchan",
+  "filter.enabled": "on|off (default: off)",
+  "filter.maxage": "time decay in minutes (default: 24H)"
+}
+```
diff --git a/docs/DOCKER.md b/docs/DOCKER.md
new file mode 100644
index 00000000..427d0d8a
--- /dev/null
+++ b/docs/DOCKER.md
@@ -0,0 +1,42 @@
+## Docker usage
+
+Documentation and scripts for how to deploy the SysFlow Processor with docker compose can be found [here](https://sysflow.readthedocs.io/en/latest/docker.html).
+
+### Processor environment
+
+As mentioned in a previous section, all custom plugin attributes can be set using the `<PLUGIN NAME>_<CONFIG ATTRIBUTE NAME>` naming format. Note that the docker compose file sets several attributes including `EXPORTER_TYPE`, `EXPORTER_HOST` and `EXPORTER_PORT`.
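+
+For example, to point the exporter at a remote syslog endpoint, the same attributes can be set in the `environment` section of the sf-processor service in the compose file. This is an illustrative sketch; the host and port values are placeholders.
+
+```yaml
+sf-processor:
+  container_name: sf-processor
+  image: sysflowtelemetry/sf-processor:latest
+  environment:
+    EXPORTER_TYPE: telemetry
+    EXPORTER_EXPORT: syslog
+    EXPORTER_HOST: 192.168.2.10
+    EXPORTER_PORT: "514"
+```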
+
+The following are the default locations of the pipeline configuration and plugins directory:
+
+- pipeline.json: `/usr/local/sysflow/conf/pipeline.json`
+- drivers dir: `/usr/local/sysflow/resources/drivers`
+- plugins dir: `/usr/local/sysflow/resources/plugins`
+- handler dir: `/usr/local/sysflow/resources/handlers`
+- actions dir: `/usr/local/sysflow/resources/actions`
+
+The default configuration can be changed by setting up virtual mounts mapping the host directories/files into the container using the volumes section of the sf-processor in the docker-compose.yaml.
+
+```yaml
+sf-processor:
+  container_name: sf-processor
+  image: sysflowtelemetry/sf-processor:latest
+  privileged: true
+  volumes:
+    ...
+    - ./path/to/my.pipeline.json:/usr/local/sysflow/conf/pipeline.json
+```
+
+The policy location can be overwritten by setting the `POLICYENGINE_POLICIES` environment variable, which can point to a policy file or a directory containing policy files (must have the yaml extension).
+
+The docker container uses a default `filter.yaml` policy that outputs SysFlow records in json. You can use your own policy files from the host by mounting your policy directory into the container as follows, in which case the custom pipeline points to the mounted policies.
+
+```yaml
+sf-processor:
+  container_name: sf-processor
+  image: sysflowtelemetry/sf-processor:latest
+  privileged: true
+  volumes:
+    ...
+    - ./path/to/my.pipeline.json:/usr/local/sysflow/conf/pipeline.json
+    - ./path/to/policies/:/usr/local/sysflow/resources/policies/
+```
diff --git a/docs/PLUGINS.md b/docs/PLUGINS.md
new file mode 100644
index 00000000..b124b3eb
--- /dev/null
+++ b/docs/PLUGINS.md
@@ -0,0 +1,409 @@
+## Plugins
+
+In addition to its core plugins, the processor also supports custom plugins that can be dynamically loaded into the processor via a compiled golang shared library using the [golang plugin package](https://golang.org/pkg/plugin/). Custom plugins enable easy extension of the processor and the creation of custom pipelines tailored to specific use cases.
+
+The processor supports four types of plugins:
+
+* **drivers**: enable the ingestion of different telemetry sources into the processor pipeline.
+* **processors**: enable the creation of custom data processing and analytic plugins to extend sf-processor pipelines.
+* **handlers**: enable the creation of custom SysFlow record handling plugins.
+* **actions**: enable the creation of custom action plugins to extend sf-processor's policy engine.
+
+### Pre-requisites
+
+* Go 1.17 (if building locally, without the plugin builder)
+
+### Processor Plugins
+
+User-defined plugins can be plugged in to extend the sf-processor pipeline. These are the most generic type of plugins, from which all built-in processor plugins are built. Check the `core` package for examples. We have built-in processor plugins for flattening the telemetry stream, implementing a policy engine, and creating event exporters.
+
+#### Interface
+
+Processor plugins (or just plugins) are implemented via the golang plugin mechanism. A plugin must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-apis/go/plugins` package.
+
+```go
+// SFProcessor defines the SysFlow processor interface.
+type SFProcessor interface {
+    Register(pc SFPluginCache)
+    Init(conf map[string]interface{}) error
+    Process(ch interface{}, wg *sync.WaitGroup)
+    GetName() string
+    SetOutChan(ch []interface{})
+    Cleanup()
+}
+```
+
+The `Process` function is the main function of the plugin. It's where the "main loop" of the plugin should be implemented. It receives the input channel configured in the custom plugin's block in the pipeline configuration. It also receives the pipeline thread WaitGroup. Custom processing code should be implemented using this function. `Init` is called once, when the pipeline is loaded. `Cleanup` is called when the pipeline is terminated. `SetOutChan` receives a slice with the output channels configured in the plugin's block in the pipeline configuration.
+
+When loading a pipeline, sf-processor performs a series of health checks before the pipeline is enabled. If these health checks fail, the processor terminates. To enable health checks on custom plugins, implement the `Test` function defined in the interface below. For an example, check `core/exporter/exporter.go`.
+
+```go
+// SFTestableProcessor defines a testable SysFlow processor interface.
+type SFTestableProcessor interface {
+    SFProcessor
+    Test() (bool, error)
+}
+```
+
+#### Example
+
+A dynamic plugin example is provided in [github](https://github.com/sysflow-telemetry/sf-processor/tree/master/plugins/processors/example). The core of the plugin is building an object that implements an [SFProcessor interface](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/processor.go). Such an implementation looks as follows:
+
+```golang
+package main
+
+import (
+    "sync"
+
+    "github.com/sysflow-telemetry/sf-apis/go/logger"
+    "github.com/sysflow-telemetry/sf-apis/go/plugins"
+    "github.com/sysflow-telemetry/sf-apis/go/sfgo"
+    "github.com/sysflow-telemetry/sf-processor/core/flattener"
+)
+
+const (
+    pluginName string = "example"
+)
+
+// Plugin exports a symbol for this plugin.
+var Plugin Example
+
+// Example defines an example plugin.
+type Example struct{}
+
+// NewExample creates a new plugin instance.
+func NewExample() plugins.SFProcessor {
+    return new(Example)
+}
+
+// GetName returns the plugin name.
+func (s *Example) GetName() string {
+    return pluginName
+}
+
+// Init initializes the plugin with a configuration map.
+func (s *Example) Init(conf map[string]interface{}) error {
+    return nil
+}
+
+// Register registers plugin to plugin cache.
+func (s *Example) Register(pc plugins.SFPluginCache) {
+    pc.AddProcessor(pluginName, NewExample)
+}
+
+// Process implements the main interface of the plugin.
+func (s *Example) Process(ch interface{}, wg *sync.WaitGroup) {
+    cha := ch.(*flattener.FlatChannel)
+    record := cha.In
+    logger.Trace.Println("Example channel capacity:", cap(record))
+    defer wg.Done()
+    logger.Trace.Println("Starting Example")
+    for {
+        fc, ok := <-record
+        if !ok {
+            logger.Trace.Println("Channel closed. Shutting down.")
+            break
+        }
+        if fc.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] == sfgo.PROC_EVT {
+            logger.Info.Printf("Process Event: %s, %d", fc.Strs[sfgo.SYSFLOW_IDX][sfgo.PROC_EXE_STR], fc.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_TID_INT])
+        }
+    }
+    logger.Trace.Println("Exiting Example")
+}
+
+// SetOutChan sets the output channel of the plugin.
+func (s *Example) SetOutChan(ch []interface{}) {}
+
+// Cleanup tears down plugin resources.
+func (s *Example) Cleanup() {}
+
+// This function is not run when module is used as a plugin.
+func main() {}
+
+```
+
+The custom plugin must implement the following interface:
+
+* `GetName()` - returns a lowercase string representing the plugin's label. This label is important, because it identifies the plugin in the `pipeline.json` file, enabling the processor to load the plugin. In the object above, this plugin is called `example`. Note that the label must be unique.
+* `Init(conf map[string]interface{}) error` - used to initialize the plugin. The configuration map that is passed to the function stores all the configuration information defined in the plugin's definition inside `pipeline.json` (more on this later).
+* `Register(pc plugins.SFPluginCache)` - this registers the plugin with the plugin cache of the processor.
+  * `pc.AddProcessor(pluginName, <plugin constructor function>)` (required) - registers the plugin named `example` with the processor. You must define a constructor function using the convention `New<PluginName>`, which is used to instantiate the plugin and return it as an `SFProcessor` interface - see `NewExample` for an example.
+  * `pc.AddChannel(channelName, <output channel constructor function>)` (optional) - if your plugin is using a custom output channel of objects (i.e., the channel used to pass output objects from this plugin to the next in the pipeline), it should be registered here as well.
+    * The `channelName` should be a lowercase unique label defining the channel type.
+    * The constructor function should return a golang `interface{}` representing an object that has an `In` attribute of type `chan <record type>`. We will call this object a wrapped channel object going forward. For example, the channel object that passes sysflow objects is called SFChannel, and is defined [here](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/processor.go).
+    * For a complete example of defining an output channel, see `NewFlattenerChan` in the [flattener](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/flattener/flattener.go) as well as the `Register` function. The `FlatChannel` is defined [here](https://github.com/sysflow-telemetry/sf-apis/blob/master/go/plugins/handler.go).
+* `Process(ch interface{}, wg *sync.WaitGroup)` - this function is launched by the processor as a go thread and is where the main plugin processing occurs. It takes a wrapped channel object, which acts as the input data source to the plugin (i.e., this is the channel that is configured as the input channel to the plugin in the pipeline.json). It also takes a sync.WaitGroup object, which is used to signal to the processor when the plugin has completed running (see `defer wg.Done()` in the code). The plugin must loop on the input channel, and do its analysis on each input record. In this case, the example plugin is reading flat records and printing them to the screen.
+* `SetOutChan(ch []interface{})` - sets the wrapped channels that will serve as the output channels for the plugin. The output channels are instantiated by the processor, which is also in charge of stitching the plugins together. If the plugin is the last one in the chain, then this function can be left empty. See the `SetOutputChan` function in the [flattener](https://github.com/sysflow-telemetry/sf-processor/blob/master/core/flattener/flattener.go) to see how an output channel is implemented.
+* `Cleanup()` - used to clean up any resources. This function is called by the processor after the plugin `Process` function exits. One of the key items to close in the `Cleanup` function is the output channel using the golang `close()` [function](https://gobyexample.com/closing-channels).
Closing the output channel enables the pipeline to be torn down gracefully and in sequence.
+* `main(){}` - this main method is not used by the plugin or processor. It's required by golang in order to be able to compile the plugin as a shared object.
+
+To compile the example plugin, use the provided Makefile:
+
+```bash
+make -C plugins/processors/example
+```
+
+This will build the plugin and copy it into `resources/plugins/`.
+
+To use the new plugin, use the configuration provided in [github](https://github.com/sysflow-telemetry/sf-processor/tree/master/plugins/processors/example), which defines the following pipeline:
+
+```json
+{
+  "pipeline":[
+    {
+      "processor": "sysflowreader",
+      "handler": "flattener",
+      "in": "sysflow sysflowchan",
+      "out": "flat flattenerchan"
+    },
+    {
+      "processor": "example",
+      "in": "flat flattenerchan"
+    }
+  ]
+}
+```
+
+This pipeline contains two plugins:
+* The built-in `sysflowreader` plugin with the flattener handler, which takes raw sysflow objects and flattens them into arrays of integers and strings for easier processing in certain plugins like the policy engine.
+* The `example` plugin, which takes the flattened output from the sysflowreader plugin and prints it to the screen.
+
+The key item to note is that the output channel (i.e., `out`) of `sysflowreader` matches the input channel (i.e., `in`) of the example plugin. This ensures that the plugins will be properly stitched together.
+
+#### Build
+
+The `example` plugin is a custom plugin that illustrates how to implement a minimal plugin that reads the records from the input channel and logs them to the standard output.
+
+To run this example, in the root of sf-processor, build the processor and the example plugin. Note that this plugin's shared object is generated in `resources/plugins/example.so`.
+
+```bash
+make build && make -C plugins/processors/example
+```
+
+Then, run:
+
+```bash
+cd driver && ./sfprocessor -log=info -config=../plugins/processors/example/pipeline.example.json ../resources/traces/tcp.sf
+```
+
+#### Plugin builder
+
+To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages.
+
+Below is an example of how this can be achieved. Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`.
+
+First, build the plugin:
+
+```bash
+docker run --rm \
+  -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \
+  -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \
+  sysflowtelemetry/plugin-builder:$TAG \
+  make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/processors/example
+```
+
+To test it, run the pre-built processor with the example configuration and trace.
+ +```bash +docker run --rm \ + -v $(pwd)/plugins:/usr/local/sysflow/plugins \ + -v $(pwd)/resources:/usr/local/sysflow/resources \ + -w /usr/local/sysflow/bin \ + --entrypoint=/usr/local/sysflow/bin/sfprocessor \ + sysflowtelemetry/sf-processor:$TAG \ + -log=info -config=../plugins/processors/example/pipeline.example.json ../resources/traces/tcp.sf +``` + +The output on the above pre-recorded trace should look like this: + +```plain +[Health] 2022/02/21 12:55:19 pipeline.go:246: Health checks: passed +[Info] 2022/02/21 12:55:19 main.go:147: Successfully loaded pipeline configuration +[Info] 2022/02/21 12:55:19 pipeline.go:170: Starting the processing pipeline +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./server, 13823 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./client, 13824 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./client, 13824 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./server, 13823 +``` + +### Handler Plugins + +User-defined handler modules can be plugged to the built-in SysFlow `processor` plugin to implement custom data processing and analytic pipelines. + +#### Interface + +Handlers are implemented via the golang plugin mechanism. A handler must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-apis/go/plugins` package. + +```go +// SFHandler defines the SysFlow handler interface. +type SFHandler interface { + RegisterChannel(pc SFPluginCache) + RegisterHandler(hc SFHandlerCache) + Init(conf map[string]interface{}) error + IsEntityEnabled() bool + HandleHeader(sf *CtxSysFlow, hdr *sfgo.SFHeader) error + HandleContainer(sf *CtxSysFlow, cont *sfgo.Container) error + HandleProcess(sf *CtxSysFlow, proc *sfgo.Process) error + HandleFile(sf *CtxSysFlow, file *sfgo.File) error + HandleNetFlow(sf *CtxSysFlow, nf *sfgo.NetworkFlow) error + HandleNetEvt(sf *CtxSysFlow, ne *sfgo.NetworkEvent) error + HandleFileFlow(sf *CtxSysFlow, ff *sfgo.FileFlow) error + HandleFileEvt(sf *CtxSysFlow, fe *sfgo.FileEvent) error + HandleProcFlow(sf *CtxSysFlow, pf *sfgo.ProcessFlow) error + HandleProcEvt(sf *CtxSysFlow, pe *sfgo.ProcessEvent) error + SetOutChan(ch []interface{}) + Cleanup() +} +``` + +Each `Handle*` function receives the current SysFlow record being processed along with its corresponding parsed record type. Custom processing code should be implemented using these functions. + +#### Build + +The `printer` handler is a pluggable handler that logs select SysFlow records to the standard output. This plugin doesn't define any output channels, so it acts as a plugin sink (last plugin in a pipeline). + +To run this example, in the root of sf-processor, build the processor and the handler plugin. Note, this plugin's shared object is generated in `resources/handlers/printer.so`. + +```bash +make build && make -C plugins/handlers/printer +``` + +Then, run: + +```bash +cd driver && ./sfprocessor -log=info -config=../plugins/handlers/printer/pipeline.printer.json ../resources/traces/tcp.sf +``` + +#### Plugin builder + +To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages. + +Below is an example of how this can be achieved. Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`. 
+ +First, build the plugin: + +```bash +docker run --rm \ + -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \ + -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \ + sysflowtelemetry/plugin-builder:$TAG \ + make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/handlers/printer +``` + +To test it, run the pre-built processor with the example configuration and trace. + +```bash +docker run --rm \ + -v $(pwd)/plugins:/usr/local/sysflow/plugins \ + -v $(pwd)/resources:/usr/local/sysflow/resources \ + -w /usr/local/sysflow/bin \ + --entrypoint=/usr/local/sysflow/bin/sfprocessor \ + sysflowtelemetry/sf-processor:$TAG \ + -log=info -config=../plugins/handlers/printer/pipeline.printer.json ../resources/traces/tcp.sf +``` + +The output on the above pre-recorded trace should look like this: + +```plain +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./server, 13823 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./server, 3 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./server, 3 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./client, 13824 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./client, 3 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./client, 3 +[Info] 2022/02/21 15:39:58 printer.go:94: NetworkFlow ./client, 8080 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./client, 13824 +[Info] 2022/02/21 15:39:58 printer.go:94: NetworkFlow ./server, 8080 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./server, 13823 +``` + +### Action Plugins + +User-defined actions can be plugged to SysFlow's Policy Engine rule declarations to perform additional processing on matched records. + +#### Interface + +Actions are implemented via the golang plugin mechanism. An action must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-processor/core/policyengine/engine` package. + +```go +// Prototype of an action function +type ActionFunc func(r *Record) error + +// Action interface for user-defined actions +type Action interface { + GetName() string + GetFunc() ActionFunc +} +``` + +Actions have a name and an action function. Within a single policy engine instance, action names must be unique. User-defined actions cannot re-declare built-in actions. Reusing names of user-defined actions overwrites previously registered actions. + +The action function receives the current record as an argument and thus has access to all record attributes. The action result can be stored in the record context via the context modifier methods. + +#### Build + +The `now` action is a pluggable action that creates a tag containing the current time in nanosecond precision. + +First, in the root of sf-processor, build the processor and the action plugin. Note, this plugin's shared object is generated in `resources/actions/now.so`. + +```bash +make build && make -C plugins/actions/example +``` + +Then, run: + +```bash +cd driver && ./sfprocessor -log=quiet -config=../plugins/actions/example/pipeline.actions.json ../resources/traces/tcp.sf +``` + +#### Plugin builder + +To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages. + +Below is an example of how this can be achieved. 
Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`.
+
+First, build the plugin:
+
+```bash
+docker run --rm \
+  -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \
+  -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \
+  sysflowtelemetry/plugin-builder:$TAG \
+  make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/actions/example
+```
+
+To test it, run the pre-built processor with the example configuration and trace.
+
+```bash
+docker run --rm \
+  -v $(pwd)/plugins:/usr/local/sysflow/plugins \
+  -v $(pwd)/resources:/usr/local/sysflow/resources \
+  -w /usr/local/sysflow/bin \
+  --entrypoint=/usr/local/sysflow/bin/sfprocessor \
+  sysflowtelemetry/sf-processor:$TAG \
+  -log=quiet -config=../plugins/actions/example/pipeline.actions.json ../resources/traces/tcp.sf
+```
+
+In the output, observe that all records matching the policy specified in `pipeline.actions.json` are tagged by action `now` with the tag `now_in_nanos`. For example:
+
+```plain
+{
+  "version": 4,
+  "endts": 0,
+  "opflags": [
+    "EXEC"
+  ],
+  ...
+  "policies": [
+    {
+      "id": "Action example",
+      "desc": "user-defined action example",
+      "priority": 0
+    }
+  ],
+  "tags": [
+    "now_in_nanos:1645409122055957900"
+  ]
+}
+```
diff --git a/docs/POLICIES.md b/docs/POLICIES.md
new file mode 100644
index 00000000..57a9e349
--- /dev/null
+++ b/docs/POLICIES.md
@@ -0,0 +1,189 @@
+## Policy Language
+
+The policy engine adopts and extends the Falco rules definition syntax. Before reading the rest of this section, please go through the [Falco Rules](https://falco.org/docs/rules/) documentation to get familiar with _rule_, _macro_, and _list_ syntax, all of which are supported in our policy engine. Policies are written in one or more `yaml` files, and stored in a directory specified in the pipeline configuration file under the `policies` attribute of the policy engine plugin.
+
+*Rules* contain the following fields:
+
+- _rule_: the name of the rule
+- _description_: a textual description of the rule
+- _condition_: a set of logical operations that can reference lists and macros, which when evaluating to _true_, can trigger record enrichment or alert creation (depending on the policy engine mode)
+- _action_: a comma-separated list of actions to take place when the rule evaluates to _true_. For a particular rule, actions are evaluated in the order they are specified, i.e., an action can make use of the results provided by earlier actions. An action is just the name of an action function without any parameters. The current version only supports pluggable user-defined actions. See [here](#user-defined-actions) for a detailed description of the plugin interface and a sample action plugin.
+- _priority_: label representing the severity of the alert; can be: (1) low, medium, or high, or (2) emergency, alert, critical, error, warning, notice, informational, debug.
+- _tags_ (optional): set of labels appended to the alert (default: empty).
+- _prefilter_ (optional): list of record types (`sf.type`) to whitelist before applying the rule condition (default: empty).
+- _enabled_ (optional): indicates whether the rule is enabled (default: true).
+
+> **NOTE:** The syntax of the policy language changed slightly with the switch to release 0.4.0. For migrating policy files used with prior releases to release 0.4.0 or higher, simply remove all `action: [tag]` lines. As of release 0.4.0, tagging is done automatically.
If a rule triggers, all tags specified via the _tags_ key will be appended to the record. The _action_ key is reserved for specifying user-defined action plugins.
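+
+For illustration, a hypothetical pre-0.4.0 rule that used the old tagging action:
+
+```yaml
+- rule: Package installer detected
+  desc: Use of package installer detected
+  condition: sf.opflags = EXEC and package_installers
+  action: [tag]
+  priority: medium
+  tags: [suspicious-process]
+```
+
+becomes, after removing the `action: [tag]` line:
+
+```yaml
+- rule: Package installer detected
+  desc: Use of package installer detected
+  condition: sf.opflags = EXEC and package_installers
+  priority: medium
+  tags: [suspicious-process]
+```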

+ +*Macros* are named conditions and contain the following fields: + +- _macro_: the name of the macro +- _condition_: a set of logical operations that can reference lists and macros, which evaluate to _true_ or _false_ + +*Lists* are named collections and contain the following fields: + +- _list_: the name of the list +- _items_: a collection of values or lists + +*Drop* rules block records matching a condition and can be used for reducing the amount of records processed by the policy engine: + +- _drop_: the name of the filter +- _condition_: a set of logical operations that can reference lists and macros, which evaluate to _true_ or _false_ + +For example, the rule below specifies that matching records are process events (`sf.type = PE`), denoting `EXEC` operations (`sf.opflags = EXEC`) for which the process matches macro `package_installers`. Additionally, the global filter `containers` preemptively removes from the processing stream any records for processes not running in a container environment. + +```yaml +# lists +- list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] + +- list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit] + +- list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client] + +# macros +- macro: package_installers + condition: sf.proc.name pmatch (package_mgmt_binaries) + +# global filters (blacklisting) +- filter: containers + condition: sf.container.type = host + +# rule definitions +- rule: Package installer detected + desc: Use of package installer detected + condition: sf.opflags = EXEC and package_installers + priority: medium + tags: [actionable-offense, suspicious-process] + prefilter: [PE] # record types for which this rule should be applied (whitelisting) + enabled: true +``` + +### Attribute names + +The following table shows a detailed list of attribute names supported by the policy engine, as well as their +type, and comparative Falco attribute name. Our policy engine supports both SysFlow and Falco attribute naming convention to enable reuse of policies across the two frameworks. 
+ +| Attributes | Description | Values | Falco Attribute | +|:----------------|:-----------------|:------|----------| +| sf.type | Record type | PE,PF,NF,FF,FE,KE | N/A | +| sf.opflags | Operation flags | [Operation Flags List](https://sysflow.readthedocs.io/en/latest/spec.html#operation-flags): remove `OP_` prefix | evt.type (remapped as falco event types) | +| sf.ret | Return code | int | evt.res | +| sf.ts | start timestamp(ns)| int64 | evt.time | +| sf.endts | end timestamp(ns) | int64 | N/A | +| sf.proc.pid | Process PID | int64 | proc.pid | +| sf.proc.tid | Thread PID | int64 | thread.tid | +| sf.proc.uid | Process user ID | int | user.uid | +| sf.proc.user | Process user name | string | user.name | +| sf.proc.gid | Process group ID | int | group.gid | +| sf.proc.group | Process group name | string | group.name | +| sf.proc.apid | Proc ancestors PIDs (qo) | int64 | proc.apid | +| sf.proc.aname | Proc anctrs names (qo) (exclude path) | string | proc.aname | +| sf.proc.exe | Process command/filename (with path) | string | proc.exe | +| sf.proc.args | Process command arguments | string | proc.args | +| sf.proc.name | Process name (qo) (exclude path) | string | proc.name | +| sf.proc.cmdline | Process command line (qo) | string | proc.cmdline | +| sf.proc.tty | Process TTY status | boolean | proc.tty | +| sf.proc.entry | Process container entrypoint | bool | proc.vpid == 1 | +| sf.proc.createts | Process creation timestamp (ns) | int64 | N/A | +| sf.pproc.pid | Parent process ID | int64 | proc.ppid | +| sf.pproc.gid | Parent process group ID | int64 | N/A | +| sf.pproc.uid | Parent process user ID | int64 | N/A | +| sf.pproc.group | Parent process group name | string | N/A | +| sf.pproc.tty | Parent process TTY status | bool | N/A | +| sf.pproc.entry | Parent process container entry | bool | N/A | +| sf.pproc.user | Parent process user name | string | N/A | +| sf.pproc.exe | Parent process command/filename | string | N/A | +| sf.pproc.args | Parent process command arguments | string | N/A | +| sf.pproc.name | Parent process name (qo) (no path) | string | proc.pname | +| sf.pproc.cmdline | Parent process command line (qo) | string | proc.pcmdline | +| sf.pproc.createts | Parent process creation timestamp | int64 | N/A | +| sf.file.fd | File descriptor number | int | fd.num | +| sf.file.path | File path | string | fd.name | +| sf.file.newpath | New file path (used in some FileEvents) | string | N/A | +| sf.file.name | File name (qo) | string | fd.filename | +| sf.file.directory | File directory (qo) | string | fd.directory | +| sf.file.type | File type | char 'f': file, 4: IPv4, 6: IPv6, 'u': unix socket, 'p': pipe, 'e': eventfd, 's': signalfd, 'l': eventpoll, 'i': inotify, 'o': unknown. 
| fd.typechar |
+| sf.file.is_open_write | File open with write flag (qo) | bool | evt.is_open_write |
+| sf.file.is_open_read | File open with read flag (qo) | bool | evt.is_open_read |
+| sf.file.openflags | File open flags | int | evt.args |
+| sf.net.proto | Network protocol | int | fd.l4proto |
+| sf.net.sport | Source port | int | fd.sport |
+| sf.net.dport | Destination port | int | fd.dport |
+| sf.net.port | Src or Dst port (qo) | int | fd.port |
+| sf.net.sip | Source IP | int | fd.sip |
+| sf.net.dip | Destination IP | int | fd.dip |
+| sf.net.ip | Src or dst IP (qo) | int | fd.ip |
+| sf.res | File or network resource | string | fd.name |
+| sf.flow.rbytes | Flow bytes read/received | int64 | evt.res |
+| sf.flow.rops | Flow operations read/received | int64 | N/A |
+| sf.flow.wbytes | Flow bytes written/sent | int64 | evt.res |
+| sf.flow.wops | Flow operations written/sent | int64 | N/A |
+| sf.container.id | Container ID | string | container.id |
+| sf.container.name | Container name | string | container.name |
+| sf.container.image.id | Container image ID | string | container.image.id |
+| sf.container.image | Container image name | string | container.image |
+| sf.container.type | Container type | CT_DOCKER, CT_LXC, CT_LIBVIRT_LXC, CT_MESOS, CT_RKT, CT_CUSTOM, CT_CRI, CT_CONTAINERD, CT_CRIO, CT_BPM | container.type |
+| sf.container.privileged | Container privilege status | bool | container.privileged |
+| sf.pod.ts | Pod creation timestamp | int | N/A |
+| sf.pod.id | Pod id | string | N/A |
+| sf.pod.name | Pod name | string | N/A |
+| sf.pod.nodename | Pod node name | string | N/A |
+| sf.pod.namespace | Pod namespace | string | N/A |
+| sf.pod.restartcnt | Pod restart count | int | N/A |
+| sf.pod.hostip | Pod host IP addresses | json | N/A |
+| sf.pod.internalip | Pod internal IP addresses | json | N/A |
+| sf.pod.services | Pod services | json | N/A |
+| sf.ke.action | Kubernetes event action | K8S_COMPONENT_ADDED, K8S_COMPONENT_MODIFIED, K8S_COMPONENT_DELETED, K8S_COMPONENT_ERROR, K8S_COMPONENTNONEXISTENT, K8S_COMPONENT_UNKNOWN | N/A |
+| sf.ke.kind | Kubernetes event resource type | K8S_NODES, K8S_NAMESPACES, K8S_PODS, K8S_REPLICATIONCONTROLLERS, K8S_SERVICES, K8S_EVENTS, K8S_REPLICASETS, K8S_DAEMONSETS, K8S_DEPLOYMENT, K8S_UNKNOWN | N/A |
+| sf.ke.message | Kubernetes event json message | json | N/A |
+| sf.node.id | Node identifier | string | N/A |
+| sf.node.ip | Node IP address | string | N/A |
+| sf.schema.version | SysFlow schema version | string | N/A |
+| sf.version | SysFlow JSON schema version | int | N/A |
+
+### Jsonpath Expressions
+
+Unlike attributes of the scalar types bool, int(64), and string, attributes of type `json` contain structured information in the form of stringified json records. The policy language allows access to subfields inside such json records via [GJSON](https://github.com/tidwall/gjson) jsonpath expressions. The jsonpath expression must be specified as a suffix to the attribute enclosed in square brackets. Examples of such terms are:
+
+```
+sf.pod.services[0.clusterip.0] - the first cluster IP address of the first service associated with a pod
+sf.ke.message[items.0.namespace] - the namespace of the first item in a KE message attribute
+```
+
+See the [GJSON path syntax](https://github.com/tidwall/gjson#path-syntax) for more details. The result of applying a jsonpath expression to a json attribute is always of type string.
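+
+For illustration, such a jsonpath term can be used in a condition like any other string attribute. The rule below is only a sketch, not one shipped with the processor:
+
+```yaml
+- rule: K8s event in kube-system namespace
+  desc: Kubernetes component event observed in the kube-system namespace
+  condition: sf.type = KE and sf.ke.message[items.0.namespace] = kube-system
+  priority: low
+  prefilter: [KE]
+```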
+ +### Operations + +The policy language supports the following operations: + +| Operation | Description | Example | +|:----------|:------------|:--------| +| A and B | Returns true if both statements are true | sf.pproc.name=bash and sf.pproc.cmdline contains echo | +| A or B | Returns true if one of the statements are true | sf.file.path = "/etc/passwd" or sf.file.path = "/etc/shadow" | +| not A | Returns true if the statement isn't true | not sf.pproc.exe = /usr/local/sbin/runc | +| A = B| Returns true if A exactly matches B. Note, if B is a list, A only has to exact match one element of the list. If B is a list, it must be explicit. It cannot be a variable. If B is a variable use `in` instead. | sf.file.path = ["/etc/passwd", "/etc/shadow"] | +| A != B| Returns true if A is not equal to B. Note, if B is a list, A only has to be not equal to one element of the list. If B is a list, it must be explicit. It cannot be a variable. | sf.file.path != "/etc/passwd"| +| A < B | Returns true if A is less than B. Note, if B is a list, A only has to be less than one element in the list. If B is a list, it must be explicit. It cannot be a variable. | sf.flow.wops < 1000 | +| A <= B | Returns true if A is less than or equal to B. Note, if B is a list, A only has to be less than or equal to one element in the list. If B is a list, it must be explicit. It cannot be a variable. | sf.flow.wops <= 1000 | +| A > B | Returns true if A is greater than B. Note, if B is a list, A only has to be greater than one element in the list. If B is a list, it must be explicit. It cannot be a variable. | sf.flow.wops > 1000 | +| A >= B | Returns true if A is greater than or equal to B. Note, if B is a list, A only has to be greater than or equal to one element in the list. If B is a list, it must be explicit. It cannot be a variable. | sf.flow.wops >= 1000 | +| A in B | Returns true if value A is an exact match to one of the elements in list B. Note: B must be a list. Note: () can be used on B to merge multiple list objects into one list. | sf.proc.exe in (bin_binaries, usr_bin_binaries) | +| A startswith B | Returns true if string A starts with string B | sf.file.path startswith '/home' | +| A endswith B | Returns true if string A ends with string B | sf.file.path endswith '.json' | +| A contains B | Returns true if string A contains string B | sf.pproc.name=java and sf.pproc.cmdline contains org.apache.hadoop | +| A icontains B | Returns true if string A contains string B ignoring capitalization | sf.pproc.name=java and sf.pproc.cmdline icontains org.apache.hadooP | +| A pmatch B | Returns true if string A partial matches one of the elements in B. Note: B must be a list. Note: () can be used on B to merge multiple list objects into one list. | sf.proc.name pmatch (modify_passwd_binaries, verify_passwd_binaries, user_util_binaries) | +| exists A | Checks if A is not a zero value (i.e. 0 for int, "" for string)| exists sf.file.path | + +See the resources policies directory in [github](https://github.com/sysflow-telemetry/sf-processor/tree/master/resources/policies) for examples. Feel free to contribute new and interesting rules through a github pull request. + +### User-defined Actions + +User-defined actions are implemented via the golang plugin mechanism. Check the documentation on [Action Plugins](https://sysflow.readthedocs.io/en/latest/processor.html#action-plugins) for a custom action plugin example. 
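+
+For illustration, a rule can invoke a user-defined action via the _action_ key once the corresponding plugin is available to the policy engine (see the _actiondir_ attribute in the processor configuration). The snippet below is a sketch using the `now` example action described in the Plugins documentation:
+
+```yaml
+- rule: Action example
+  desc: user-defined action example
+  condition: sf.opflags = EXEC
+  action: [now]
+  priority: low
+```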
diff --git a/driver/go.mod b/driver/go.mod index 242d9adc..2842996b 100644 --- a/driver/go.mod +++ b/driver/go.mod @@ -4,6 +4,7 @@ // Authors: // Frederico Araujo // Teryl Taylor +// Andreas Schade // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,34 +18,73 @@ // See the License for the specific language governing permissions and // limitations under the License. // -module github.ibm.com/sysflow/sf-processor/driver +module github.com/sysflow-telemetry/sf-processor/driver -go 1.14 +go 1.19 require ( - github.com/actgardner/gogen-avro/v7 v7.1.1 - github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/kr/pretty v0.2.0 // indirect + github.com/actgardner/gogen-avro/v7 v7.3.1 github.com/linkedin/goavro v2.1.0+incompatible - github.com/mitchellh/mapstructure v1.2.2 // indirect - github.com/pelletier/go-toml v1.7.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/paulbellamy/ratecounter v0.2.0 + github.com/spf13/viper v1.10.1 + github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 + github.com/sysflow-telemetry/sf-processor/core v0.0.0-20220221021811-25c7181c2904 +) + +require ( + github.com/IBM/go-sdk-core/v5 v5.9.2 // indirect + github.com/IBM/scc-go-sdk/v3 v3.1.5 // indirect + github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 // indirect + github.com/alecthomas/participle v0.7.1 // indirect + github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect + github.com/bradleyjkemp/sigma-go v0.5.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-openapi/errors v0.19.8 // indirect + github.com/go-openapi/strfmt v0.21.1 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.6.3 - github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0 - github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027025951-2690acc0c563 - golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/ini.v1 v1.55.0 // indirect - gopkg.in/linkedin/goavro.v1 v1.0.5 // indirect + github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 // 
indirect + github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/tidwall/gjson v1.14.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + go.mongodb.org/mongo-driver v1.7.5 // indirect + golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + gopkg.in/go-playground/validator.v9 v9.31.0 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) -replace github.ibm.com/sysflow/sf-processor/core => ../core +replace github.com/sysflow-telemetry/sf-processor/core => ../core replace ( github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 - github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 - github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d ) diff --git a/driver/go.sum b/driver/go.sum index 8b201703..f99810e7 100644 --- a/driver/go.sum +++ b/driver/go.sum @@ -1,304 +1,334 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/IBM/go-sdk-core/v5 v5.7.0/go.mod h1:+YbdhrjCHC84ls4MeBp+Hj4NZCni+tDAc0XQUqRO9Jc= +github.com/IBM/go-sdk-core/v5 v5.9.2 h1:QKB5JwhlZfRvFHqcOwMeu/Dis/Q7qCBxrQLhx04onMc= +github.com/IBM/go-sdk-core/v5 v5.9.2/go.mod h1:YlOwV9LeuclmT/qi/LAK2AsobbAP42veV0j68/rlZsE= +github.com/IBM/scc-go-sdk/v3 v3.1.5 h1:6JUivsaIb32NorA0+Fb/KsI6MSZaoDS8BFgtl/wyjh4= +github.com/IBM/scc-go-sdk/v3 v3.1.5/go.mod h1:cBxkth9AIOcKQx4Gy9bWgyGYa7vYwHAalUBvY+O8xAE= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE= github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ= -github.com/actgardner/gogen-avro v6.5.0+incompatible h1:P73NiZR/S0lBWQDkK6mbvdgBXRc6e0/AaaSTqu/AvLI= -github.com/actgardner/gogen-avro v6.5.0+incompatible/go.mod h1:N2PzqZtS+5w9xxGp2daeykhWdTL0lBiRhbbvkVj4Yd8= -github.com/actgardner/gogen-avro/v7 v7.1.1 h1:fAKfqQNIDIXq4Pwop3Fqu+0Tym5PuAX/cMVbdEIuVdM= -github.com/actgardner/gogen-avro/v7 v7.1.1/go.mod h1:DALbHv5zAeoz7KJ/fPAvl+d8Ixcy6x8Fjo+PO0YM8mU= -github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d h1:g0M6kedfjDpyAAuxqBvJzMNjFzlrQ7Av6LCDFqWierk= -github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d/go.mod 
h1:VykaKG/ofkKje+MSvqjrDsz1wfyHIvEVFljhq2EOZ4g= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/actgardner/gogen-avro/v7 v7.3.1 h1:6JJU3o7168lcyIB6uXYyYdflCsJT3aMFKZPSpSc4toI= +github.com/actgardner/gogen-avro/v7 v7.3.1/go.mod h1:1d45RpDvI29sU7l9wUxlRTEglZSdQSbd6bDbWJaEMgo= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= +github.com/alecthomas/participle v0.7.1/go.mod h1:HfdmEuwvr12HXQN44HPWXR0lHmVolVYe4dyL6lQ3duY= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0 h1:j7MyDjg6pb7A2ziow17FDZ2Oj5vGnJsLyDmjpN4Jkcg= github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ= -github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= +github.com/bradleyjkemp/sigma-go v0.5.1 h1:2a747+swYse4KfIvLRCg49q118MSONk5+W/JeGM40cc= +github.com/bradleyjkemp/sigma-go v0.5.1/go.mod h1:ZiTmCLylS8LOQPm1/2FuNDlSteiWwuHWScE69vOhh8c= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/enriquebris/goconcurrentqueue v0.6.0 h1:DJ97cgoPVoqlC4tTGBokn/omaB3o16yIs5QdAm6YEjc= -github.com/enriquebris/goconcurrentqueue v0.6.0/go.mod h1:wGJhQNFI4wLNHleZLo5ehk1puj8M6OIl0tOjs3kwJus= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae h1:sZOzFMm2XxvAO0hwo0k1XUyKusaUedme7rnUMXF96zs= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210427093042-01613f93a7ae/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0/go.mod 
h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA= +github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.21.1 h1:G6s2t5V5kGCHLVbSdZ/6lI8Wm4OzoPFkc3/cjAsKQrM= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod 
h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josephspurrier/goversioninfo v0.0.0-20200309025242-14b0ab84c6ca/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/johnstarich/go/gopages v0.1.8/go.mod h1:OaSRjfHdFfN+LS7u6xqgNO7C2Uxjlvpm17DcKcvLBhY= +github.com/johnstarich/go/pipe v0.2.0/go.mod h1:3X9IdVJJnI7pkpzEH6np98wqHl55zFmbilKG+9+koMo= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/linkedin/goavro v2.1.0+incompatible h1:DV2aUlj2xZiuxQyvag8Dy7zjY69ENjS66bWkSfdpddY= github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= -github.com/linkedin/goavro/v2 v2.9.7/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties 
v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= -github.com/mattn/go-colorable v0.0.8/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= -github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= +github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408 h1:l1nqzjjPpj99dxtQizYjbzvIf2RBHneeuOoka3G7Lu4= +github.com/satta/gommunityid v0.0.0-20210315182841-1cdcb73ce408/go.mod h1:dz6UCF9ERHtGjdv5LwOTgZxng/7IZm2spR/mXtTpLjc= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= -github.com/spf13/viper v1.6.3/go.mod 
h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96 h1:XbL0XPH5OZMVH5d0RhM0g9VXOKclsy9hVUh6+cem73c= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96/go.mod h1:rpTKky267xtopNUCoInTEZiaFkOrawhzE0HaZMEvIAI= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0 h1:ezn2cJhqCZTilHOX8S5botGSLf54V+PboDFsgXNr32M= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201029230226-b00c1f55c6b0/go.mod h1:rpTKky267xtopNUCoInTEZiaFkOrawhzE0HaZMEvIAI= -github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod 
h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= -github.ibm.com/sysflow/goutils v0.0.0-20200619144433-a13c12f45010 h1:XGJoAghFe4nG7oXWhGrnY5PAs2eaf+TuugOHnvsXD7I= -github.ibm.com/sysflow/goutils v0.0.0-20200619144433-a13c12f45010/go.mod h1:+n3eFg2LmQeYUF3HVggFkeEDWGM2ZP8jnyXAicoYMqM= -github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027025951-2690acc0c563 h1:4wBB0p2ziWgG6rkQmz1N5xKyeJ/+QGv6rmFfAnvpFAM= -github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027025951-2690acc0c563/go.mod h1:ECpqHM00b9hruN6vA+SdVNam/NZ2iwem//fOnfBwmVM= -go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= -go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8= -go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY= -go.elastic.co/ecszap v0.1.1-0.20200424093508-cdd95a104193/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= -go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 h1:ZxzwimQe2R4kYorqS33/l+m/+SXWMzPn1cLtpA1ExA0= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300/go.mod h1:rvE0WXuIQmACykrVpAKxP5Crf/7KvZplUTULATmAuf4= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210315170653-34ac3e1c2000/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20201021000207-d49c4edd7d96/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ= -gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/linkedin/goavro.v1 v1.0.5 h1:BJa69CDh0awSsLUmZ9+BowBdokpduDZSM9Zk8oKHfN4= -gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= 
+gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/driver/main.go b/driver/main.go index 542ee333..8e57d82e 100644 --- a/driver/main.go +++ b/driver/main.go @@ -1,4 +1,3 @@ -// // Copyright (C) 2020 IBM Corporation. // // Authors: @@ -9,20 +8,18 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// package main import ( "flag" "fmt" - "log" "os" "os/signal" "runtime" @@ -33,8 +30,8 @@ import ( "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" "github.com/sysflow-telemetry/sf-apis/go/sfgo" - "github.ibm.com/sysflow/sf-processor/driver/manifest" - "github.ibm.com/sysflow/sf-processor/driver/pipeline" + "github.com/sysflow-telemetry/sf-processor/driver/manifest" + "github.com/sysflow-telemetry/sf-processor/driver/pipeline" ) var pl plugins.SFPipeline @@ -51,24 +48,30 @@ func initSigTerm() { }() } -func main() { +func main() { os.Exit(run()) } + +func run() int { // setup interruption handler initSigTerm() // setup arg parsing - inputType := flag.String("driver", "file", fmt.Sprintf("Driver name {file|socket|}")) + inputType := flag.String("driver", "", fmt.Sprintf("Driver name {file|socket|}")) cpuprofile := flag.String("cpuprofile", "", "Write cpu profile to `file`") memprofile := flag.String("memprofile", "", "Write memory profile to `file`") traceprofile := flag.String("traceprofile", "", "Write trace profile to `file`") configFile := flag.String("config", "pipeline.json", "Path to pipeline configuration file") - logLevel := flag.String("log", "info", "Log level {trace|info|warn|error}") + logLevel := flag.String("log", "info", "Log level {trace|info|warn|error|health|quiet}") + perflog := flag.Bool("perflog", false, "Enable performance logging") driverDir := flag.String("driverdir", pipeline.DriverDir, "Dynamic driver directory") pluginDir := flag.String("plugdir", pipeline.PluginDir, "Dynamic plugins directory") - version := flag.Bool("version", false, "Outputs version information") + test := flag.Bool("test", false, "Test pipeline configuration") + version := flag.Bool("version", false, "Output version information") flag.Usage = func() { - fmt.Println("Usage: sfprocessor [[-version]|[-driver ] [-log ] [-driverdir ] [-plugdir ] path]") + fmt.Println(`Usage: sfprocessor [-version + |-test [-log ] [-config ] [-driverdir ] [-plugdir ]] + |[-driver ] [-log ] [-perflog] [-config ] [-driverdir ] [-plugdir ] [-cpuprofile ] [-memprofile ] [-traceprofile ] path]`) fmt.Println() fmt.Println("Positional arguments:") fmt.Println(" path string\n\tInput path") @@ -78,11 +81,11 @@ func main() { fmt.Println() } - // parse args and validade positional args + // parse args and validate positional args flag.Parse() - if !*version && flag.NArg() < 1 { + if !*version && !*test && flag.NArg() < 1 { flag.Usage() - os.Exit(1) + return 1 } // prints version information and exits @@ -90,41 +93,41 @@ func main() { hdr := sfgo.NewSFHeader() hdr.SetDefault(0) schemaVersion := hdr.Version - fmt.Printf("Version: %s+%s, Avro Schema Version: %v, Export Schema Version: %v\n", manifest.Version, manifest.BuildNumber, schemaVersion, manifest.JSONSchemaVersion) - os.Exit(0) + fmt.Printf("Version: %s+%s, Avro Schema Version: %v, Export Schema Version: %v (JSON), %v (ECS)\n", manifest.Version, manifest.BuildNumber, schemaVersion, manifest.JSONSchemaVersion, manifest.EcsVersion) //nolint:typecheck + return 0 } - // retrieve positional args - path := flag.Arg(0) - - // initialize logger + // initialize loggers logger.InitLoggers(logger.GetLogLevelFromValue(*logLevel)) + logger.SetPerfLogger(*perflog) // CPU profiling if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { - log.Fatal("Could not create CPU profile: ", err) + logger.Error.Println("Could not create CPU profile: ", err) + return 1 } defer f.Close() // error handling omitted if err := 
pprof.StartCPUProfile(f); err != nil { - log.Fatal("Could not start CPU profile: ", err) + logger.Error.Println("Could not start CPU profiling: ", err) + return 1 } defer pprof.StopCPUProfile() } + // Trace profiling if *traceprofile != "" { f, err := os.Create(*traceprofile) if err != nil { - log.Fatal("Could not create Trace profile: ", err) - panic(err) + logger.Error.Println("Could not create trace profile: ", err) + return 1 } - defer f.Close() - + defer f.Close() // error handling omitted err = trace.Start(f) if err != nil { - log.Fatal("Could not create Trace profile: ", err) - panic(err) + logger.Error.Println("Could not start trace profiling: ", err) + return 1 } defer trace.Stop() } @@ -133,30 +136,44 @@ func main() { pl = pipeline.New(*driverDir, *pluginDir, *configFile) err := pl.Load(*inputType) if err != nil { - logger.Error.Println("Unable to load pipeline error: " + err.Error()) - return + logger.Error.Println("Unable to load pipeline error: ", err.Error()) + return 1 } // log summary of loaded pipeline pl.Print() + // log success status for pipeline configuration + logger.Info.Println("Successfully loaded pipeline configuration") + + // exit if testing configuration + if *test { + return 0 + } + + // retrieve positional args + path := flag.Arg(0) + // initialize the pipeline err = pl.Init(path) if err != nil { - logger.Error.Println("Error caught while starting the pipeline: " + err.Error()) - return + logger.Error.Println("Error caught while starting the pipeline: ", err.Error()) + return 1 } // memory profiling if *memprofile != "" { f, err := os.Create(*memprofile) if err != nil { - log.Fatal("Could not create memory profile: ", err) + logger.Error.Println("Could not create memory profile: ", err) + return 1 } defer f.Close() // error handling omitted runtime.GC() // get up-to-date statistics if err := pprof.WriteHeapProfile(f); err != nil { - log.Fatal("Could not write memory profile: ", err) + logger.Error.Println("Could not write memory profile: ", err) + return 1 } } + return 0 } diff --git a/driver/manifest/manifest.go.in b/driver/manifest/manifest.go.in index ab9fa9b2..1569c48d 100644 --- a/driver/manifest/manifest.go.in +++ b/driver/manifest/manifest.go.in @@ -16,13 +16,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package manifest provides manifest metadata for SysFlow processor. package manifest // Manifest constants. Do not edit it. Values are replaced by make. const ( Version = "SYSFLOW_VERSION" JSONSchemaVersion = "JSON_SCHEMA_VERSION" + EcsVersion = "ECS_VERSION" BuildNumber = "BUILD_NUMBER" ) @@ -30,5 +32,6 @@ const ( const ( VersionKey string = "version" JSONSchemaVersionKey string = "jsonschemaversion" + EcsVersionKey string = "ecsversion" BuildNumberKey string = "buildnumber" ) diff --git a/driver/manifest/package.go b/driver/manifest/package.go new file mode 100644 index 00000000..9a77dae9 --- /dev/null +++ b/driver/manifest/package.go @@ -0,0 +1,2 @@ +// Package manifest provides manifest metadata for SysFlow processor. +package manifest diff --git a/driver/pipeline/config.go b/driver/pipeline/config.go index 69e058b7..eedae451 100644 --- a/driver/pipeline/config.go +++ b/driver/pipeline/config.go @@ -16,13 +16,24 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// + +// Package pipeline implements a pluggable data processing pipeline infrastructure. package pipeline import ( + "os" "strconv" + "strings" + "time" + + "github.com/sysflow-telemetry/sf-apis/go/sfgo" + "github.com/sysflow-telemetry/sf-processor/core/exporter/commons" + "github.com/sysflow-telemetry/sf-processor/driver/manifest" +) - "github.ibm.com/sysflow/sf-processor/driver/manifest" +// Global config variables +const ( + ClusterIDEnvKey string = "CLUSTER_ID" ) // Config attributes @@ -32,33 +43,39 @@ const ( HdlConfig string = "handler" InChanConfig string = "in" OutChanConfig string = "out" + DrivConfig string = "driver" ) // Driver constants/defaults const ( - SockFile = "/var/run/sysflow.sock" - BuffSize = 16384 - OOBuffSize = 1024 - DriverDir = "../resources/drivers" - PluginDir = "../resources/plugins" - ChanSize = 100000 + SockFile = "/var/run/sysflow.sock" + BuffSize = 16384 + OOBuffSize = 1024 + DriverDir = "../resources/drivers" + PluginDir = "../resources/plugins" + ChanSize = 100000 + HealthChecksTimeout = 10 * time.Second ) -type inputType int - // PluginConfig defines a map for plugin configuration -type PluginConfig map[string]string +type PluginConfig map[string]interface{} + +// DriverConfig defines a map for driver configuration +type DriverConfig map[string]interface{} // Config defines a pipeline configuration object type Config struct { + Drivers []DriverConfig `json,mapstructures:"drivers"` Pipeline []PluginConfig `json,mapstructures:"pipeline"` } // setManifestInfo sets manifest attributes to plugins configuration items. func setManifestInfo(conf *Config) { - addGlobalConfigItem(conf, manifest.VersionKey, manifest.Version) - addGlobalConfigItem(conf, manifest.JSONSchemaVersionKey, manifest.JSONSchemaVersion) - addGlobalConfigItem(conf, manifest.BuildNumberKey, manifest.BuildNumber) + addGlobalConfigItem(conf, manifest.VersionKey, manifest.Version) //nolint:typecheck + addGlobalConfigItem(conf, manifest.JSONSchemaVersionKey, manifest.JSONSchemaVersion) //nolint:typecheck + addGlobalConfigItem(conf, manifest.EcsVersionKey, manifest.EcsVersion) //nolint:typecheck + addGlobalConfigItem(conf, manifest.BuildNumberKey, manifest.BuildNumber) //nolint:typecheck + addGlobalConfigItem(conf, commons.ClusterIDKey, getEnv(ClusterIDEnvKey)) } // addGlobalConfigItem adds a config item to all processors in the pipeline. @@ -73,3 +90,14 @@ func addGlobalConfigItem(conf *Config, k string, v interface{}) { } } } + +// getEnv retrieves the environment variable for a key. +func getEnv(k string) string { + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + if pair[0] == k && len(pair) == 2 { + return pair[1] + } + } + return sfgo.Zeros.String +} diff --git a/driver/pipeline/pipeline.go b/driver/pipeline/pipeline.go index 0e2075bd..7577b177 100644 --- a/driver/pipeline/pipeline.go +++ b/driver/pipeline/pipeline.go @@ -16,10 +16,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package pipeline implements a pluggable data processing pipeline infrastructure.
package pipeline import ( + "context" "errors" "fmt" "sync" @@ -31,7 +33,7 @@ import ( // Pipeline represents a loaded plugin pipeline type Pipeline struct { wg *sync.WaitGroup - driver plugins.SFDriver + drivers []plugins.SFDriver processors []plugins.SFProcessor channels []interface{} handlers []plugins.SFHandler @@ -79,43 +81,45 @@ func (pl *Pipeline) AddChannel(channelName string, channel interface{}) { // Load loads and enables the pipeline func (pl *Pipeline) Load(driverName string) error { - if err := pl.pluginCache.LoadDrivers(pl.driverDir); err != nil { - logger.Error.Println("Unable to load dynamic driver: ", err) - return err - } - if err := pl.pluginCache.LoadPlugins(pl.pluginDir); err != nil { - logger.Error.Println("Unable to load dynamic plugins: ", err) - return err - } conf, err := pl.pluginCache.GetConfig() if err != nil { logger.Error.Println("Unable to load pipeline config: ", err) return err } setManifestInfo(conf) - if pl.driver, err = pl.pluginCache.GetDriver(driverName); err != nil { - logger.Error.Println("Unable to load driver: ", err) + if err := pl.pluginCache.LoadDrivers(pl.driverDir); err != nil { + logger.Error.Println("Unable to load dynamic driver: ", err) return err } - var in interface{} - var out interface{} - for _, p := range conf.Pipeline { - hdler := false - var hdl plugins.SFHandler - if val, ok := p[HdlConfig]; ok { - hdl, err = pl.pluginCache.GetHandler(val) - if err != nil { - logger.Error.Println(err) - return err + if len(driverName) > 0 { + var driver plugins.SFDriver + if driver, err = pl.pluginCache.GetDriver(driverName); err != nil { + logger.Error.Println("Unable to load driver: ", err) + return err + } + pl.drivers = append(pl.drivers, driver) + } else { + for _, d := range conf.Drivers { + if val, ok := d[DrivConfig].(string); ok { + var driver plugins.SFDriver + if driver, err = pl.pluginCache.GetDriver(val); err != nil { + logger.Error.Println("Unable to load driver: ", val, err) + return err + } + logger.Trace.Println("Loading driver: " + driver.GetName()) + pl.drivers = append(pl.drivers, driver) } - pl.handlers = append(pl.handlers, hdl) - xType := fmt.Sprintf("%T", hdl) - logger.Trace.Println(xType) - hdler = true } + } + if len(pl.drivers) == 0 { + return errors.New("No drivers configured on command line or in pipeline config") + } + for _, p := range conf.Pipeline { + var out interface{} + var inChannels []interface{} var prc plugins.SFProcessor - if val, ok := p[ProcConfig]; ok { - prc, err = pl.pluginCache.GetProcessor(val, hdl, hdler) + if val, ok := p[ProcConfig].(string); ok { + prc, err = pl.pluginCache.GetProcessor(pl.pluginDir, val) if err != nil { logger.Error.Println(err) return err @@ -128,48 +132,101 @@ func (pl *Pipeline) Load(driverName string) error { return err } } else { - logger.Error.Println("Processor or handler tag must exist in plugin config") + logger.Error.Println("processor tag must exist in plugin config") return err } if v, o := p[InChanConfig]; o { - in, err = pl.pluginCache.GetChan(v, ChanSize) - pl.channels = append(pl.channels, in) - chp := fmt.Sprintf("%T", in) - logger.Trace.Println(chp) + switch t := v.(type) { + case []interface{}: + for _, channel := range t { + in, err := pl.pluginCache.GetChan(channel.(string), ChanSize) + if err != nil { + logger.Error.Println(err) + return err + } + inChannels = append(inChannels, in) + chp := fmt.Sprintf("%T", in) + pl.channels = append(pl.channels, in) + logger.Trace.Println(chp) + + } + case string: + in, err := pl.pluginCache.GetChan(t, ChanSize) + if 
err != nil { + logger.Error.Println(err) + return err + } + pl.channels = append(pl.channels, in) + inChannels = append(inChannels, in) + chp := fmt.Sprintf("%T", in) + logger.Trace.Println(chp) + } } else { logger.Error.Println("in tag must exist in plugin config") return errors.New("in tag must exist in plugin config") } if v, o := p[OutChanConfig]; o { - out, err = pl.pluginCache.GetChan(v, ChanSize) - chp := fmt.Sprintf("%T", out) - pl.channels = append(pl.channels, out) - logger.Trace.Println(chp) - prc.SetOutChan(out) + var channels []interface{} + switch t := v.(type) { + case []interface{}: + for _, channel := range t { + out, err = pl.pluginCache.GetChan(channel.(string), ChanSize) + if err != nil { + logger.Error.Println(err) + return err + } + channels = append(channels, out) + chp := fmt.Sprintf("%T", out) + pl.channels = append(pl.channels, out) + logger.Trace.Println(chp) + } + case string: + out, err = pl.pluginCache.GetChan(t, ChanSize) + if err != nil { + logger.Error.Println(err) + return err + } + channels = append(channels, out) + chp := fmt.Sprintf("%T", out) + pl.channels = append(pl.channels, out) + logger.Trace.Println(chp) + } + prc.SetOutChan(channels) } pl.processors = append(pl.processors, prc) pl.wg.Add(1) - go pl.process(prc, in) + go pl.process(prc, inChannels) } + pl.test() return nil } // Init initializes the pipeline func (pl *Pipeline) Init(path string) error { logger.Info.Println("Starting the processing pipeline") + numDrivers := len(pl.drivers) // initialize driver - err := pl.driver.Init(pl) - if err != nil { - logger.Error.Println("Driver initialization error: " + err.Error()) - return err - } - // start processing - pl.running = true - err = pl.driver.Run(path, &(pl.running)) - if err != nil { - pl.running = false - logger.Error.Println("Cannot start the driver: " + err.Error()) - return err + for i, d := range pl.drivers { + conf := pl.GetDriverConfig(d.GetName()) + logger.Trace.Println("Initializing Driver with config", d.GetName()) + err := d.Init(pl, conf) + if err != nil { + logger.Error.Println("Driver initialization error: " + err.Error()) + return err + } + // start processing + pl.running = true + logger.Trace.Println("Calling Run on driver", d.GetName()) + if i == (numDrivers - 1) { + err = d.Run(path, &(pl.running)) + if err != nil { + pl.running = false + logger.Error.Println("Cannot start the driver: " + err.Error()) + return err + } + } else { + go d.Run(path, &(pl.running)) + } } return nil } @@ -178,7 +235,9 @@ func (pl *Pipeline) Init(path string) error { func (pl *Pipeline) Shutdown() error { logger.Info.Println("Stopping the processing pipeline") pl.running = false - pl.driver.Cleanup() + for _, d := range pl.drivers { + d.Cleanup() + } return nil } @@ -190,11 +249,34 @@ func (pl *Pipeline) GetRootChannel() interface{} { return nil } +// GetChannel returns a channel given a " " string +func (pl *Pipeline) GetChannel(channel string) (interface{}, error) { + return pl.pluginCache.GetChan(channel, ChanSize) +} + +// GetDriverConfig returns a driver configuration from the pipeline.json +// given a driver name +func (pl *Pipeline) GetDriverConfig(driverName string) map[string]interface{} { + conf, err := pl.pluginCache.GetConfig() + if err != nil { + logger.Error.Println("Unable to load pipeline config: ", err) + return nil + } + for _, d := range conf.Drivers { + if val, ok := d[DrivConfig].(string); ok { + if val == driverName { + return d + } + } + } + return nil +} + // Print outputs summary information about the loaded pipeline 
func (pl *Pipeline) Print() { - logger.Trace.Printf("Loaded %d stages\n", len(pl.processors)) - logger.Trace.Printf("Loaded %d channels\n", len(pl.channels)) - logger.Trace.Printf("Loaded %d handlers\n", len(pl.handlers)) + logger.Trace.Printf("Loaded %d stages", len(pl.processors)) + logger.Trace.Printf("Loaded %d handlers", len(pl.handlers)) + logger.Trace.Printf("Loaded %d channels", len(pl.channels)) } // Wait calls on pipeline's waitgroup @@ -203,7 +285,41 @@ func (pl *Pipeline) Wait() { } // Proxy function for handling transparent cleanup of resources -func (pl *Pipeline) process(prc plugins.SFProcessor, in interface{}) { +func (pl *Pipeline) process(prc plugins.SFProcessor, in []interface{}) { prc.Process(in, pl.wg) prc.Cleanup() } + +// Function for handling testable plugin checks. +func (pl *Pipeline) test() { + ctx, cancel := context.WithTimeout(context.Background(), HealthChecksTimeout) + defer cancel() + + c := make(chan error, 1) + go func() { + for _, prc := range pl.processors { + if tprc, ok := prc.(plugins.SFTestableProcessor); ok { + if _, err := tprc.Test(); err != nil { + logger.Error.Printf("Health checks for plugin %s failed: %v", prc.GetName(), err) + c <- err + return + } + } + } + c <- nil + }() + + select { + case err := <-c: + if err != nil { + logger.Health.Println("Health checks: failed") + } else { + logger.Health.Println("Health checks: passed") + } + return + case <-ctx.Done(): + logger.Error.Println("Health checks timed out: ", ctx.Err()) + logger.Health.Println("Health checks: failed") + return + } +} diff --git a/driver/pipeline/plugincache.go b/driver/pipeline/plugincache.go index 3d3f9c33..6df6c6eb 100644 --- a/driver/pipeline/plugincache.go +++ b/driver/pipeline/plugincache.go @@ -16,7 +16,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package pipeline implements a pluggable data processing pipeline infrastructure. package pipeline import ( @@ -31,10 +32,10 @@ import ( "github.com/sysflow-telemetry/sf-apis/go/ioutils" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" - "github.ibm.com/sysflow/sf-processor/core/exporter" - "github.ibm.com/sysflow/sf-processor/core/policyengine" - "github.ibm.com/sysflow/sf-processor/core/processor" - "github.ibm.com/sysflow/sf-processor/driver/sysflow" + "github.com/sysflow-telemetry/sf-processor/core/exporter" + "github.com/sysflow-telemetry/sf-processor/core/policyengine" + "github.com/sysflow-telemetry/sf-processor/core/processor" + "github.com/sysflow-telemetry/sf-processor/driver/sysflow" ) // PluginCache defines a data strucure for managing plugins. @@ -44,13 +45,13 @@ type PluginCache struct { procFuncMap map[string]interface{} hdlFuncMap map[string]interface{} chanFuncMap map[string]interface{} - config *viper.Viper + config *Config configFile string } // NewPluginCache creates a new PluginCache instance. func NewPluginCache(conf string) *PluginCache { - plug := &PluginCache{config: viper.New(), + plug := &PluginCache{config: new(Config), chanMap: make(map[string]interface{}), driverMap: make(map[string]interface{}), procFuncMap: make(map[string]interface{}), @@ -63,6 +64,7 @@ func NewPluginCache(conf string) *PluginCache { // initializes plugin cache. 
func (p *PluginCache) init() { + (&processor.SysFlowReader{}).Register(p) (&processor.SysFlowProcessor{}).Register(p) (&policyengine.PolicyEngine{}).Register(p) (&exporter.Exporter{}).Register(p) @@ -70,23 +72,25 @@ func (p *PluginCache) init() { (&sysflow.StreamingDriver{}).Register(p) } -// LoadPlugins loads dynamic plugins to plugin cache from dir path. -func (p *PluginCache) LoadPlugins(dir string) error { - var plug *plugin.Plugin - if paths, err := ioutils.ListFilePaths(dir, ".so"); err == nil { - for _, path := range paths { - if plug, err = plugin.Open(path); err != nil { - return err - } - sym, err := plug.Lookup(plugins.PlugSym) - if err != nil { - return err - } - if proc, ok := sym.(plugins.SFProcessor); ok { - // p.pluginMap[proc.GetName()] = plug - proc.Register(p) - } +// TryToLoadPlugin loads dynamic plugins to plugin cache from dir path. +func (p *PluginCache) TryToLoadPlugin(dir string, name string) error { + dynPlugin := dir + "/" + name + ".so" + if _, err := os.Stat(dynPlugin); err == nil { + var plug *plugin.Plugin + if plug, err = plugin.Open(dynPlugin); err != nil { + return err } + sym, err := plug.Lookup(plugins.PlugSym) + if err != nil { + return err + } + if proc, ok := sym.(plugins.SFProcessor); ok { + // p.pluginMap[proc.GetName()] = plug + logger.Trace.Printf("loading plugin %s from file %s", name, dynPlugin) + proc.Register(p) + } + } else { + return errors.New("error trying load plugin at: " + dynPlugin) } return nil } @@ -121,11 +125,6 @@ func (p *PluginCache) AddProcessor(name string, factory interface{}) { p.procFuncMap[name] = factory } -// AddHandler adds a handler factory method to the plugin cache. -func (p *PluginCache) AddHandler(name string, factory interface{}) { - p.hdlFuncMap[name] = factory -} - // AddChannel adds a channel factory method to the plugin cache. func (p *PluginCache) AddChannel(name string, factory interface{}) { p.chanFuncMap[name] = factory @@ -141,33 +140,33 @@ func (p *PluginCache) GetConfig() (*Config, error) { return nil, errors.New("Pipeline config file is not a file") } dir := filepath.Dir(p.configFile) - p.config.SetConfigName(strings.TrimSuffix(filepath.Base(p.configFile), filepath.Ext(p.configFile))) - p.config.SetConfigType("json") - p.config.AddConfigPath(dir) - conf := new(Config) - err = p.config.ReadInConfig() + configReader := viper.New() + configReader.SetConfigName(strings.TrimSuffix(filepath.Base(p.configFile), filepath.Ext(p.configFile))) + configReader.SetConfigType("json") + configReader.AddConfigPath(dir) + err = configReader.ReadInConfig() if err != nil { return nil, err } - err = p.config.Unmarshal(conf) + err = configReader.Unmarshal(p.config) if err != nil { return nil, err } + p.updateConfigFromEnv() - p.updateConfigFromEnv(conf) - return conf, nil + return p.config, nil } // updateConfigFromEnv updates config object with environment variables if set. // It assumes the following convention: // - Environment variables follow the naming schema _ // - Processor name in pipeline.json is all lower case -func (p *PluginCache) updateConfigFromEnv(config *Config) { - for _, c := range config.Pipeline { - if proc, ok := c[ProcConfig]; ok { +func (p *PluginCache) updateConfigFromEnv() { + for _, c := range p.config.Pipeline { + if proc, ok := c[ProcConfig].(string); ok { for k, v := range p.getEnv(proc) { c[k] = v } @@ -175,62 +174,64 @@ func (p *PluginCache) updateConfigFromEnv(config *Config) { } } -// getEnv returns the environemnt config settings for processor proc. 
+// getEnv returns the environment config settings for processor proc. func (p *PluginCache) getEnv(proc string) map[string]string { var conf = make(map[string]string) for _, e := range os.Environ() { pair := strings.SplitN(e, "=", 2) key := strings.SplitN(strings.ToLower(pair[0]), "_", 2) if len(key) == 2 && key[0] == proc { - conf[key[1]] = pair[1] + attr := strings.ReplaceAll(key[1], "_", ".") + conf[attr] = pair[1] } } return conf } -// GetHandler retrieves a cached plugin handler by name. -func (p *PluginCache) GetHandler(name string) (plugins.SFHandler, error) { - if val, ok := p.hdlFuncMap[name]; ok { - funct := val.(func() plugins.SFHandler) - return funct(), nil - } - return nil, fmt.Errorf("Handler '%s' not found in plugin cache", name) -} - // GetChan retrieves a cached plugin channel by name. func (p *PluginCache) GetChan(ch string, size int) (interface{}, error) { fields := strings.Fields(ch) + logger.Trace.Println("Trying to get channel with fields ", fields[0], fields[1]) if len(fields) != 2 { - return nil, errors.New("Channel must be of the form ") + return nil, errors.New("channel must be of the form ") } if val, ok := p.chanMap[fields[0]]; ok { logger.Trace.Println("Found existing channel ", fields[0]) return val, nil } if val, ok := p.chanFuncMap[fields[1]]; ok { + logger.Trace.Println("Channel not in cache...creating ", fields[0], fields[1]) funct := val.(func(int) interface{}) c := funct(size) p.chanMap[fields[0]] = c return c, nil } - return nil, fmt.Errorf("Channel '%s' not found in plugin cache", fields[0]) + return nil, fmt.Errorf("channel '%s':'%s' not found in plugin cache", fields[0], fields[1]) } // GetProcessor retrieves a cached plugin processor by name. -func (p *PluginCache) GetProcessor(name string, hdl plugins.SFHandler, hdlr bool) (plugins.SFProcessor, error) { +func (p *PluginCache) GetProcessor(dir string, name string) (plugins.SFProcessor, error) { + var con interface{} = nil if val, ok := p.procFuncMap[name]; ok { - logger.Trace.Println("Found processor in function map: ", name) - var prc plugins.SFProcessor - if hdlr { - funct := val.(func(plugins.SFHandler) plugins.SFProcessor) - prc = funct(hdl) - } else { - funct := val.(func() plugins.SFProcessor) - prc = funct() + logger.Trace.Println("Found processor in function map: " + name) + con = val + } else { + err := p.TryToLoadPlugin(dir, name) + if err != nil { + return nil, err + } + if val, ok := p.procFuncMap[name]; ok { + logger.Trace.Println("Found processor from dynamic loading: " + name) + con = val } + } + if con != nil { + var prc plugins.SFProcessor + funct := con.(func() plugins.SFProcessor) + prc = funct() return prc, nil } - return nil, fmt.Errorf("Plugin '%s' not found in plugin cache", name) + return nil, fmt.Errorf("plugin '%s' not found in plugin cache", name) } // GetDriver retrieves a cached plugin driver by name. 
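As the rewritten updateConfigFromEnv and getEnv above show, a processor's settings can be overridden by environment variables: the prefix up to the first underscore (lower-cased) selects the processor, and any remaining underscores in the attribute part are now mapped to dots so that nested keys can be addressed. A minimal, self-contained sketch of that mapping follows; the variable names used here are illustrative examples, not a documented list.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envOverridesFor mirrors the transformation performed by PluginCache.getEnv
// (a sketch, not the actual implementation): the prefix up to the first
// underscore selects the processor, and remaining underscores in the
// attribute part become dots.
func envOverridesFor(proc string) map[string]string {
	conf := make(map[string]string)
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		key := strings.SplitN(strings.ToLower(pair[0]), "_", 2)
		if len(key) == 2 && key[0] == proc {
			conf[strings.ReplaceAll(key[1], "_", ".")] = pair[1]
		}
	}
	return conf
}

func main() {
	// Hypothetical overrides for the exporter processor.
	os.Setenv("EXPORTER_FORMAT", "json")
	os.Setenv("EXPORTER_ES_ADDRESSES", "https://localhost:9200")
	fmt.Println(envOverridesFor("exporter"))
	// Prints: map[es.addresses:https://localhost:9200 format:json]
}
```

The collected overrides are merged into the matching stage of the pipeline configuration by updateConfigFromEnv before the pipeline is loaded.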
@@ -241,5 +242,5 @@ func (p *PluginCache) GetDriver(name string) (plugins.SFDriver, error) { drv := funct() return drv, nil } - return nil, fmt.Errorf("Driver '%s' not found in plugin cache", name) + return nil, fmt.Errorf("driver '%s' not found in plugin cache", name) } diff --git a/driver/sysflow/config.go b/driver/sysflow/config.go new file mode 100644 index 00000000..72714b7e --- /dev/null +++ b/driver/sysflow/config.go @@ -0,0 +1,4 @@ +package sysflow + +const OutChanConfig = "out" +const PathConfig = "path" diff --git a/driver/sysflow/filedriver.go b/driver/sysflow/filedriver.go index 66aa4bd0..25cd099a 100644 --- a/driver/sysflow/filedriver.go +++ b/driver/sysflow/filedriver.go @@ -16,19 +16,23 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + +// Package sysflow implements pluggable drivers for SysFlow ingestion. package sysflow import ( "bufio" "errors" - "io/ioutil" + "io/fs" "os" + "time" "github.com/linkedin/goavro" + "github.com/paulbellamy/ratecounter" "github.com/sysflow-telemetry/sf-apis/go/converter" "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" ) const ( @@ -41,9 +45,9 @@ func getFiles(filename string) ([]string, error) { return nil, err } else if fi.IsDir() { logger.Trace.Println("File is a directory") - var files []os.FileInfo + var files []fs.DirEntry var err error - if files, err = ioutil.ReadDir(filename); err != nil { + if files, err = os.ReadDir(filename); err != nil { return nil, err } for _, file := range files { @@ -65,7 +69,12 @@ func getFiles(filename string) ([]string, error) { // FileDriver represents reading a sysflow file from source type FileDriver struct { pipeline plugins.SFPipeline + config map[string]interface{} file *os.File + + // Rate counter + rc *ratecounter.RateCounter + lastRcTs time.Time } // NewFileDriver creates a new file driver object @@ -84,22 +93,44 @@ func (s *FileDriver) Register(pc plugins.SFPluginCache) { } // Init initializes the file driver with the pipeline -func (s *FileDriver) Init(pipeline plugins.SFPipeline) error { +func (s *FileDriver) Init(pipeline plugins.SFPipeline, config map[string]interface{}) error { s.pipeline = pipeline + s.config = config + if logger.IsEnabled(logger.Perf) { + s.rc = ratecounter.NewRateCounter(1 * time.Second) + s.lastRcTs = time.Now() + } return nil } // Run runs the file driver func (s *FileDriver) Run(path string, running *bool) error { - channel := s.pipeline.GetRootChannel() - sfChannel := channel.(*plugins.SFChannel) + var channel interface{} + configpath := path + if s.config == nil { + channel = s.pipeline.GetRootChannel() + } else { + if v, o := s.config[OutChanConfig].(string); o { + ch, err := s.pipeline.GetChannel(v) + if err != nil { + return err + } + channel = ch + } else { + return errors.New("out tag does not exist in driver configuration for driver " + fileDriverName) + } + if v, o := s.config[PathConfig].(string); o { + configpath = v + } + } + sfChannel := channel.(*plugins.Channel[*sfgo.SysFlow]) records := sfChannel.In logger.Trace.Println("Loading file: ", path) sfobjcvter := converter.NewSFObjectConverter() - files, err := getFiles(path) + files, err := getFiles(configpath) if err != nil { logger.Error.Println("Files error: ", err) return err @@ -127,15 +158,25 @@ func (s *FileDriver) Run(path string, running *bool) error { break } 
records <- sfobjcvter.ConvertToSysFlow(datum) + + // Increment rate counter + if logger.IsEnabled(logger.Perf) { + s.rc.Incr(1) + if time.Since(s.lastRcTs) > (15 * time.Second) { + logger.Perf.Println("File driver rate (events/sec): ", s.rc.Rate()) + s.lastRcTs = time.Now() + } + } } s.file.Close() if !*running { break } } - logger.Warn.Println("Closing main channel") + logger.Trace.Println("Closing main channel filedriver") close(records) s.pipeline.Wait() + logger.Trace.Println("Exiting Process() function filedriver") return nil } diff --git a/core/policyengine/engine/enrichment.go b/driver/sysflow/readmsgunixdarwin.go similarity index 67% rename from core/policyengine/engine/enrichment.go rename to driver/sysflow/readmsgunixdarwin.go index bb4c5c61..2165e464 100644 --- a/core/policyengine/engine/enrichment.go +++ b/driver/sysflow/readmsgunixdarwin.go @@ -16,13 +16,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package engine -// Handler defines an interface for SysFlow enrichment routines. -type Handler interface { - Init(confPath string) error - ProcessSync(r *Record) (interface{}, error) - ProcessAsync(r *Record, callback func(o interface{})) error - Cleanup() error +//go:build darwin + +// Package sysflow implements pluggable drivers for SysFlow ingestion. +package sysflow + +// readMsgUnix reads a message from a unix socket (compatible with the Darwin architecture). +func (s *StreamingDriver) readMsgUnix(buf []byte, oobuf []byte) error { + _, _, _, _, err := s.conn.ReadMsgUnix(buf[:], oobuf[:]) + return err } diff --git a/driver/sysflow/readmsgunixlinux.go b/driver/sysflow/readmsgunixlinux.go new file mode 100644 index 00000000..863b0eca --- /dev/null +++ b/driver/sysflow/readmsgunixlinux.go @@ -0,0 +1,37 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +// Package sysflow implements pluggable drivers for SysFlow ingestion. +package sysflow + +import ( + "fmt" + "syscall" +) + +// readMsgUnix reads a message from a unix socket (compatible with the Linux architecture). +func (s *StreamingDriver) readMsgUnix(buf []byte, oobuf []byte) error { + _, _, flags, _, err := s.conn.ReadMsgUnix(buf[:], oobuf[:]) + if flags != syscall.MSG_CMSG_CLOEXEC { + return fmt.Errorf("ReadMsgUnix flags = %v, want %v (MSG_CMSG_CLOEXEC)", flags, syscall.MSG_CMSG_CLOEXEC) + } + return err +} diff --git a/driver/sysflow/streamingdriver.go b/driver/sysflow/streamingdriver.go index b70655f3..c70534d2 100644 --- a/driver/sysflow/streamingdriver.go +++ b/driver/sysflow/streamingdriver.go @@ -16,13 +16,18 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// + +//go:build linux || darwin + +// Package sysflow implements pluggable drivers for SysFlow ingestion. package sysflow import ( "bytes" + "errors" "net" "os" + "path/filepath" "github.com/actgardner/gogen-avro/v7/compiler" "github.com/actgardner/gogen-avro/v7/vm" @@ -45,6 +50,7 @@ const ( // StreamingDriver represents a streaming sysflow datasource type StreamingDriver struct { pipeline plugins.SFPipeline + config map[string]interface{} conn *net.UnixConn } @@ -64,25 +70,45 @@ func (s *StreamingDriver) Register(pc plugins.SFPluginCache) { } // Init initializes the driver -func (s *StreamingDriver) Init(pipeline plugins.SFPipeline) error { +func (s *StreamingDriver) Init(pipeline plugins.SFPipeline, config map[string]interface{}) error { s.pipeline = pipeline + s.config = config return nil } // Run runs the driver func (s *StreamingDriver) Run(path string, running *bool) error { - channel := s.pipeline.GetRootChannel() - sfChannel := channel.(*plugins.SFChannel) + var channel interface{} + if s.config == nil { + channel = s.pipeline.GetRootChannel() + } else { + if v, o := s.config[OutChanConfig].(string); o { + ch, err := s.pipeline.GetChannel(v) + if err != nil { + return err + } + channel = ch + } else { + return errors.New("out tag does not exist in driver configuration for driver " + fileDriverName) + } + } + sfChannel := channel.(*plugins.Channel[*sfgo.SysFlow]) records := sfChannel.In if err := os.RemoveAll(path); err != nil { - logger.Error.Println("remove error:", err) + logger.Error.Println("Remove error: ", err) + return err + } + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0600); err != nil { + logger.Error.Println("Unable to create directory: ", err) return err } l, err := net.ListenUnix("unixpacket", &net.UnixAddr{Name: path, Net: "unixpacket"}) if err != nil { - logger.Error.Println("listen error:", err) + logger.Error.Println("Listen error: ", err) return err } defer l.Close() @@ -90,36 +116,38 @@ func (s *StreamingDriver) Run(path string, running *bool) error { sFlow := sfgo.NewSysFlow() deser, err := compiler.CompileSchemaBytes([]byte(sFlow.Schema()), []byte(sFlow.Schema())) if err != nil { - logger.Error.Println("compiler error:", err) + logger.Error.Println("Compilation error: ", err) return err } for *running { + health := false buf := make([]byte, BuffSize) oobuf := make([]byte, OOBuffSize) reader := bytes.NewReader(buf) s.conn, err = l.AcceptUnix() if err != nil { - logger.Error.Println("accept error:", err) + logger.Error.Println("Accept error: ", err) break } + logger.Health.Println("Successfully accepted new input stream") for *running { sFlow = sfgo.NewSysFlow() - _, _, flags, _, err := s.conn.ReadMsgUnix(buf[:], oobuf[:]) + err := s.readMsgUnix(buf[:], oobuf[:]) if err != nil { - logger.Error.Println("read error:", err) + logger.Error.Println("Read error: ", err) break } - if flags == 0 { - reader.Reset(buf) - err = vm.Eval(reader, deser, sFlow) - if err != nil { - logger.Error.Println("deserialize:", err) - } - records <- sFlow - } else { - logger.Error.Println("Flag error ReadMsgUnix:", flags) + reader.Reset(buf) + err = vm.Eval(reader, deser, sFlow) + if err != nil { + logger.Error.Println("Deserialization error: ", err) + } + if !health { + logger.Health.Println("Successfully read first record from input stream") + health = true } + records <- sFlow } s.conn.Close() } diff --git a/makefile.manifest.inc b/makefile.manifest.inc index 32ef63ae..a2daa2b8 100644 --- a/makefile.manifest.inc +++ b/makefile.manifest.inc @@ -1,11 +1,13 @@ # -# 
Copyright (C) 2020 IBM Corporation. +# Copyright (C) 2024 IBM Corporation. # # Authors: # Frederico Araujo # Teryl Taylor # -SYSFLOW_VERSION?=0.1.0 +SYSFLOW_VERSION?=0.6.3 SYSFLOW_BUILD_NUMBER?=1 -SYSFLOW_JSON_SCHEMA_VERSION=2 \ No newline at end of file +SYSFLOW_JSON_SCHEMA_VERSION=5 +SYSFLOW_ECS_VERSION=8.2 +UBI_VERSION=9.3-1610 diff --git a/plugins/actions/example/Makefile b/plugins/actions/example/Makefile new file mode 100644 index 00000000..f12f36f8 --- /dev/null +++ b/plugins/actions/example/Makefile @@ -0,0 +1,15 @@ +GOCMD=go +GOBUILD=$(GOCMD) build -buildmode=plugin -trimpath -tags exclude_graphdriver_btrfs +OUTPUT=../../../resources/actions +INSTALL=/usr/local/sysflow/resources/actions + +.PHONY: all +all: + mkdir -p $(OUTPUT); $(GOBUILD) -o $(OUTPUT)/now.so . + +.PHONY: install +install: all + mkdir -p $(INSTALL) + cp $(OUTPUT)/now.so $(INSTALL) + + diff --git a/plugins/actions/example/README.md b/plugins/actions/example/README.md new file mode 100644 index 00000000..574463e1 --- /dev/null +++ b/plugins/actions/example/README.md @@ -0,0 +1,93 @@ +# Action Plugins + +User-defined actions can be plugged to SysFlow's Policy Engine rule declarations to perform additional processing on matched records. + +## Interface + +Actions are implemented via the golang plugin mechanism. An action must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-processor/core/policyengine/engine` package. + +```go +// Prototype of an action function +type ActionFunc func(r *Record) error + +// Action interface for user-defined actions +type Action interface { + GetName() string + GetFunc() ActionFunc +} +``` + +Actions have a name and an action function. Within a single policy engine instance, action names must be unique. User-defined actions cannot re-declare built-in actions. Reusing names of user-defined actions overwrites previously registered actions. + +The action function receives the current record as an argument and thus has access to all record attributes. The action result can be stored in the record context via the context modifier methods. + +## Pre-requisites + +* Go 1.17 (if building locally, without the plugin builder) + +## Build + +The `now` action is a pluggable action that creates a tag containing the current time in nanosecond precision. + +First, in the root of sf-processor, build the processor and the action plugin. Note, this plugin's shared object is generated in `resources/actions/now.so`. + +```bash +make build && make -C plugins/actions/example +``` + +Then, run: + +```bash +cd driver && ./sfprocessor -log=quiet -config=../plugins/actions/example/pipeline.actions.json ../resources/traces/tcp.sf +``` + +## Plugin builder + +To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages. + +Below is an example of how this can be achieved. Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`. 
+ +First, build the plugin: + +```bash +docker run --rm \ + -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \ + -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \ + sysflowtelemetry/plugin-builder:$TAG \ + make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/actions/example +``` + +To test it, run the pre-built processor with the example configuration and trace. + +```bash +docker run --rm \ + -v $(pwd)/plugins:/usr/local/sysflow/plugins \ + -v $(pwd)/resources:/usr/local/sysflow/resources \ + -w /usr/local/sysflow/bin \ + --entrypoint=/usr/local/sysflow/bin/sfprocessor \ + sysflowtelemetry/sf-processor:$TAG \ + -log=quiet -config=../plugins/actions/example/pipeline.actions.json ../resources/traces/tcp.sf +``` + +In the output, observe that all records matching the policy specified in `pipeline.actions.json` are tagged by action `now` with the tag `now_in_nanos`. For example: + +```plain +{ + "version": 4, + "endts": 0, + "opflags": [ + "EXEC" + ], + ... + "policies": [ + { + "id": "Action example", + "desc": "user-defined action example", + "priority": 0 + } + ], + "tags": [ + "now_in_nanos:1645409122055957900" + ] +} +``` diff --git a/plugins/actions/example/go.mod b/plugins/actions/example/go.mod new file mode 100644 index 00000000..6d07b94d --- /dev/null +++ b/plugins/actions/example/go.mod @@ -0,0 +1,39 @@ +// +// Copyright (C) 2021 IBM Corporation. +// +// Authors: +// Andreas Schade +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// +module github.com/sysflow-telemetry/sf-processor/plugins/actions/example + +go 1.19 + +require github.com/sysflow-telemetry/sf-processor/core v0.0.0-20220221021811-25c7181c2904 + +require ( + github.com/actgardner/gogen-avro/v7 v7.3.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 // indirect + github.com/paulbellamy/ratecounter v0.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 // indirect + github.com/tidwall/gjson v1.14.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect +) + +replace github.com/sysflow-telemetry/sf-processor/core => ../../../core diff --git a/plugins/actions/example/go.sum b/plugins/actions/example/go.sum new file mode 100644 index 00000000..8ada4cd7 --- /dev/null +++ b/plugins/actions/example/go.sum @@ -0,0 +1,43 @@ +github.com/actgardner/gogen-avro/v7 v7.3.1 h1:6JJU3o7168lcyIB6uXYyYdflCsJT3aMFKZPSpSc4toI= +github.com/actgardner/gogen-avro/v7 v7.3.1/go.mod h1:1d45RpDvI29sU7l9wUxlRTEglZSdQSbd6bDbWJaEMgo= +github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= +github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0 h1:j7MyDjg6pb7A2ziow17FDZ2Oj5vGnJsLyDmjpN4Jkcg= +github.com/bradleyjkemp/sigma-go v0.5.1 h1:2a747+swYse4KfIvLRCg49q118MSONk5+W/JeGM40cc= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= +github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= +github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 
h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c h1:5BCJMIuiysHlYJe+nr/203cqIS6cpTIssbUD8v88VOU= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c/go.mod h1:eo1ATE056Rqb9LhE4LA/0Y2AHfV//1zdCw0py4/S5HM= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 h1:ZxzwimQe2R4kYorqS33/l+m/+SXWMzPn1cLtpA1ExA0= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300/go.mod h1:rvE0WXuIQmACykrVpAKxP5Crf/7KvZplUTULATmAuf4= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/plugins/actions/example/now.go b/plugins/actions/example/now.go new file mode 100644 index 00000000..e9c907de --- /dev/null +++ b/plugins/actions/example/now.go @@ -0,0 +1,47 @@ +// +// Copyright (C) 2021 IBM Corporation. +// +// Authors: +// Andreas Schade +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package main + +import ( + "strconv" + "time" + + "github.com/sysflow-telemetry/sf-processor/core/policyengine/engine" + "github.com/sysflow-telemetry/sf-processor/core/policyengine/source/flatrecord" +) + +type MyAction struct{} + +func (a *MyAction) GetName() string { + return "now" +} + +func (a *MyAction) GetFunc() engine.ActionFunc[*flatrecord.Record] { + return addMyTag +} + +func addMyTag(r *flatrecord.Record) error { + r.Ctx.AddTags("now_in_nanos:" + strconv.FormatInt(time.Now().UnixNano(), 10)) + return nil +} + +var Action MyAction + +// This function is not run when module is used as a plugin. 
+func main() {} diff --git a/resources/pipelines/pipeline.runtimeintegrity.json b/plugins/actions/example/pipeline.actions.json similarity index 65% rename from resources/pipelines/pipeline.runtimeintegrity.json rename to plugins/actions/example/pipeline.actions.json index 945b471e..5ad176ad 100644 --- a/resources/pipelines/pipeline.runtimeintegrity.json +++ b/plugins/actions/example/pipeline.actions.json @@ -10,16 +10,14 @@ "processor": "policyengine", "in": "flat flattenerchan", "out": "evt eventchan", - "policies": "../resources/policies/runtimeintegrity" + "policies": "../plugins/actions/example/policy.yaml", + "mode": "alert" }, { "processor": "exporter", "in": "evt eventchan", - "export": "syslog", - "proto": "tcp", - "tag": "sysflow", - "host": "localhost", - "port": "514" + "export": "terminal", + "format": "json" } ] } diff --git a/plugins/actions/example/policy.yaml b/plugins/actions/example/policy.yaml new file mode 100644 index 00000000..2ffba7bf --- /dev/null +++ b/plugins/actions/example/policy.yaml @@ -0,0 +1,4 @@ +- rule: Action example + desc: user-defined action example + condition: sf.opflags = EXEC + actions: [now] diff --git a/plugins/example/Makefile b/plugins/example/Makefile deleted file mode 100644 index d0501cdf..00000000 --- a/plugins/example/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -GOCMD=go -GOBUILD=$(GOCMD) build -buildmode=plugin -OUTPUT=../../resources/plugins - -.PHONY: all -all: - mkdir -p $(OUTPUT); $(GOBUILD) -o $(OUTPUT)/example.so . - - diff --git a/plugins/example/go.sum b/plugins/example/go.sum deleted file mode 100644 index a548dd45..00000000 --- a/plugins/example/go.sum +++ /dev/null @@ -1,228 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ= -github.com/actgardner/gogen-avro v6.5.0+incompatible h1:P73NiZR/S0lBWQDkK6mbvdgBXRc6e0/AaaSTqu/AvLI= -github.com/actgardner/gogen-avro v6.5.0+incompatible/go.mod h1:N2PzqZtS+5w9xxGp2daeykhWdTL0lBiRhbbvkVj4Yd8= -github.com/actgardner/gogen-avro/v7 v7.1.1 h1:fAKfqQNIDIXq4Pwop3Fqu+0Tym5PuAX/cMVbdEIuVdM= -github.com/actgardner/gogen-avro/v7 v7.1.1/go.mod h1:DALbHv5zAeoz7KJ/fPAvl+d8Ixcy6x8Fjo+PO0YM8mU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antlr/antlr4 v0.0.0-20200417160354-8c50731894e0/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/enriquebris/goconcurrentqueue v0.6.0/go.mod h1:wGJhQNFI4wLNHleZLo5ehk1puj8M6OIl0tOjs3kwJus= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= -github.com/linkedin/goavro/v2 v2.9.7/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= -github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= -github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96 h1:XbL0XPH5OZMVH5d0RhM0g9VXOKclsy9hVUh6+cem73c= -github.com/sysflow-telemetry/sf-apis/go v0.0.0-20201026195524-bd9cb63ccc96/go.mod h1:rpTKky267xtopNUCoInTEZiaFkOrawhzE0HaZMEvIAI= -github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027030609-879f8d66a4f0 h1:VdMs3aXRSLjG9CIiDE+McWtP9NnuUNjIH1RwkfNn3k0= -github.ibm.com/sysflow/sf-processor/core v0.0.0-20201027030609-879f8d66a4f0/go.mod h1:ECpqHM00b9hruN6vA+SdVNam/NZ2iwem//fOnfBwmVM= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= diff --git a/plugins/handlers/printer/Makefile b/plugins/handlers/printer/Makefile new file mode 100644 index 00000000..7e9ed942 --- /dev/null +++ b/plugins/handlers/printer/Makefile @@ -0,0 +1,9 @@ +GOCMD=go +GOBUILD=$(GOCMD) build -buildmode=plugin -trimpath -tags exclude_graphdriver_btrfs +OUTPUT=../../../resources/handlers + +.PHONY: all +all: + mkdir -p $(OUTPUT); $(GOBUILD) -o $(OUTPUT)/printer.so . + + diff --git a/plugins/handlers/printer/README.md b/plugins/handlers/printer/README.md new file mode 100644 index 00000000..2179028f --- /dev/null +++ b/plugins/handlers/printer/README.md @@ -0,0 +1,94 @@ +# Handler Plugins + +User-defined handler modules can be plugged into the built-in SysFlow `processor` plugin to implement custom data processing and analytic pipelines. + +## Interface + +Handlers are implemented via the golang plugin mechanism. A handler must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-apis/go/plugins` package. + +```go +// SFHandler defines the SysFlow handler interface. +type SFHandler interface { + RegisterChannel(pc SFPluginCache) + RegisterHandler(hc SFHandlerCache) + Init(conf map[string]interface{}) error + IsEntityEnabled() bool + HandleHeader(sf *CtxSysFlow, hdr *sfgo.SFHeader) error + HandleContainer(sf *CtxSysFlow, cont *sfgo.Container) error + HandleProcess(sf *CtxSysFlow, proc *sfgo.Process) error + HandleFile(sf *CtxSysFlow, file *sfgo.File) error + HandleNetFlow(sf *CtxSysFlow, nf *sfgo.NetworkFlow) error + HandleNetEvt(sf *CtxSysFlow, ne *sfgo.NetworkEvent) error + HandleFileFlow(sf *CtxSysFlow, ff *sfgo.FileFlow) error + HandleFileEvt(sf *CtxSysFlow, fe *sfgo.FileEvent) error + HandleProcFlow(sf *CtxSysFlow, pf *sfgo.ProcessFlow) error + HandleProcEvt(sf *CtxSysFlow, pe *sfgo.ProcessEvent) error + SetOutChan(ch []interface{}) + Cleanup() +} +``` + +Each `Handle*` function receives the current SysFlow record being processed along with its corresponding parsed record type. Custom processing code should be implemented using these functions. + +## Pre-requisites + +* Go 1.19 (if building locally, without the plugin builder) + +## Build + +The `printer` handler is a pluggable handler that logs select SysFlow records to the standard output. This plugin doesn't define any output channels, so it acts as a plugin sink (last plugin in a pipeline). + +To run this example, in the root of sf-processor, build the processor and the handler plugin. Note that this plugin's shared object is generated in `resources/handlers/printer.so`. + +```bash +make build && make -C plugins/handlers/printer +``` + +Then, run: + +```bash +cd driver && ./sfprocessor -log=info -config=../plugins/handlers/printer/pipeline.printer.json ../resources/traces/tcp.sf +``` + +## Plugin builder + +To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages. + +Below is an example of how this can be achieved. Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`.
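For instance, assuming a Bash-compatible shell, the tag can be exported once so that both commands below pick it up:

```bash
# Hypothetical example: use the rolling edge tag listed above;
# a pinned release tag (>=0.4.0) may be preferable for reproducible builds.
export TAG=edge
```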
+ +First, build the plugin: + +```bash +docker run --rm \ + -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \ + -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \ + sysflowtelemetry/plugin-builder:$TAG \ + make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/handlers/printer +``` + +To test it, run the pre-built processor with the example configuration and trace. + +```bash +docker run --rm \ + -v $(pwd)/plugins:/usr/local/sysflow/plugins \ + -v $(pwd)/resources:/usr/local/sysflow/resources \ + -w /usr/local/sysflow/bin \ + --entrypoint=/usr/local/sysflow/bin/sfprocessor \ + sysflowtelemetry/sf-processor:$TAG \ + -log=info -config=../plugins/handlers/printer/pipeline.printer.json ../resources/traces/tcp.sf +``` + +The output on the above pre-recorded trace should look like this: + +```plain +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./server, 13823 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./server, 3 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./server, 3 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./client, 13824 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./client, 3 +[Info] 2022/02/21 15:39:58 printer.go:100: FileFlow ./client, 3 +[Info] 2022/02/21 15:39:58 printer.go:94: NetworkFlow ./client, 8080 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./client, 13824 +[Info] 2022/02/21 15:39:58 printer.go:94: NetworkFlow ./server, 8080 +[Info] 2022/02/21 15:39:58 printer.go:118: ProcEvt ./server, 13823 +``` diff --git a/driver/manifest/manifest.go b/plugins/handlers/printer/go.mod similarity index 63% rename from driver/manifest/manifest.go rename to plugins/handlers/printer/go.mod index d50af4cc..bb7cc7e7 100644 --- a/driver/manifest/manifest.go +++ b/plugins/handlers/printer/go.mod @@ -17,18 +17,15 @@ // See the License for the specific language governing permissions and // limitations under the License. // -package manifest +module github.com/sysflow-telemetry/sf-processor/plugins/handlers/printer -// Manifest constants. Do not edit it. Values are replaced by make. 
-const ( - Version = "0.1.0" - JSONSchemaVersion = 2 - BuildNumber = "1" -) +go 1.19 + +require github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 -// manifest config keys -const ( - VersionKey string = "version" - JSONSchemaVersionKey string = "jsonschemaversion" - BuildNumberKey string = "buildnumber" +require ( + github.com/actgardner/gogen-avro/v7 v7.3.1 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect ) diff --git a/plugins/handlers/printer/go.sum b/plugins/handlers/printer/go.sum new file mode 100644 index 00000000..645d31c2 --- /dev/null +++ b/plugins/handlers/printer/go.sum @@ -0,0 +1,27 @@ +github.com/actgardner/gogen-avro/v7 v7.3.1 h1:6JJU3o7168lcyIB6uXYyYdflCsJT3aMFKZPSpSc4toI= +github.com/actgardner/gogen-avro/v7 v7.3.1/go.mod h1:1d45RpDvI29sU7l9wUxlRTEglZSdQSbd6bDbWJaEMgo= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= +github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230404030540-37e5fa8614fc h1:3+lsQOjaTym5jHq3X2q/NwrCLVD0BHnNqfvR7mky44Y= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230404030540-37e5fa8614fc/go.mod h1:hK3FNloWIvlioheWODPJcA3TOxJbxMafoUezq3ZNCww= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 h1:ZxzwimQe2R4kYorqS33/l+m/+SXWMzPn1cLtpA1ExA0= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300/go.mod h1:rvE0WXuIQmACykrVpAKxP5Crf/7KvZplUTULATmAuf4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/plugins/handlers/printer/pipeline.printer.json b/plugins/handlers/printer/pipeline.printer.json new file mode 
100644 index 00000000..5887516c --- /dev/null +++ b/plugins/handlers/printer/pipeline.printer.json @@ -0,0 +1,10 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "printer", + "handlerlibpath": "../resources/handlers", + "in": "sysflow sysflowchan" + } + ] +} diff --git a/plugins/handlers/printer/printer.go b/plugins/handlers/printer/printer.go new file mode 100644 index 00000000..00c8fd6f --- /dev/null +++ b/plugins/handlers/printer/printer.go @@ -0,0 +1,137 @@ +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package main + +import ( + "github.com/sysflow-telemetry/sf-apis/go/logger" + "github.com/sysflow-telemetry/sf-apis/go/plugins" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" +) + +const ( + handlerName string = "printer" +) + +// Handler exports a symbol for this plugin. +var Handler Printer + +// Printer defines the main class for the flatterner plugin. +type Printer struct { +} + +// NewPrinter creates a new Printer instance. +func NewPrinter() plugins.SFHandler { + return new(Printer) +} + +// RegisterChannel registers channels to plugin cache. +func (s *Printer) RegisterChannel(pc plugins.SFPluginCache) { +} + +// RegisterHandler registers handler to handler cache. +func (s *Printer) RegisterHandler(hc plugins.SFHandlerCache) { + hc.AddHandler(handlerName, NewPrinter) +} + +// Init initializes the handler with a configuration map. +func (s *Printer) Init(conf map[string]interface{}) error { + return nil +} + +// IsEntityEnabled is used to check if the flattener returns entity records. +func (s *Printer) IsEntityEnabled() bool { + return false +} + +// SetOutChan sets the plugin output channel. +func (s *Printer) SetOutChan(chObj []interface{}) { +} + +// Cleanup tears down resources. +func (s *Printer) Cleanup() { + logger.Trace.Println("Calling Cleanup on Printer channel") +} + +// HandleHeader processes Header entities. +func (s *Printer) HandleHeader(sf *plugins.CtxSysFlow, hdr *sfgo.SFHeader) error { + return nil +} + +// HandleContainer processes Container entities. +func (s *Printer) HandleContainer(sf *plugins.CtxSysFlow, cont *sfgo.Container) error { + return nil +} + +// HandlePod processes Pod entities. +func (s *Printer) HandlePod(sf *plugins.CtxSysFlow, cont *sfgo.Pod) error { + return nil +} + +// HandleK8sEvt processes K8s Events. +func (s *Printer) HandleK8sEvt(sf *plugins.CtxSysFlow, ke *sfgo.K8sEvent) error { + return nil +} + +// HandleProcess processes Process entities. +func (s *Printer) HandleProcess(sf *plugins.CtxSysFlow, proc *sfgo.Process) error { + return nil +} + +// HandleFile processes File entities. +func (s *Printer) HandleFile(sf *plugins.CtxSysFlow, file *sfgo.File) error { + return nil +} + +// HandleNetFlow processes Network Flows. 
+func (s *Printer) HandleNetFlow(sf *plugins.CtxSysFlow, nf *sfgo.NetworkFlow) error { + logger.Info.Printf("NetworkFlow %s, %d", sf.Process.Exe, nf.Dport) + return nil +} + +// HandleFileFlow processes File Flows. +func (s *Printer) HandleFileFlow(sf *plugins.CtxSysFlow, ff *sfgo.FileFlow) error { + logger.Info.Printf("FileFlow %s, %d", sf.Process.Exe, ff.Fd) + return nil +} + +// HandleFileEvt processes File Events. +func (s *Printer) HandleFileEvt(sf *plugins.CtxSysFlow, fe *sfgo.FileEvent) error { + logger.Info.Printf("FileEvt %s, %d", sf.Process.Exe, fe.Tid) + return nil +} + +// HandleNetEvt processes Network Events. +func (s *Printer) HandleNetEvt(sf *plugins.CtxSysFlow, ne *sfgo.NetworkEvent) error { + logger.Info.Printf("NetEvt %s, %d", sf.Process.Exe, ne.Tid) + return nil +} + +// HandleProcEvt processes Process Events. +func (s *Printer) HandleProcEvt(sf *plugins.CtxSysFlow, pe *sfgo.ProcessEvent) error { + logger.Info.Printf("ProcEvt %s, %d", sf.Process.Exe, pe.Tid) + return nil +} + +// HandleProcFlow processes Process Flows. +func (s *Printer) HandleProcFlow(sf *plugins.CtxSysFlow, pf *sfgo.ProcessFlow) error { + logger.Info.Printf("ProcFlow %s, %v", sf.Process.Exe, pf.ProcOID) + return nil +} + +// This function is not run when the module is used as a plugin. +func main() {} diff --git a/plugins/processors/example/Makefile b/plugins/processors/example/Makefile new file mode 100644 index 00000000..b41ff1e3 --- /dev/null +++ b/plugins/processors/example/Makefile @@ -0,0 +1,9 @@ +GOCMD=go +GOBUILD=$(GOCMD) build -buildmode=plugin -trimpath -tags exclude_graphdriver_btrfs +OUTPUT=../../../resources/plugins + +.PHONY: all +all: + mkdir -p $(OUTPUT); $(GOBUILD) -o $(OUTPUT)/example.so . + + diff --git a/plugins/processors/example/README.md b/plugins/processors/example/README.md new file mode 100644 index 00000000..f39c182d --- /dev/null +++ b/plugins/processors/example/README.md @@ -0,0 +1,91 @@ +# Processor Plugins + +User-defined plugins can be plugged in to extend the sf-processor pipeline. These are the most generic type of plugins, from which all built-in processor plugins are built. Check the `core` package for examples. We have built-in processor plugins for flattening the telemetry stream, implementing a policy engine, and creating event exporters. + +## Interface + +Processor plugins (or just plugins) are implemented via the golang plugin mechanism. A plugin must implement the following interface, defined in the `github.com/sysflow-telemetry/sf-apis/go/plugins` package. + +```go +// SFProcessor defines the SysFlow processor interface. +type SFProcessor interface { + Register(pc SFPluginCache) + Init(conf map[string]interface{}) error + Process(ch []interface{}, wg *sync.WaitGroup) + GetName() string + SetOutChan(ch []interface{}) + Cleanup() +} +``` + +The `Process` function is the main function of the plugin. It's where the "main loop" of the plugin should be implemented. It receives the input channels configured in the custom plugin's block in the pipeline configuration. It also receives the pipeline thread WaitGroup. Custom processing code should be implemented using this function. `Init` is called once, when the pipeline is loaded. `Cleanup` is called when the pipeline is terminated. `SetOutChan` receives a slice with the output channels configured in the plugin's block in the pipeline configuration. + +When loading a pipeline, sf-processor performs a series of health checks before the pipeline is enabled. If these health checks fail, the processor terminates.
To enable health checks on custom plugins, implement the `Test` function defined in the interface below. For an example, check `core/exporter/exporter.go`. + +```go +// SFTestableProcessor defines a testable SysFlow processor interface. +type SFTestableProcessor interface { + SFProcessor + Test() (bool, error) +} +``` + +## Pre-requisites + +* Go 1.19 (if building locally, without the plugin builder) + +## Build + +The `example` plugin is a minimal custom plugin that illustrates how to read records from the input channel and log them to the standard output. + +To run this example, in the root of sf-processor, build the processor and the example plugin. Note that this plugin's shared object is generated in `resources/plugins/example.so`. + +```bash +make build && make -C plugins/processors/example +``` + +Then, run: + +```bash +cd driver && ./sfprocessor -log=info -config=../plugins/processors/example/pipeline.example.json ../resources/traces/tcp.sf +``` + +## Plugin builder + +To build the plugin for release, Go requires the code to be compiled with the exact package versions that the SysFlow processor was compiled with. The easiest way to achieve this is to use the pre-built `plugin-builder` Docker image in your build. This option also works for building plugins for deployment with the SysFlow binary packages. + +Below is an example of how this can be achieved. Set $TAG to a SysFlow release (>=0.4.0), `edge`, or `dev`. + +First, build the plugin: + +```bash +docker run --rm \ + -v $(pwd)/plugins:/go/src/github.com/sysflow-telemetry/sf-processor/plugins \ + -v $(pwd)/resources:/go/src/github.com/sysflow-telemetry/sf-processor/resources \ + sysflowtelemetry/plugin-builder:$TAG \ + make -C /go/src/github.com/sysflow-telemetry/sf-processor/plugins/processors/example +``` + +To test it, run the pre-built processor with the example configuration and trace. + +```bash +docker run --rm \ + -v $(pwd)/plugins:/usr/local/sysflow/plugins \ + -v $(pwd)/resources:/usr/local/sysflow/resources \ + -w /usr/local/sysflow/bin \ + --entrypoint=/usr/local/sysflow/bin/sfprocessor \ + sysflowtelemetry/sf-processor:$TAG \ + -log=info -config=../plugins/processors/example/pipeline.example.json ../resources/traces/tcp.sf +``` + +The output on the above pre-recorded trace should look like this: + +```plain +[Health] 2022/02/21 12:55:19 pipeline.go:246: Health checks: passed +[Info] 2022/02/21 12:55:19 main.go:147: Successfully loaded pipeline configuration +[Info] 2022/02/21 12:55:19 pipeline.go:170: Starting the processing pipeline +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./server, 13823 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./client, 13824 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./client, 13824 +[Info] 2022/02/21 12:55:19 example.go:75: Process Event: ./server, 13823 +``` diff --git a/plugins/example/example.go b/plugins/processors/example/example.go similarity index 67% rename from plugins/example/example.go rename to plugins/processors/example/example.go index dfc79f66..6fcd2593 100644 --- a/plugins/example/example.go +++ b/plugins/processors/example/example.go @@ -1,4 +1,3 @@ -// // Copyright (C) 2020 IBM Corporation. // // Authors: @@ -9,14 +8,13 @@ // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// package main import ( @@ -24,7 +22,7 @@ import ( "github.com/sysflow-telemetry/sf-apis/go/logger" "github.com/sysflow-telemetry/sf-apis/go/plugins" - "github.ibm.com/sysflow/sf-processor/core/flattener" + "github.com/sysflow-telemetry/sf-apis/go/sfgo" ) const ( @@ -48,7 +46,7 @@ func (s *Example) GetName() string { } // Init initializes the plugin with a configuration map. -func (s *Example) Init(conf map[string]string) error { +func (s *Example) Init(conf map[string]interface{}) error { return nil } @@ -58,25 +56,29 @@ func (s *Example) Register(pc plugins.SFPluginCache) { } // Process implements the main interface of the plugin. -func (s *Example) Process(ch interface{}, wg *sync.WaitGroup) { - cha := ch.(*flattener.FlatChannel) - record := cha.In - logger.Trace.Println("Example channel capacity:", cap(record)) - defer wg.Done() - logger.Trace.Println("Starting Example") - for { - fc, ok := <-record - if !ok { - logger.Trace.Println("Channel closed. Shutting down.") - break +func (s *Example) Process(ch []interface{}, wg *sync.WaitGroup) { + for _, chi := range ch { + cha := chi.(plugins.Channel[*sfgo.FlatRecord]) + record := cha.In + logger.Trace.Println("Example channel capacity:", cap(record)) + defer wg.Done() + logger.Trace.Println("Starting Example") + for { + fc, ok := <-record + if !ok { + logger.Trace.Println("Channel closed. Shutting down.") + break + } + if fc.Ints[sfgo.SYSFLOW_IDX][sfgo.SF_REC_TYPE] == sfgo.PROC_EVT { + logger.Info.Printf("Process Event: %s, %d", fc.Strs[sfgo.SYSFLOW_IDX][sfgo.PROC_EXE_STR], fc.Ints[sfgo.SYSFLOW_IDX][sfgo.EV_PROC_TID_INT]) + } } - logger.Info.Println(fc) } logger.Trace.Println("Exiting Example") } // SetOutChan sets the output channel of the plugin. -func (s *Example) SetOutChan(ch interface{}) {} +func (s *Example) SetOutChan(ch []interface{}) {} // Cleanup tears down plugin resources. func (s *Example) Cleanup() {} diff --git a/plugins/processors/example/go.mod b/plugins/processors/example/go.mod new file mode 100644 index 00000000..6b7c43ae --- /dev/null +++ b/plugins/processors/example/go.mod @@ -0,0 +1,34 @@ +// +// Copyright (C) 2020 IBM Corporation. +// +// Authors: +// Frederico Araujo +// Teryl Taylor +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +module github.com/sysflow-telemetry/sf-processor/plugins/processors/example + +go 1.19 + +require github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 + +require ( + github.com/actgardner/gogen-avro/v7 v7.3.1 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 // indirect + github.com/stretchr/testify v1.7.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/sysflow-telemetry/sf-processor/core => ../../../core diff --git a/plugins/processors/example/go.sum b/plugins/processors/example/go.sum new file mode 100644 index 00000000..d8429d2a --- /dev/null +++ b/plugins/processors/example/go.sum @@ -0,0 +1,28 @@ +github.com/actgardner/gogen-avro/v7 v7.3.1 h1:6JJU3o7168lcyIB6uXYyYdflCsJT3aMFKZPSpSc4toI= +github.com/actgardner/gogen-avro/v7 v7.3.1/go.mod h1:1d45RpDvI29sU7l9wUxlRTEglZSdQSbd6bDbWJaEMgo= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= +github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw= +github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c h1:5BCJMIuiysHlYJe+nr/203cqIS6cpTIssbUD8v88VOU= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230905191833-17e4c751b04c/go.mod h1:eo1ATE056Rqb9LhE4LA/0Y2AHfV//1zdCw0py4/S5HM= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300 h1:ZxzwimQe2R4kYorqS33/l+m/+SXWMzPn1cLtpA1ExA0= +github.com/sysflow-telemetry/sf-apis/go v0.0.0-20230929141246-bc28a59e1300/go.mod h1:rvE0WXuIQmACykrVpAKxP5Crf/7KvZplUTULATmAuf4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/plugins/example/pipeline.example.json 
b/plugins/processors/example/pipeline.example.json similarity index 100% rename from plugins/example/pipeline.example.json rename to plugins/processors/example/pipeline.example.json diff --git a/resources/mappings/ecs_mapping.json b/resources/mappings/ecs_mapping.json new file mode 100644 index 00000000..128b2630 --- /dev/null +++ b/resources/mappings/ecs_mapping.json @@ -0,0 +1,512 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date_nanos" + }, + "agent" : { + "properties" : { + "type" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "version" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + } + } + }, + "container" : { + "properties" : { + "id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "image" : { + "properties" : { + "id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + } + } + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "runtime" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + }, + "sf_privileged" : { + "type" : "boolean" + } + } + }, + "destination" : { + "properties" : { + "address" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "bytes" : { + "type" : "long" + }, + "ip" : { + "type" : "ip" + }, + "packets" : { + "type" : "long" + }, + "port" : { + "type" : "integer" + } + } + }, + "ecs" : { + "properties" : { + "version" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 16 + } + } + }, + "event" : { + "properties" : { + "action" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "category" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + }, + "duration" : { + "type" : "long" + }, + "end" : { + "type" : "date_nanos" + }, + "kind" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + }, + "original" : { + "type" : "text", + "norms": false, + "index": false + }, + "severity": { + "type" : "short" + }, + "sf_ret" : { + "type" : "integer" + }, + "sf_state" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + }, + "sf_type" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 2 + }, + "start" : { + "type" : "date_nanos" + }, + "type" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + }, + "reason" : { + "type": "text", + "norms": false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + }, + "file" : { + "properties" : { + "directory" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "hash" : { + "properties" : { + "md5" : { + "type" : "keyword", + "norms": false, + "ignore_above": 32 + }, + "sha1" : { + "type" : "keyword", + "norms": false, + "ignore_above": 40 + }, + "sha256" : { + "type" : "keyword", + "norms": false, + "ignore_above": 64 + } + } + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "path" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "target_path" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "type" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + } + } + }, + "host" : { + "properties" : { + "id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "ip" : { + "type" : "ip" + }, + "name" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "network" : { + "properties" : { + 
"bytes" : { + "type" : "long" + }, + "community_id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "iana_number" : { + "type" : "short" + }, + "transport" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 16 + } + } + }, + "orchestrator": { + "properties" : { + "namespace" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "resource" : { + "properties" : { + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "type": { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + } + } + }, + "type": { + "type" : "keyword", + "norms": false, + "ignore_above" : 32 + } + } + }, + "pod": { + "properties" : { + "hostip": { + "type" : "ip" + }, + "id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "internalip": { + "type" : "ip" + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "namespace" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "nodename" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "restartcnt": { + "type": "integer" + }, + "ts": { + "type" : "date_nanos" + } + } + }, + "process" : { + "properties" : { + "args" : { + "type" : "text", + "norms": false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "args_count" : { + "type" : "short" + }, + "command_line" : { + "type" : "text", + "norms": false + }, + "executable" : { + "type" : "text", + "norms": false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "hash" : { + "properties" : { + "md5" : { + "type" : "keyword", + "norms": false, + "ignore_above": 32 + }, + "sha1" : { + "type" : "keyword", + "norms": false, + "ignore_above": 40 + }, + "sha256" : { + "type" : "keyword", + "norms": false, + "ignore_above": 64 + } + } + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "parent" : { + "properties" : { + "args" : { + "type" : "text", + "norms": false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "args_count" : { + "type" : "short" + }, + "command_line" : { + "type" : "text", + "norms": false + }, + "executable" : { + "type" : "text", + "norms": false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 512 + } + } + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "pid" : { + "type" : "integer" + }, + "start" : { + "type" : "date_nanos" + } + } + }, + "pid" : { + "type" : "integer" + }, + "start" : { + "type" : "date_nanos" + } + } + }, + "sf_file_action" : { + "properties" : { + "bytes_read" : { + "type" : "long" + }, + "bytes_written" : { + "type" : "long" + }, + "opflags" : { + "type" : "integer" + }, + "read_ops" : { + "type" : "long" + }, + "write_ops" : { + "type" : "long" + } + } + }, + "service": { + "type" : "nested", + "include_in_root" : true, + "properties" : { + "clusterip": { + "type" : "ip" + }, + "id" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 64 + }, + "name" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "namespace" : { + "type" : "keyword", + "norms": false, + "ignore_above" : 256 + }, + "ports" : { + "type" : "nested", + "include_in_root" : true, + "properties" : { + "port": { + "type" : "integer" + }, + "targetport" : { + "type" : "integer" + }, + "nodeport" : { + "type" : "integer" + }, + "proto" : { + "type" : "keyword", + "norms" : false, 
+ "ignore_above" : 32 + } + } + } + } + }, + "source" : { + "properties" : { + "address" : { + "type" : "keyword", + "norms" : false, + "ignore_above" : 256 + }, + "bytes" : { + "type" : "long" + }, + "ip" : { + "type" : "ip" + }, + "packets" : { + "type" : "long" + }, + "port" : { + "type" : "integer" + } + } + }, + "user" : { + "properties" : { + "group" : { + "properties" : { + "id" : { + "type" : "integer" + }, + "name" : { + "type" : "keyword", + "norms" : false, + "ignore_above" : 256 + } + } + }, + "id" : { + "type" : "integer" + }, + "name" : { + "type" : "keyword", + "norms" : false, + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/resources/pipelines/pipeline.distribution.json b/resources/pipelines/pipeline.distribution.json index be5278dc..ee0fb19c 100644 --- a/resources/pipelines/pipeline.distribution.json +++ b/resources/pipelines/pipeline.distribution.json @@ -10,16 +10,18 @@ "processor": "policyengine", "in": "flat flattenerchan", "out": "evt eventchan", - "policies": "/usr/local/sysflow/resources/policies/" + "policies": "/usr/local/sysflow/resources/policies", + "mode": "alert" }, { "processor": "exporter", "in": "evt eventchan", "export": "syslog", - "proto": "tcp", - "tag": "sysflow", - "host": "localhost", - "port": "514" + "format": "json", + "syslog.proto": "tcp", + "syslog.tag": "sysflow", + "syslog.host": "localhost", + "syslog.port": "514" } ] } diff --git a/resources/pipelines/pipeline.elk.json b/resources/pipelines/pipeline.elk.json new file mode 100644 index 00000000..f9b72ab2 --- /dev/null +++ b/resources/pipelines/pipeline.elk.json @@ -0,0 +1,31 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "../resources/policies/distribution/filter.yaml", + "mode": "alert" + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "es", + "format": "ecs", + "es.addresses": "https://localhost:9200", + "es.index": "my-index", + "es.username": "elastic", + "es.password": "changeme", + "es.bulk.numWorkers": "1", + "es.bulk.flushBuffer": "5000000", + "es.bulk.flushTimeout": "30s", + "buffer": "1000" + } + ] +} diff --git a/resources/pipelines/pipeline.findings.json b/resources/pipelines/pipeline.findings.json new file mode 100644 index 00000000..aac62d18 --- /dev/null +++ b/resources/pipelines/pipeline.findings.json @@ -0,0 +1,36 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "../resources/policies/distribution/filter.yaml", + "mode": "alert" + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "es", + "format": "ecs", + "findings.apikey": "findings API key (do not set it if reading from secret vault)", + "findings.url": "findings API URL (default: https://us-south.secadvisor.cloud.ibm.com/findings", + "findings.accountid": "findings API account ID", + "findings.provider": "findings API provider", + "findings.sqlqueryurl": "SQL Query URL (default: https://us.sql-query.cloud.ibm.com/sqlquery)", + "findings.sqlquerycrn": "SQL Query instance crn", + "findings.region": "findings API region", + "findings.s3region": "S3 region", + "findings.s3bucket": "S3 bucket", + "findings.path": "findings events path (default: /mnt/occurrences)", + 
"findings.pool.capacity": "findings event pool capacity (default: 250)", + "findings.pool.maxage": "findings event pool age limit in minutes (default: 1440)", + "vault.secrets": "true|false (set to true if using vaults)" + } + ] +} diff --git a/resources/pipelines/pipeline.local.json b/resources/pipelines/pipeline.local.json new file mode 100644 index 00000000..a7c868c7 --- /dev/null +++ b/resources/pipelines/pipeline.local.json @@ -0,0 +1,27 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "/etc/sysflow/policies/distribution/filter.yaml", + "mode": "alert" + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "syslog", + "format": "json", + "syslog.proto": "udp", + "syslog.tag": "sysflow", + "syslog.host": "localhost", + "syslog.port": "514" + } + ] +} diff --git a/resources/pipelines/pipeline.template.json b/resources/pipelines/pipeline.template.json index 105cd635..97a458d6 100644 --- a/resources/pipelines/pipeline.template.json +++ b/resources/pipelines/pipeline.template.json @@ -5,29 +5,53 @@ "processor": "sysflowreader", "handler": "flattener", "in": "sysflow sysflowchan", - "out": "flat flattenerchan" + "out": "flat flattenerchan", + "filter.enabled": "on|off (default: off)", + "filter.maxage": "time decay in minutes (default: 24H)" }, { "processor": "policyengine", "in": "flat flattenerchan", "out": "evt eventchan", "policies": "file|dir path (default: /usr/local/sf-processor/conf/)", - "mode": "alert|filter (default: alert)" + "mode": "alert|enrich (default: enrich)", + "monitor": "none|local (default: none)", + "monitor.interval": "policy monitoring interval (default is 30 seconds)", + "concurrency": "number of engine threads (default is 5)" , + "actiondir": "dir path to action .so files" }, { "processor": "exporter", "in": "evt eventchan", - "export": "terminal|file|syslog (default: terminal)", - "flat": "false|true (default: false)", - "path": "output file path (default: ./export.out)", - "proto": "rsyslog protocol tcp|udp|tcp+tls (default: tcp)", - "tag": "rsyslog tag (default: sysflow)", - "source": "rsyslog source hostname (default: hostname)", - "host": "rsyslog host (default: localhost)", - "port": "ryslog port (default: 514)", - "format": "json", - "type": "telemetry|batch (default: telemetry)", - "buffer": "event batching aggregation buffer (default: 0)" + "export": "terminal|file|syslog|es|findings|null (default: terminal)", + "format": "json|ecs|occurrence", + "buffer": "event aggregation buffer (default: 0)", + "vault.secrets": "true|false", + "vault.path": "/run/secrets (default)", + "file.path": "output file path (default: ./export.out)", + "syslog.proto": "rsyslog protocol tcp|udp|tcp+tls (default: tcp)", + "syslog.tag": "rsyslog tag (default: sysflow)", + "syslog.source": "rsyslog source hostname (default: hostname)", + "syslog.host": "rsyslog host (default: localhost)", + "syslog.port": "ryslog port (default: 514)", + "es.addresses": "ip1,ip2,... 
(comma-separated list)", + "es.index": "elastic index (default: sysflow)", + "es.username": "elastic username (do not set it if reading from secret vault)", + "es.password": "elastic password (do not set it if reading from secret vault)", + "es.bulk.numWorkers": "number of bulk exporter workers (default: 0)", + "es.bulk.flushBuffer": "bulk exporter buffer size (default: 5000000)", + "es.bulk.flushTimeout": "bulk exporter flush timeout in seconds (default: 30)", + "findings.apikey": "findings API key (do not set it if reading from secret vault)", + "findings.url": "findings API URL (default: https://us-south.secadvisor.cloud.ibm.com/findings", + "findings.accountid": "findings API account ID", + "findings.provider": "findings API provider", + "findings.note": "findings API node ID", + "findings.sqlqueryurl": "SQL Query URL (default: https://us.sql-query.cloud.ibm.com/sqlquery)", + "findings.sqlquerycrn": "SQL Query instance crn", + "findings.region": "findings API region", + "findings.path": "findings events path (default: /mnt/occurrences)", + "findings.pool.capacity": "findings event pool capacity (default: 250)", + "findings.pool.maxage": "findings event pool age limit in minutes (default: 1440)" } ] -} \ No newline at end of file +} diff --git a/resources/pipelines/pipeline.terminal.json b/resources/pipelines/pipeline.terminal.json new file mode 100644 index 00000000..b7ede681 --- /dev/null +++ b/resources/pipelines/pipeline.terminal.json @@ -0,0 +1,24 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "/etc/sysflow/policies/distribution/filter.yaml", + "mode": "alert" + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "terminal", + "format": "json" + } + ] +} + diff --git a/resources/policies/distribution/filter.yaml b/resources/policies/distribution/filter.yaml index 72ef9942..60ac9b5f 100644 --- a/resources/policies/distribution/filter.yaml +++ b/resources/policies/distribution/filter.yaml @@ -1,558 +1,677 @@ - -##### Macros - -- macro: FileFlow - condition: sf.type=FF - -- macro: FileEvent - condition: sf.type=FE - -- macro: ProcessEvent - condition: sf.type=PE - -- macro: NetworkFlow - condition: sf.type=NF - -- macro: setns_syscall - condition: FileFlow and sf.opflags in (SETNS) - -- macro: exit_syscall - condition: ProcessEvent and sf.opflags = EXIT - -- macro: exec_syscall - condition: ProcessEvent and sf.opflags = EXEC - -- macro: clone_syscall - condition: ProcessEvent and sf.opflags = CLONE - -- macro: unlink_syscall - condition: FileEvent and sf.opflags = UNLINK - -- macro: setuid_syscall - condition: ProcessEvent and sf.opflags = SETUID - -- macro: rename_syscall - condition: FileEvent and sf.opflags = RENAME - -- macro: mkdir_syscall - condition: FileEvent and sf.opflags = MKDIR - -- macro: rmdir_syscall - condition: FileEvent and sf.opflags = RMDIR - -- macro: link_syscall - condition: FileEvent and sf.opflags = LINK - -- macro: symlink_syscall - condition: FileEvent and sf.opflags = SYMLINK - -- macro: FileOpen - condition: FileFlow and sf.opflags in (OPEN) - -- macro: file_open_write - condition: FileOpen and sf.file.is_open_write = true - -- macro: file_open_read - condition: FileOpen and sf.file.is_open_read = true - -- macro: file_write - condition: FileFlow and sf.opflags in (WRITE) - -- macro: file_read - condition: FileFlow and sf.opflags in 
(READ) - -- macro: file_read_or_file_opened_for_read - condition: file_read or file_open_read - -- macro: file_write_or_file_opened_for_write - condition: file_write or file_open_write - -- list: _infrastructure_containers - items: [ocp, ceph, csi-provisioner, csi-attacher, csi-snapshotter] - -- macro: infrastructure_containers - condition: sf.container.image pmatch (_infrastructure_containers) - -- macro: nginx_ingress_controller_container - condition: sf.container.image pmatch (nginx-ingress-controller) - -#### Process Clone tuning - -- list: _os_level_noisy_process_clone_by_process - items: [/usr/bin/runc, /usr/libexec/crio/conmon, /proc/self/exe, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/bin/hyperkube, /usr/lib/systemd/systemd-journald, /usr/bin/dpkg-deb, /usr/bin/dpkg, /usr/bin/apt-get, /usr/lib/systemd/systemd-udevd, /usr/bin/apt-config, /var/lib/dpkg/info/vim-runtime.postinst, /usr/bin/docker, /usr/share/debconf/frontend, /usr/lib/apt/apt.systemd.daily, /usr/lib/apt/methods/gpgv, /usr/sbin/sshd, /usr/sbin/syslog-ng, /lib/systemd/systemd-journald, /lib/systemd/systemd-udevd, /usr/bin/apt-key, /var/lib/cni/bin/openshift-sdn, /var/lib/cni/bin/multus, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-hwe-eol, /usr/lib/update-notifier/apt-check, /usr/bin/run-parts, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/containerd-shim, /usr/bin/gpgconf, /usr/bin/ceph, /usr/bin/openshift-sdn-node] - -- list: _os_level_noisy_process_clone_by_parent_process - items: [/usr/bin/runc, /usr/bin/hyperkube, /usr/bin/dpkg, /usr/bin/apt-get, /usr/bin/apt-key, /usr/bin/appstreamcli, /usr/bin/containerd, /var/lib/dpkg/info/vim.postinst] - -- list: _openshift_infrastructure_container_noisy_process_clone_by_process - items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/runc, grpc_health_probe, /prometheus/sh, /usr/bin/dig, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/local/bin/rook, /usr/bin/appregistry-server, /usr/sbin/haproxy, /usr/sbin/tuned-adm, /usr/bin/bash, "\u003cNA\u003e"] - -- list: _openshift_infrastructure_container_noisy_process_clone_by_parent_process - items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /bin/bash, /usr/bin/sh, /usr/bin/bash, /usr/share/openvswitch/scripts/ovs-ctl, /proc/self/exe, /usr/bin/ceph, /usr/bin/openshift-tuned, /usr/sbin/tuned-adm] - -- macro: _drop_out_noisy_process_clone_events_from_bash_proc_self_exe - condition: clone_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents - -- macro: _drop_out_noisy_process_clone_events - condition: clone_syscall - and ((sf.proc.exe in (_os_level_noisy_process_clone_by_process) or sf.pproc.exe in (_os_level_noisy_process_clone_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_process_clone_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_clone_by_parent_process)))) - -#### File Read tuning - -- list: _os_level_noisy_file_read_by_process - items: [/usr/bin/hyperkube, /usr/lib/systemd/systemd, /usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/lib/systemd/systemd-journald, /usr/sbin/sshd, /lib/systemd/systemd-journald, /usr/sbin/irqbalance, /lib/systemd/systemd, /usr/bin/dbus-daemon, /usr/bin/updatedb.mlocate, /lib/systemd/systemd-udevd, /usr/bin/apt-config, /lib/systemd/system-generators/systemd-sysv-generator, /usr/sbin/cron, /usr/bin/dpkg, /usr/bin/mandb, /bin/systemctl, /usr/bin/apt-get, 
/usr/bin/lsb_release, /usr/bin/dockerd, /bin/networkctl, /sbin/ldconfig.real, /lib/systemd/systemd-sysctl, /lib/systemd/systemd-networkd, /usr/bin/docker, /usr/bin/containerd-shim, /usr/sbin/syslog-ng, /lib/systemd/systemd-resolved, /usr/bin/kubelet, /usr/bin/mongod, /usr/bin/mongo, /usr/bin/prometheus, /usr/lib/accountsservice/accounts-daemon, /usr/lib/systemd/systemd-logind, /usr/lib/systemd/systemd-cgroups-agent, /usr/lib/policykit-1/polkitd, /usr/bin/dpkg-divert, /usr/lib/update-notifier/apt-check, /usr/lib/systemd/systemd-resolved, /usr/bin/apt-key, /usr/bin/hwe-support-status, /usr/sbin/dpkg-preconfigure, /usr/lib/cnf-update-db, /usr/bin/appstreamcli, /usr/bin/gpg-connect-agent, /usr/lib/packagekit/packagekitd, /usr/libexec/gsd-housekeeping, /usr/bin/systemd-detect-virt, /usr/bin/networkctl, /usr/lib/systemd/systemd-networkd, /usr/sbin/rsyslogd, /usr/sbin/NetworkManager, /usr/bin/id, /usr/bin/containerd, /usr/bin/update-alternatives, /usr/bin/gdbus, /usr/bin/dpkg-maintscript-helper, /usr/bin/find, /usr/lib/systemd/systemd-udevd, /usr/bin/test] - -- list: _os_level_noisy_file_read_by_parent_process - items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/hyperkube, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/dpkg-deb, /usr/sbin/sshd, /usr/bin/run-parts, /bin/run-parts, /usr/bin/apt-key, /usr/lib/ubuntu-release-upgrader/release-upgrade-motd, /usr/bin/dpkg, /usr/share/debconf/frontend, /usr/bin/dockerd, /var/lib/dpkg/info/vim.postinst, /usr/sbin/add-shell, /usr/local/bin/docker-compose, /var/lib/dpkg/info/mime-support.postinst, /usr/lib/systemd/systemd-udevd, /var/lib/cni/bin/openshift-sdn, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/update-notifier/update-motd-updates-available, /usr/lib/update-notifier/update-motd-hwe-eol, /usr/lib/apt/methods/gpgv, /usr/bin/appstreamcli, /usr/libexec/gnome-session-binary, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/gpgconf] - -- list: _openshift_infrastructure_container_noisy_file_read_by_process - items: [/usr/bin/node_exporter, /usr/bin/curl, /usr/bin/ovs-vsctl, /usr/bin/ovs-appctl, /usr/bin/prometheus, /usr/bin/ceph, /usr/bin/ceph-mds, /usr/bin/ceph-mgr, /usr/bin/ceph-osd, /usr/local/bin/rook, /usr/bin/ceph-mon, /sbin/ldconfig, /usr/sbin/ldconfig, /usr/bin/ovs-ofctl, /usr/share/openvswitch/scripts/ovs-ctl, /usr/share/grafana/bin/grafana-server, /bin/bash, /rootfs/usr/bin/journalctl, /usr/bin/cat, /usr/bin/sed, /usr/bin/sleep, /usr/bin/thanos, /usr/bin/bash, /usr/bin/openshift-router, /usr/bin/alertmanager, /usr/bin/dockerregistry, /usr/bin/dig, /usr/bin/tail, /usr/bin/ls, /usr/bin/kube-rbac-proxy, /usr/bin/cp, /usr/bin/coredns, /usr/bin/machine-config-daemon, /usr/bin/oauth-proxy, /usr/bin/telemeter-client, /usr/bin/kube-state-metrics, /usr/bin/grep, /usr/bin/openshift-state-metrics, /usr/bin/prometheus-config-reloader, /usr/bin/cmp, /usr/bin/openshift-tuned, /usr/local/bin/helm-operator, /manager, /usr/sbin/ovs-vswitchd, /usr/bin/appregistry-server, /usr/share/grafana/bin/grafana-server, /usr/bin/uname, /usr/sbin/ovsdb-server] - -- list: _openshift_infrastructure_container_noisy_file_read_by_parent_process - items: [/usr/libexec/crio/conmon, /usr/bin/runc, /usr/bin/machine-config-daemon, /usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/openshift-tuned, /usr/bin/crio, /usr/local/bin/rook, /usr/local/bin/rook, /rook/rook, /usr/bin/ceph, /usr/bin/dumb-init, /usr/bin/openshift-sdn-node, /usr/sbin/ovsdb-server, /usr/sbin/tuned-adm] - -- macro: 
_drop_out_noisy_file_read_events_from_nginx_ingress_controller - condition: file_read_or_file_opened_for_read and nginx_ingress_controller_container and sf.pproc.exe = /usr/bin/dumb-init - -- macro: _drop_file_read_list_of_file_paths - condition: file_read_or_file_opened_for_read - and sf.file.path in (/etc/ld.so.cache) - -## macro to define /proc/self/exe running in a host, its grandparents includes /usr/bin/runc, /usr/libexec/crio/conmon and /usr/bin/crio -## and not exists sf.container.type -> or another way to limit it to the host -## the second macro looking for proc.exe /bin/bash and pproc.exe: /proc/self/exe - -- macro: proc_self_exe_running_in_host_with_cri_grandparents - condition: sf.proc.exe = /proc/self/exe - and (sf.proc.aexe in (/usr/bin/runc) and sf.proc.aexe in (/usr/libexec/crio/conmon) and sf.proc.aexe in (/usr/bin/crio)) - -- macro: bash_and_parent_process_proc_self_exe_with_cri_grandparents - condition: sf.proc.exe = /bin/bash and sf.pproc.exe = /proc/self/exe - and (sf.proc.aexe in (/usr/bin/runc) and sf.proc.aexe in (/usr/libexec/crio/conmon) and sf.proc.aexe in (/usr/bin/crio)) - - -- macro: _drop_out_noisy_file_read_events_from_proc_self_exe - condition: file_read_or_file_opened_for_read and (proc_self_exe_running_in_host_with_cri_grandparents or bash_and_parent_process_proc_self_exe_with_cri_grandparents) - and (sf.file.path in (/proc/self/mountinfo, /etc/group, /proc/self/setgroups, /proc/cpuinfo, /proc/sys/kernel/cap_last_cap, /etc/passwd, /proc/self/status, /proc/filesystems, /proc/self/fd, /sys/kernel/mm/hugepages, /proc/meminfo, /lib64/libc.so.6, /lib64/libnss_files.so.2, /lib64/libdl.so.2, /lib64/libtinfo.so.5, /etc/nsswitch.conf, /proc/filesystems, /etc/ld.so.cache) - or (sf.file.path startswith /proc/self/task/ and sf.file.path contains /attr/current) - or (sf.file.type in (u, p))) - -- macro: _drop_out_noisy_file_read_events - condition: file_read_or_file_opened_for_read and not (file_write_or_file_opened_for_write or setns_syscall) - and ((sf.proc.exe in (_os_level_noisy_file_read_by_process) or sf.pproc.exe in (_os_level_noisy_file_read_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_file_read_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_file_read_by_parent_process)))) - -#### File Modify tuning - -- list: _os_level_noisy_file_modify_by_process - items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/bin/hyperkube, /usr/lib/systemd/systemd-logind, /usr/lib/systemd/systemd-journald, /usr/lib/systemd/systemd-udevd, /usr/sbin/NetworkManager, /usr/lib/systemd/systemd, /usr/bin/dbus-daemon, /usr/bin/apt-get, /usr/lib/update-notifier/apt-check, /usr/bin/dpkg, /usr/bin/dockerd, /usr/lib/apt/methods/gpgv, /usr/bin/update-alternatives, /usr/bin/lsb_release, /usr/bin/containerd, /usr/lib/systemd/systemd-networkd, /usr/lib/systemd/systemd-resolved] - -- list: _os_level_noisy_file_modify_by_parent_process - items: [/usr/libexec/crio/conmon, /usr/bin/runc, /usr/bin/hyperkube, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/sbin/sshd, /usr/bin/dbus-daemon, /usr/bin/dockerd, /lib/systemd/systemd-journald, /lib/systemd/systemd, /lib/systemd/systemd-udevd, /lib/systemd/systemd-logind, /lib/systemd/systemd-timesyncd, /lib/systemd/systemd-resolved, /lib/systemd/systemd-networkd, /usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/update-mime-database, /usr/lib/systemd/systemd-journald, /usr/lib/systemd/systemd-networkd, /usr/lib/systemd/systemd-udevd, 
/usr/lib/systemd/systemd-resolved, /usr/lib/systemd/systemd-timesyncd, /usr/lib/systemd/systemd-logind, /usr/bin/dpkg-deb, /usr/bin/apt-get, /usr/local/bin/docker-compose, /usr/bin/apt-key, /usr/bin/update-alternatives, /usr/bin/containerd, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/apt-config, /usr/lib/apt/methods/gpgv, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/lib/update-notifier/apt-check, /var/lib/dpkg/info/vim.postinst] - -- list: _openshift_infrastructure_container_noisy_file_modify_by_process - items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/sbin/ovs-vswitchd, /usr/bin/curl, /usr/bin/cat, /usr/bin/sh, /usr/bin/oauth-proxy, /usr/bin/ovs-vsctl, /usr/bin/ovs-appctl, /usr/bin/sed, /usr/sbin/ovsdb-server, /usr/bin/appregistry-server, /usr/sbin/haproxy, /usr/bin/dig, /sbin/ldconfig, /usr/bin/ceph-osd, /usr/local/bin/rook, /usr/bin/ovs-ofctl, /usr/bin/ls, /usr/bin/grep, /usr/bin/prometheus-config-reloader, /usr/bin/ceph-mds, /usr/bin/openshift-tuned, /usr/bin/openshift-state-metrics, /usr/bin/openshift-router, /usr/sbin/tuned-adm, /usr/sbin/chronyd, /usr/bin/alertmanager, /usr/sbin/tuned-adm] - -- list: _openshift_infrastructure_container_noisy_file_modify_by_parent_process - items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/sbin/ovs-vswitchd, /usr/bin/dumb-init, /usr/local/bin/rook, /usr/bin/openshift-sdn-node] - -- macro: _drop_out_noisy_file_modify_events - condition: file_write_or_file_opened_for_write and not setns_syscall - and ((sf.proc.exe in (_os_level_noisy_file_modify_by_process) or sf.pproc.exe in (_os_level_noisy_file_modify_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_file_modify_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_file_modify_by_parent_process)))) - -- macro: _drop_file_write_list_of_file_paths - condition: file_write_or_file_opened_for_write - and sf.file.path in (/run/systemd/userdb/io.systemd.DynamicUser, /run/systemd/notify, /dev/pts/1, /dev/null, /proc/self/attr/keycreate, /var/lib/grafana/grafana.db-journal) - -- macro: _drop_file_write_from_rsyslogd - condition: file_write_or_file_opened_for_write - and sf.proc.exe = /usr/sbin/rsyslogd - and sf.file.directory = /var/log - -- macro: _drop_file_write_from_sshd_to_unix_domain - condition: file_write_or_file_opened_for_write - and sf.proc.exe = /usr/sbin/sshd - and sf.file.type = u - -- macro: _drop_file_write_from_tar - condition: file_write_or_file_opened_for_write - and sf.proc.exe = /usr/bin/tar - and sf.file.directory = /var/lib/dpkg/tmp.ci - -- macro: _drop_file_write_from_nginx_ingress_controller - condition: file_write_or_file_opened_for_write and nginx_ingress_controller_container and sf.file.path = /tmp/nginx-status-server.sock - -- macro: _drop_file_write_from_proc_self_exe_part_1 - condition: file_write_or_file_opened_for_write and proc_self_exe_running_in_host_with_cri_grandparents - and sf.file.path startswith /proc/self/task/ and sf.file.path contains /attr/exec - -## the following case results in around 60k events /proc/self/exe writing to a unix domain, file details are null - -- macro: _drop_file_write_from_proc_self_exe_part_2 - condition: file_write_or_file_opened_for_write and proc_self_exe_running_in_host_with_cri_grandparents and sf.file.type in (u, p) - -#### Process exit tuning - -- macro: _drop_thread_exit_events - condition: exit_syscall and sf.proc.pid != sf.proc.tid - -- list: _os_level_noisy_process_exit_by_process - 
items: [/usr/bin/runc, /usr/libexec/crio/conmon, /proc/self/exe, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/sbin/sshd, /usr/bin/dpkg-deb, /usr/bin/apt-key, /usr/bin/dpkg, /usr/bin/apt-config, /usr/bin/cat, /usr/bin/cmp, /usr/bin/dpkg-split, /usr/lib/systemd/systemd-udevd, /usr/bin/sed, /usr/bin/readlink, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/lsb_release, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/apt/methods/gpgv, /usr/bin/test] - -- list: _os_level_noisy_process_exit_by_parent_process - items: [/usr/bin/runc, /usr/bin/hyperkube, /usr/bin/apt-key, /usr/bin/dpkg-deb, /usr/bin/dpkg, /usr/bin/apt-config, /usr/bin/apt-get, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/systemd/systemd-udevd, /usr/bin/appstreamcli, /usr/lib/update-notifier/apt-check, /usr/lib/apt/methods/gpgv, /usr/bin/run-parts, /usr/lib/update-notifier/update-motd-updates-available, /usr/lib/ubuntu-release-upgrader/release-upgrade-motd, /var/lib/dpkg/info/vim.postinst, /usr/bin/dpkg-maintscript-helper, /usr/sbin/cron] - -- list: _openshift_infrastructure_container_noisy_process_exit_by_process - items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/sleep, /usr/bin/cat, /usr/bin/curl, /usr/bin/bash, /usr/bin/ovs-vsctl, /usr/bin/sh, /bin/bash, /usr/bin/ovs-appctl, /usr/bin/sed, /usr/bin/ovs-ofctl, /sbin/ldconfig, /proc/self/exe, /prometheus/sh, /usr/bin/runc, /usr/bin/sed, /usr/bin/cp, /usr/bin/ls, /usr/bin/ceph, /usr/sbin/ldconfig, /usr/bin/grep, /usr/bin/cmp, /usr/bin/dig, /usr/libexec/crio/conmon] - -- list: _openshift_infrastructure_container_noisy_process_exit_by_parent_process - items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/runc, /usr/bin/ceph, /usr/local/bin/rook, /usr/libexec/crio/conmon, /usr/bin/openshift-sdn-node] - -- macro: _drop_out_noisy_process_exit_events_from_bash_proc_self_exe - condition: exit_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents - -- macro: _drop_out_noisy_process_exit_events - condition: exit_syscall - and ((sf.proc.exe in (_os_level_noisy_process_exit_by_process) or sf.pproc.exe in (_os_level_noisy_process_exit_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_process_exit_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_exit_by_parent_process)))) - -#### setns tuning - -- list: _os_level_noisy_setns_events_by_process - items: [/usr/bin/runc, /var/lib/cni/bin/openshift-sdn, /usr/bin/dockerd] - -- list: _os_level_noisy_setns_events_by_parent_process - items: [/usr/bin/crio, /usr/bin/dockerd] - -- macro: _drop_out_noisy_setns_events_events - condition: setns_syscall and (sf.proc.exe in (_os_level_noisy_setns_events_by_process) or sf.pproc.exe in (_os_level_noisy_setns_events_by_parent_process)) - -- macro: _drop_noisy_setns_events_from_proc_self_exe - condition: setns_syscall and proc_self_exe_running_in_host_with_cri_grandparents - -#### Process execution tuning - -- list: _os_level_noisy_process_execution_by_process - items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/sbin/sshd, /usr/bin/dpkg, /usr/bin/dpkg-deb, /usr/bin/dpkg-split, /usr/lib/update-notifier/apt-check, /usr/bin/gpg-connect-agent, /usr/bin/lsb_release, /usr/bin/update-alternatives, /usr/bin/date, /usr/bin/test] - -- list: _os_level_noisy_process_execution_by_parent_process - items: [/usr/bin/hyperkube, /usr/bin/runc, /usr/bin/crio, /usr/libexec/crio/conmon, /usr/bin/dpkg, /usr/bin/dpkg-deb, 
/usr/bin/apt-key, /usr/bin/apt-get, /usr/bin/apt-config, /usr/share/debconf/frontend, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/apt/apt.systemd.daily, /usr/bin/run-parts, /usr/lib/update-notifier/apt-check, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/appstreamcli, /usr/bin/gpgconf, /usr/lib/apt/methods/gpgv] - -- list: _openshift_infrastructure_container_noisy_process_execution_by_process - items: [/usr/sbin/iptables, /usr/sbin/chroot, /usr/bin/sleep, /usr/bin/cat, /usr/bin/curl, /usr/bin/ovs-vsctl, /usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/sed, /usr/bin/ovs-appctl, /usr/bin/ovs-ofctl, /usr/sbin/iptables-save, /usr/bin/openshift-sdn-node, /sbin/ldconfig, /usr/bin/ceph, /usr/bin/ls, /usr/bin/cp, /usr/sbin/ldconfig, /usr/bin/cmp, /usr/bin/dig, /usr/bin/grep, /proc/self/exe, /usr/bin/lsblk] - -- list: _openshift_infrastructure_container_noisy_process_execution_by_parent_process - items: [/usr/bin/openshift-sdn-node, /usr/share/openvswitch/scripts/ovs-ctl, /usr/local/bin/rook, /usr/bin/ceph, /usr/bin/ceph-mgr, /var/lib/haproxy/reload-haproxy, /usr/bin/openshift-router, /usr/bin/openshift-tuned] - - - macro: _drop_out_noisy_process_execution_events_from_bash_proc_self_exe - condition: exec_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents - -- macro: _drop_out_noisy_process_execution_events - condition: exec_syscall - and ((sf.proc.exe in (_os_level_noisy_process_execution_by_process) or sf.pproc.exe in (_os_level_noisy_process_execution_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_process_execution_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_execution_by_parent_process)))) - -### File delete tuning - -- list: _os_level_noisy_file_delete_by_process - items: [/usr/bin/crio, /usr/bin/hyperkube, /usr/bin/runc, /usr/lib/systemd/systemd-udevd, /usr/lib/systemd/systemd, /usr/libexec/crio/conmon, /lib/systemd/systemd-udevd, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/update-alternatives, /usr/bin/dockerd, /usr/lib/systemd/systemd-networkd, /usr/bin/containerd, /usr/lib/apt/methods/gpgv, /usr/lib/ubuntu-release-upgrader/check-new-release, /usr/bin/containerd-shim, /usr/lib/systemd/systemd-resolved, /usr/lib/systemd/systemd-logind, /usr/lib/systemd/systemd-journald, /usr/bin/dpkg-divert, /usr/bin/appstreamcli] - -- list: _os_level_noisy_file_delete_by_parent_process - items: [/usr/bin/runc, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/apt-key] - -- list: _openshift_infrastructure_container_noisy_file_delete_by_process - items: [/usr/bin/appregistry-server, /usr/local/bin/rook, /usr/bin/prometheus, /usr/bin/ceph-mon, /usr/share/grafana/bin/grafana-server, /usr/bin/openshift-tuned, /usr/sbin/tuned-adm] - -- macro: _drop_out_noisy_file_delete_events - condition: unlink_syscall - and ((sf.proc.exe in (_os_level_noisy_file_delete_by_process) or sf.pproc.exe in (_os_level_noisy_file_delete_by_parent_process)) - or (infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_file_delete_by_process))) - -### Network Flows tuning - -- list: _os_level_noisy_network_flows_by_process - items: [/usr/bin/crio, /usr/sbin/haproxy, /usr/bin/hyperkube, /usr/sbin/chronyd, /var/lib/cni/bin/multus, /usr/bin/coredns, /usr/lib/systemd/systemd-resolved] - -- list: _os_level_noisy_network_flows_by_parent_process - items: [/usr/bin/runc, /usr/bin/apt-get] - -- list: 
_openshift_infrastructure_container_noisy_network_flows_by_process - items: [/usr/bin/ceph, /usr/bin/oauth-proxy, /usr/bin/thanos, /usr/bin/kube-rbac-proxy, /usr/bin/ceph-osd, /usr/bin/prom-kube-proxy, /usr/bin/alertmanager, /usr/local/bin/rook, /usr/bin/ceph-mds, /usr/bin/cm-adapter, /usr/bin/telemeter-client, /usr/share/grafana/bin/grafana-server, /usr/bin/machine-config-daemon, /usr/bin/node_exporter, /usr/bin/openshift-tuned, /usr/bin/kube-state-metrics, /usr/bin/openshift-state-metrics, /usr/bin/openshift-router] - -- list: _openshift_infrastructure_container_noisy_network_flows_by_parent_process - items: [/usr/libexec/crio/conmon] - -- macro: _drop_out_noisy_network_flows_from_runc_parent_process - condition: NetworkFlow and sf.pproc.exe = /usr/bin/runc and sf.net.dip = 127.0.0.1 and sf.net.dport in (9, 9090, 50051) - -- macro: _drop_out_noisy_network_flows_from_curl - condition: NetworkFlow and infrastructure_containers and sf.proc.exe = /usr/bin/curl and sf.net.dip = 127.0.0.1 and sf.net.dport = 9090 - -- macro: _drop_out_network_flows_from_log_forwarder_utilities - condition: NetworkFlow and sf.proc.exe in (/usr/sbin/syslog-ng) and sf.net.dport = 514 - -- macro: _drop_out_noisy_network_flows - condition: NetworkFlow - and ( (sf.proc.exe in (_os_level_noisy_network_flows_by_process) or sf.pproc.exe in (_os_level_noisy_network_flows_by_parent_process)) - or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_network_flows_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_network_flows_by_parent_process)))) - -### setuid tuning - -- list: _os_level_noisy_setuid_events_by_process - items: [/usr/bin/runc, /usr/sbin/sshd] - -- macro: _drop_noisy_setuid_events_from_proc_self_exe - condition: setuid_syscall and proc_self_exe_running_in_host_with_cri_grandparents - -- macro: _drop_out_noisy_setuid_events - condition: setuid_syscall - and sf.proc.exe in (_os_level_noisy_setuid_events_by_process) - -### File rename tuning - -- list: _os_level_noisy_file_rename_events_by_process - items: [/usr/bin/runc, /usr/bin/hyperkube, /lib/systemd/systemd-udevd, /usr/bin/dpkg, /usr/lib/systemd/systemd-udevd, /usr/sbin/NetworkManager, /usr/bin/crio, /usr/libexec/crio/conmon, /usr/lib/systemd/systemd, /usr/lib/systemd/systemd-journald, /usr/sbin/logrotate, /usr/bin/dockerd] - -- list: _openshift_infrastructure_container_noisy_file_rename_events_by_process - items: [/usr/bin/alertmanager, /usr/bin/prometheus] - -- macro: _drop_out_noisy_file_rename_events - condition: rename_syscall - and ( (sf.proc.exe in (_os_level_noisy_file_rename_events_by_process)) - or (infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_file_rename_events_by_process))) - -### Directory Creation tuning - -- list: _os_level_noisy_directory_creation_by_process - items: [/usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/update-mime-database, /usr/bin/dpkg-deb, /usr/bin/dockerd, /usr/lib/systemd/systemd-logind, /usr/bin/runc, /usr/sbin/update-rc.d, /usr/bin/containerd, /usr/local/bin/docker-compose, /usr/bin/top, /usr/bin/containerd-shim] - -- list: _os_level_noisy_directory_creation_by_parent_process - items: [/usr/bin/dpkg-deb, /var/lib/dpkg/info/shared-mime-info.postinst, /usr/bin/containerd-shim, /usr/bin/apt-get, /usr/bin/containerd-shim, /usr/bin/containerd, /usr/bin/apt-key] - -- macro: _drop_out_noisy_directory_creation_events - condition: mkdir_syscall - and (sf.proc.exe in 
(_os_level_noisy_directory_creation_by_process) or sf.pproc.exe in (_os_level_noisy_directory_creation_by_parent_process)) - -### Directory Removal tuning - -- list: _os_level_noisy_directory_removal_by_process - items: [/usr/bin/appstreamcli, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/hyperkube, /usr/bin/prometheus] - -- list: _os_level_noisy_directory_removal_by_parent_process - items: [/usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/apt-get] - -- macro: _drop_out_noisy_directory_removal_events - condition: rmdir_syscall - and (sf.proc.exe in (_os_level_noisy_directory_removal_by_process) or sf.pproc.exe in (_os_level_noisy_directory_removal_by_parent_process)) - -### Soft Link tuning - -- list: _os_level_noisy_soft_link_creation_by_process - items: [/usr/bin/dpkg, /usr/bin/apt-get, /usr/bin/update-alternatives, /usr/lib/systemd/systemd, /usr/bin/dockerd, /usr/sbin/update-rc.d, /usr/bin/hyperkube, /usr/lib/systemd/systemd-udevd] - -- macro: _drop_out_noisy_soft_link_creation_events - condition: symlink_syscall - and sf.proc.exe in (_os_level_noisy_soft_link_creation_by_process) - - -### Hard Link tuning - -- list: _os_level_noisy_hard_link_creation_by_process - items: [/usr/bin/dpkg, /usr/bin/dpkg-divert] - -- macro: _drop_out_noisy_hard_link_creation_events - condition: link_syscall - and sf.proc.exe in (_os_level_noisy_hard_link_creation_by_process) - - - - -##### Global filter - -- filter: __global__ - condition: _drop_out_noisy_process_clone_events - or _drop_out_noisy_process_clone_events_from_bash_proc_self_exe - or _drop_out_noisy_file_read_events - or _drop_out_noisy_file_read_events_from_nginx_ingress_controller - or _drop_file_read_list_of_file_paths - or _drop_out_noisy_file_read_events_from_proc_self_exe - or _drop_out_noisy_file_modify_events - or _drop_file_write_list_of_file_paths - or _drop_file_write_from_rsyslogd - or _drop_file_write_from_tar - or _drop_file_write_from_sshd_to_unix_domain - or _drop_file_write_from_nginx_ingress_controller - or _drop_file_write_from_proc_self_exe_part_1 - or _drop_file_write_from_proc_self_exe_part_2 - or _drop_thread_exit_events - or _drop_out_noisy_process_exit_events_from_bash_proc_self_exe - or _drop_out_noisy_process_exit_events - or _drop_out_noisy_setns_events_events - or _drop_noisy_setns_events_from_proc_self_exe - or _drop_out_noisy_process_execution_events - or _drop_out_noisy_process_execution_events_from_bash_proc_self_exe - or _drop_out_noisy_file_delete_events - or _drop_out_noisy_network_flows_from_runc_parent_process - or _drop_out_network_flows_from_log_forwarder_utilities - or _drop_out_noisy_network_flows_from_curl - or _drop_out_noisy_network_flows - or _drop_out_noisy_setuid_events - or _drop_noisy_setuid_events_from_proc_self_exe - or _drop_out_noisy_file_rename_events - or _drop_out_noisy_directory_creation_events - or _drop_out_noisy_directory_removal_events - or _drop_out_noisy_soft_link_creation_events - or _drop_out_noisy_hard_link_creation_events - -##### Rules - -- rule: File Opened with Read Permissions - desc: File Opened with Read Permissions - condition: file_open_read and not (file_write_or_file_opened_for_write or file_read or setns_syscall) - action: [alert] - priority: low - prefilter: [FF] - -- rule: File Read - desc: File Read - condition: file_read and not file_write - action: [alert] - priority: low - prefilter: [FF] - -- rule: File Modified - desc: File Modified - condition: file_write - action: [alert] - priority: low - prefilter: [FF] - -- rule: File Opened with 
Write Permissions
-  desc: File Opened with Write Permissions
-  condition: file_open_write and not (file_write or setns_syscall)
-  action: [alert]
-  priority: low
-  prefilter: [FF]
-
-- rule: Directory created
-  desc: when a directory will be created
-  condition: sf.opflags = MKDIR
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: Directory removed
-  desc: when a directory will be removed
-  condition: sf.opflags = RMDIR
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: Hard link created
-  desc: when process creates hard link to an existing file
-  condition: sf.opflags = LINK
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: Soft link created
-  desc: when process creates soft link to an existing file
-  condition: sf.opflags = SYMLINK
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: File deleted
-  desc: when a file will be deleted
-  condition: sf.opflags = UNLINK
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: File renamed
-  desc: when a file will be renamed
-  condition: sf.opflags = RENAME
-  action: [alert]
-  priority: low
-  prefilter: [FE]
-
-- rule: UID of process was changed
-  desc: UID of process was changed
-  condition: sf.opflags = SETUID
-  action: [alert]
-  priority: low
-  prefilter: [PE]
-
-- rule: Process cloned
-  desc: Process cloned
-  condition: clone_syscall
-  action: [alert]
-  priority: low
-  prefilter: [PE]
-
-- rule: Execution of a file
-  desc: Execution of a file
-  condition: exec_syscall
-  action: [alert]
-  priority: low
-  tags: [test]
-  prefilter: [PE]
-
-- rule: Process or thread exit
-  desc: Process or thread exit
-  condition: exit_syscall
-  action: [alert]
-  priority: low
-  prefilter: [PE]
-
-
-- rule: Process entered namespace
-  desc: Process entered namespace
-  condition: setns_syscall
-  action: [alert]
-  priority: low
-  prefilter: [FF]
-
-- rule: Process Created a Network Connection
-  desc: Process Created a Network Connection
-  condition: sf.opflags = CONNECT
-  action: [alert]
-  priority: low
-  prefilter: [NF]
-
-- rule: Process Accepted a Network Connection
-  desc: Network Flow ingress
-  condition: sf.opflags = ACCEPT
-  action: [alert]
-  priority: low
-  prefilter: [NF]
-
-- rule: Process Sending and Receiving Network Data
-  desc: Network Flow ingress and engress
-  condition: sf.opflags = SEND and sf.opflags = RECV
-  action: [alert]
-  priority: low
-  prefilter: [NF]
-
-- rule: Process Sending Network Data
-  desc: Network Flow engress
-  condition: sf.opflags = SEND and not sf.opflags = RECV
-  action: [alert]
-  priority: low
-  prefilter: [NF]
-
-- rule: Process Receiving Network Data
-  desc: Network Flow ingress
-  condition: sf.opflags = RECV and not sf.opflags = SEND
-  action: [alert]
-  priority: low
-  prefilter: [NF]
-
-- rule: Network Connection Closed
-  desc: Network Connection Closed
-  condition: sf.opflags = CLOSE
-  action: [alert]
-  priority: low
-  prefilter: [NF]
\ No newline at end of file
+##### Macros
+
+- macro: FileFlow
+  condition: sf.type=FF
+
+- macro: FileEvent
+  condition: sf.type=FE
+
+- macro: ProcessEvent
+  condition: sf.type=PE
+
+- macro: NetworkFlow
+  condition: sf.type=NF
+
+- macro: setns_syscall
+  condition: FileFlow and sf.opflags = SETNS
+
+- macro: exit_syscall
+  condition: ProcessEvent and sf.opflags = EXIT
+
+- macro: exec_syscall
+  condition: ProcessEvent and sf.opflags = EXEC
+
+- macro: clone_syscall
+  condition: ProcessEvent and sf.opflags = CLONE
+
+- macro: unlink_syscall
+  condition: FileEvent and sf.opflags = UNLINK
+
+- macro: setuid_syscall
+  condition:
ProcessEvent and sf.opflags = SETUID + +- macro: rename_syscall + condition: FileEvent and sf.opflags = RENAME + +- macro: mkdir_syscall + condition: FileEvent and sf.opflags = MKDIR + +- macro: rmdir_syscall + condition: FileEvent and sf.opflags = RMDIR + +- macro: link_syscall + condition: FileEvent and sf.opflags = LINK + +- macro: symlink_syscall + condition: FileEvent and sf.opflags = SYMLINK + +- macro: FileOpen + condition: FileFlow and sf.opflags = OPEN + +- macro: file_open_write + condition: FileOpen and sf.file.is_open_write = true + +- macro: file_open_read + condition: FileOpen and sf.file.is_open_read = true + +- macro: file_write + condition: FileFlow and sf.opflags = WRITE + +- macro: file_read + condition: FileFlow and sf.opflags = READ + +- macro: file_read_or_file_opened_for_read + condition: file_read or file_open_read + +- macro: file_write_or_file_opened_for_write + condition: file_write or file_open_write + +- list: _infrastructure_containers + items: [ocp, openshift, ocs, ceph, csi-provisioner, csi-attacher, csi-snapshotter, container-native-virtualization, ose-local-storage-diskmaker, mcg-core-rhel, rook-ceph-rhel, openshift4, cephcsi-rhel, openshift-service-mesh, distributed-tracing, ose-csi-external] + +- macro: infrastructure_containers + condition: sf.container.image pmatch (_infrastructure_containers) + +- macro: nginx_ingress_controller_container + condition: sf.container.image pmatch (nginx-ingress-controller) + +- macro: nvidia_gpu_operator + condition: sf.container.image pmatch (gpu-operator) + +#### Process Clone tuning + +- macro: _drop_thread_clone_events + condition: clone_syscall and sf.proc.pid != sf.proc.tid + +- list: _os_level_noisy_process_clone_by_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /proc/self/exe, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/bin/hyperkube, /usr/lib/systemd/systemd-journald, /usr/bin/dpkg-deb, /usr/bin/dpkg, /usr/bin/apt-get, /usr/lib/systemd/systemd-udevd, /usr/bin/apt-config, /var/lib/dpkg/info/vim-runtime.postinst, /usr/bin/docker, /usr/share/debconf/frontend, /usr/lib/apt/apt.systemd.daily, /usr/lib/apt/methods/gpgv, /usr/sbin/sshd, /usr/sbin/syslog-ng, /lib/systemd/systemd-journald, /lib/systemd/systemd-udevd, /usr/bin/apt-key, /var/lib/cni/bin/openshift-sdn, /var/lib/cni/bin/multus, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-hwe-eol, /usr/lib/update-notifier/apt-check, /usr/bin/run-parts, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/containerd-shim, /usr/bin/gpgconf, /usr/bin/ceph, /usr/bin/openshift-sdn-node, /usr/bin/ldd, /usr/bin/kubelet, /usr/libexec/nm-dispatcher, /usr/bin/ldd, /usr/bin/kubelet, /usr/local/bin/kubectl] + +- list: _os_level_noisy_process_clone_by_parent_process + items: [/usr/bin/runc, /usr/bin/hyperkube, /usr/bin/dpkg, /usr/bin/apt-get, /usr/bin/apt-key, /usr/bin/appstreamcli, /usr/bin/containerd, /var/lib/dpkg/info/vim.postinst, /usr/libexec/nm-dispatcher] + +- list: _openshift_infrastructure_container_noisy_process_clone_by_process + items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/runc, grpc_health_probe, /prometheus/sh, /usr/bin/dig, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/local/bin/rook, /usr/bin/appregistry-server, /usr/sbin/haproxy, /usr/sbin/tuned-adm, /usr/bin/bash, "\u003cNA\u003e", /usr/bin/nmstatectl, /usr/bin/diskmaker, /usr/local/bin/kubernetes-nmstate, /opt/ibm/java/jre/bin/java, /usr/bin/node, /usr/bin/ansible-runner, /usr/libexec/qemu-kvm, 
/usr/local/bin/cephcsi, /usr/bin/machine-config-daemon, /bin/sh, /usr/bin/ansible-playbook, /usr/bin/python2, /usr/bin/python2.7, /usr/bin/ansible-runner, /usr/local/bin/ansible-operator, /usr/bin/uname, /var/lib/haproxy/reload-haproxy, /usr/bin/openshift-router, /usr/bin/curl, /usr/local/bin/sidecar-injector, /usr/local/bin/galley] + +- list: _openshift_infrastructure_container_noisy_process_clone_by_parent_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /bin/bash, /usr/bin/sh, /usr/bin/bash, /usr/share/openvswitch/scripts/ovs-ctl, /proc/self/exe, /usr/bin/ceph, /usr/bin/openshift-tuned, /usr/sbin/tuned-adm, /usr/bin/python2, /usr/bin/ansible-runner, /usr/bin/ansible-playbook] + +- macro: _drop_out_noisy_process_clone_events_from_ansible + condition: clone_syscall and ansible_in_infrastructure_containers + +- macro: bash_and_parent_process_proc_self_exe_with_cri_grandparents + condition: sf.proc.exe = /bin/bash and sf.pproc.exe = /proc/self/exe + and (sf.proc.aexe in (/usr/bin/runc) and sf.proc.aexe in (/usr/libexec/crio/conmon) and sf.proc.aexe in (/usr/bin/crio)) + +- macro: _drop_out_noisy_process_clone_events_from_bash_proc_self_exe + condition: clone_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents + +- macro: _drop_out_noisy_process_clone_events + condition: clone_syscall + and ((sf.proc.exe in (_os_level_noisy_process_clone_by_process) or sf.pproc.exe in (_os_level_noisy_process_clone_by_parent_process)) + or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_process_clone_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_clone_by_parent_process)))) + +#### File Read tuning + +- list: _os_level_noisy_file_read_by_process + items: [/usr/bin/hyperkube, /usr/lib/systemd/systemd, /usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/lib/systemd/systemd-journald, /usr/sbin/sshd, /lib/systemd/systemd-journald, /usr/sbin/irqbalance, /lib/systemd/systemd, /usr/bin/dbus-daemon, /usr/bin/updatedb.mlocate, /lib/systemd/systemd-udevd, /usr/bin/apt-config, /lib/systemd/system-generators/systemd-sysv-generator, /usr/sbin/cron, /usr/bin/dpkg, /usr/bin/mandb, /bin/systemctl, /usr/bin/apt-get, /usr/bin/lsb_release, /usr/bin/dockerd, /bin/networkctl, /sbin/ldconfig.real, /lib/systemd/systemd-sysctl, /lib/systemd/systemd-networkd, /usr/bin/docker, /usr/bin/containerd-shim, /usr/sbin/syslog-ng, /lib/systemd/systemd-resolved, /usr/bin/kubelet, /usr/bin/mongod, /usr/bin/mongo, /usr/bin/prometheus, /usr/lib/accountsservice/accounts-daemon, /usr/lib/systemd/systemd-logind, /usr/lib/systemd/systemd-cgroups-agent, /usr/lib/policykit-1/polkitd, /usr/bin/dpkg-divert, /usr/lib/update-notifier/apt-check, /usr/lib/systemd/systemd-resolved, /usr/bin/apt-key, /usr/bin/hwe-support-status, /usr/sbin/dpkg-preconfigure, /usr/lib/cnf-update-db, /usr/bin/appstreamcli, /usr/bin/gpg-connect-agent, /usr/lib/packagekit/packagekitd, /usr/libexec/gsd-housekeeping, /usr/bin/systemd-detect-virt, /usr/bin/networkctl, /usr/lib/systemd/systemd-networkd, /usr/sbin/rsyslogd, /usr/sbin/NetworkManager, /usr/bin/id, /usr/bin/containerd, /usr/bin/update-alternatives, /usr/bin/gdbus, /usr/bin/dpkg-maintscript-helper, /usr/bin/find, /usr/lib/systemd/systemd-udevd, /usr/bin/test, /usr/libexec/nm-dispatcher, /usr/bin/logger, /usr/libexec/chrony-helper, /usr/sbin/agetty, /usr/bin/basename, /usr/sbin/sssd, /usr/bin/ceph, /usr/lib/polkit-1/polkitd, /usr/sbin/lsof, /usr/bin/date, /usr/bin/systemd-tmpfiles, 
/usr/sbin/unbound-anchor] + +- list: _os_level_noisy_file_read_by_parent_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/hyperkube, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/dpkg-deb, /usr/sbin/sshd, /usr/bin/run-parts, /bin/run-parts, /usr/bin/apt-key, /usr/lib/ubuntu-release-upgrader/release-upgrade-motd, /usr/bin/dpkg, /usr/share/debconf/frontend, /usr/bin/dockerd, /var/lib/dpkg/info/vim.postinst, /usr/sbin/add-shell, /usr/local/bin/docker-compose, /var/lib/dpkg/info/mime-support.postinst, /usr/lib/systemd/systemd-udevd, /var/lib/cni/bin/openshift-sdn, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/update-notifier/update-motd-updates-available, /usr/lib/update-notifier/update-motd-hwe-eol, /usr/lib/apt/methods/gpgv, /usr/bin/appstreamcli, /usr/libexec/gnome-session-binary, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/gpgconf, /usr/bin/ldd, /usr/bin/kubelet, /usr/sbin/sssd, /usr/libexec/nm-dispatcher] + +- list: _openshift_infrastructure_container_noisy_file_read_by_process + items: [/usr/bin/node_exporter, /usr/bin/curl, /usr/bin/ovs-vsctl, /usr/bin/ovs-appctl, /usr/bin/prometheus, /usr/bin/ceph, /usr/bin/ceph-mds, /usr/bin/ceph-mgr, /usr/bin/ceph-osd, /usr/local/bin/rook, /usr/bin/ceph-mon, /sbin/ldconfig, /usr/sbin/ldconfig, /usr/bin/ovs-ofctl, /usr/share/openvswitch/scripts/ovs-ctl, /usr/share/grafana/bin/grafana-server, /bin/bash, /rootfs/usr/bin/journalctl, /usr/bin/cat, /usr/bin/sed, /usr/bin/sleep, /usr/bin/thanos, /usr/bin/bash, /usr/bin/openshift-router, /usr/bin/alertmanager, /usr/bin/dockerregistry, /usr/bin/dig, /usr/bin/tail, /usr/bin/ls, /usr/bin/kube-rbac-proxy, /usr/bin/cp, /usr/bin/coredns, /usr/bin/machine-config-daemon, /usr/bin/oauth-proxy, /usr/bin/telemeter-client, /usr/bin/kube-state-metrics, /usr/bin/grep, /usr/bin/openshift-state-metrics, /usr/bin/prometheus-config-reloader, /usr/bin/cmp, /usr/bin/openshift-tuned, /usr/local/bin/helm-operator, /manager, /usr/sbin/ovs-vswitchd, /usr/bin/appregistry-server, /usr/share/grafana/bin/grafana-server, /usr/bin/uname, /usr/sbin/ovsdb-server, /usr/sbin/haproxy, /usr/bin/nmstatectl, /usr/bin/lsblk, /usr/bin/uname, /bin/lsblk, /bin/lsblk, /usr/bin/stat, /usr/bin/realpath, /usr/bin/systemd-run, /usr/local/bin/kubectl, /usr/sbin/libvirtd, /usr/src/ovs-cni/bin/ovs-marker, /usr/bin/virt-handler, /usr/bin/machine-config-controller, /usr/bin/cm-adapter, /usr/bin/ingress-operator, /usr/bin/openshift-controller-manager, /usr/bin/openshift-sdn-controller, /usr/bin/cluster-node-tuning-operator, /usr/bin/dns-operator, /machine-api-operator, /usr/bin/cluster-autoscaler-operator, /usr/bin/snapshot-controller, /usr/bin/ansible-playbook, /usr/bin/ansible-runner, /usr/local/bin/envoy, /usr/sbin/grafana-server, /usr/local/bin/ansible-operator, /bridge-marker, /usr/bin/diskmaker, /usr/bin/jaeger-operator, /usr/local/bin/sidecar-injector, /usr/bin/csi-resizer, /usr/bin/csi-provisioner, /usr/local/bin/mixs, /usr/bin/hostpath-provisioner-operator, /var/lib/haproxy/reload-haproxy, /manager, /usr/sbin/logrotate, /usr/bin/virt-cdi-operator, /usr/bin/virt-operator, /usr/libexec/qemu-kvm, /usr/bin/vm-import-controller, /usr/bin/node, /usr/bin/virt-cdi-controller, /usr/bin/vm-import-operator, /usr/share/grafana/bin/grafana-server, /usr/local/bin/kubernetes-nmstate, /usr/bin/virt-cdi-apiserver, /usr/local/bin/istio-operator, /usr/sbin/udevadm] + +- list: _openshift_infrastructure_container_noisy_file_read_by_parent_process + items: [/usr/libexec/crio/conmon, /usr/bin/runc, 
/usr/bin/machine-config-daemon, /usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/openshift-tuned, /usr/bin/crio, /usr/local/bin/rook, /usr/local/bin/rook, /rook/rook, /usr/bin/ceph, /usr/bin/dumb-init, /usr/bin/openshift-sdn-node, /usr/sbin/ovsdb-server, /usr/sbin/tuned-adm, /usr/bin/nmstatectl, /usr/bin/node, /usr/bin/ansible-runner, /usr/bin/ansible-playbook] + +- macro: _drop_out_noisy_file_read_events_from_nginx_ingress_controller + condition: file_read_or_file_opened_for_read and nginx_ingress_controller_container and sf.pproc.exe = /usr/bin/dumb-init + +- macro: _drop_out_noisy_file_read_events_from_specific_high_level_dirs + condition: file_read_or_file_opened_for_read + and (sf.file.directory startswith /usr + or sf.file.directory startswith /proc + or sf.file.directory startswith /sys + or sf.file.directory startswith /lib + or sf.file.directory startswith //sys + or sf.file.directory startswith /dev + or sf.file.directory startswith /tmp) + +- macro: _drop_out_noisy_file_read_events_from_pipes_and_sockets + condition: file_read_or_file_opened_for_read and sf.file.type in (u, p) + +- macro: _drop_noisy_file_read_from_ansible_in_infrastructure_containers + condition: file_read_or_file_opened_for_read and ansible_in_infrastructure_containers + +- macro: _drop_out_noisy_file_read_events_from_nvidia_gpu_operator + condition: file_read_or_file_opened_for_read and nvidia_gpu_operator and sf.proc.exe = /usr/bin/gpu-operator + and (sf.file.directory startswith /opt/gpu-operator + or sf.file.directory startswith /var/run + or sf.file.path = /host-etc/os-release) + +- macro: _drop_file_read_list_of_file_paths + condition: file_read_or_file_opened_for_read + and sf.file.path in (/etc/ld.so.cache) + +- macro: _drop_file_read_in_infrastructure_containers_list_of_file_paths + condition: file_read_or_file_opened_for_read + and (sf.file.path in (/etc/python, /etc/localtime, /etc/resolv.conf, /etc/hosts, /etc/nsswitch.conf, /etc/host.conf, /etc/passwd) + or (sf.file.directory startswith /opt/ansible)) + +## macro to define /proc/self/exe running in a host, its grandparents includes /usr/bin/runc, /usr/libexec/crio/conmon and /usr/bin/crio +## and not exists sf.container.type -> or another way to limit it to the host +## the second macro looking for proc.exe /bin/bash and pproc.exe: /proc/self/exe + +- macro: proc_self_exe_running_in_host_with_cri_grandparents + condition: sf.proc.exe = /proc/self/exe + and (sf.proc.aexe in (/usr/bin/runc) and sf.proc.aexe in (/usr/libexec/crio/conmon) and sf.proc.aexe in (/usr/bin/crio)) + +- macro: _drop_out_noisy_file_read_events_from_proc_self_exe + condition: file_read_or_file_opened_for_read and (proc_self_exe_running_in_host_with_cri_grandparents or bash_and_parent_process_proc_self_exe_with_cri_grandparents) + and sf.file.path in (/etc/group, /etc/passwd, /etc/nsswitch.conf) + +- macro: _drop_out_noisy_file_read_events + condition: file_read_or_file_opened_for_read and not (file_write_or_file_opened_for_write or setns_syscall) + and ((sf.proc.exe in (_os_level_noisy_file_read_by_process) or sf.pproc.exe in (_os_level_noisy_file_read_by_parent_process)) + or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_file_read_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_file_read_by_parent_process)))) + +#### File Modify tuning + +- list: _os_level_noisy_file_modify_by_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/bin/crio, /usr/bin/hyperkube, /usr/lib/systemd/systemd-logind, 
/usr/lib/systemd/systemd-journald, /usr/lib/systemd/systemd-udevd, /usr/sbin/NetworkManager, /usr/lib/systemd/systemd, /usr/bin/dbus-daemon, /usr/bin/apt-get, /usr/lib/update-notifier/apt-check, /usr/bin/dpkg, /usr/bin/dockerd, /usr/lib/apt/methods/gpgv, /usr/bin/update-alternatives, /usr/bin/lsb_release, /usr/bin/containerd, /usr/lib/systemd/systemd-networkd, /usr/lib/systemd/systemd-resolved, /usr/bin/kubelet, /usr/sbin/irqbalance, /usr/bin/ceph] + +- list: _os_level_noisy_file_modify_by_parent_process + items: [/usr/libexec/crio/conmon, /usr/bin/runc, /usr/bin/hyperkube, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/sbin/sshd, /usr/bin/dbus-daemon, /usr/bin/dockerd, /lib/systemd/systemd-journald, /lib/systemd/systemd, /lib/systemd/systemd-udevd, /lib/systemd/systemd-logind, /lib/systemd/systemd-timesyncd, /lib/systemd/systemd-resolved, /lib/systemd/systemd-networkd, /usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/update-mime-database, /usr/lib/systemd/systemd-journald, /usr/lib/systemd/systemd-networkd, /usr/lib/systemd/systemd-udevd, /usr/lib/systemd/systemd-resolved, /usr/lib/systemd/systemd-timesyncd, /usr/lib/systemd/systemd-logind, /usr/bin/dpkg-deb, /usr/bin/apt-get, /usr/local/bin/docker-compose, /usr/bin/apt-key, /usr/bin/update-alternatives, /usr/bin/containerd, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/apt-config, /usr/lib/apt/methods/gpgv, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/lib/update-notifier/apt-check, /var/lib/dpkg/info/vim.postinst, /usr/bin/kubelet] + +- list: _openshift_infrastructure_container_noisy_file_modify_by_process + items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/sbin/ovs-vswitchd, /usr/bin/curl, /usr/bin/cat, /usr/bin/sh, /usr/bin/oauth-proxy, /usr/bin/ovs-vsctl, /usr/bin/ovs-appctl, /usr/bin/sed, /usr/sbin/ovsdb-server, /usr/bin/appregistry-server, /usr/sbin/haproxy, /usr/bin/dig, /sbin/ldconfig, /usr/bin/ceph-osd, /usr/local/bin/rook, /usr/bin/ovs-ofctl, /usr/bin/ls, /usr/bin/grep, /usr/bin/prometheus-config-reloader, /usr/bin/ceph-mds, /usr/bin/openshift-tuned, /usr/bin/openshift-state-metrics, /usr/bin/openshift-router, /usr/sbin/tuned-adm, /usr/sbin/chronyd, /usr/bin/alertmanager, /usr/sbin/tuned-adm, /usr/bin/nmstatectl, /usr/bin/lsblk, /usr/bin/virt-launcher, /usr/src/ovs-cni/bin/ovs-marker, /usr/sbin/libvirtd, /usr/local/bin/sidecar-injector, /usr/sbin/rsyslogd, /usr/sbin/grafana-server, /usr/sbin/logrotate, /usr/libexec/qemu-kvm, /usr/bin/ansible-playbook, /usr/share/grafana/bin/grafana-server] + +- list: _openshift_infrastructure_container_noisy_file_modify_by_parent_process + items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/sbin/ovs-vswitchd, /usr/bin/dumb-init, /usr/local/bin/rook, /usr/bin/openshift-sdn-node, /usr/bin/virt-launcher] + +- macro: _drop_out_noisy_file_modify_events + condition: file_write_or_file_opened_for_write and not setns_syscall + and ((sf.proc.exe in (_os_level_noisy_file_modify_by_process) or sf.pproc.exe in (_os_level_noisy_file_modify_by_parent_process)) + or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_file_modify_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_file_modify_by_parent_process)))) + +- macro: _drop_file_write_list_of_file_paths + condition: file_write_or_file_opened_for_write + and sf.file.path in (/run/systemd/userdb/io.systemd.DynamicUser, /run/systemd/notify, /dev/pts/1, /dev/null, /proc/self/attr/keycreate, /var/lib/grafana/grafana.db-journal) + +- macro: 
_drop_file_write_in_infrastructure_containers_list_of_file_paths + condition: file_write_or_file_opened_for_write and infrastructure_containers + and sf.file.path in (/tmp/health) + +- macro: _drop_file_write_from_rsyslogd_and_auditd + condition: file_write_or_file_opened_for_write + and sf.proc.exe in (/usr/sbin/rsyslogd, /usr/sbin/auditd) + and sf.file.directory startswith /var/log + +- macro: _drop_file_write_from_sshd_to_unix_domain + condition: file_write_or_file_opened_for_write + and sf.proc.exe = /usr/sbin/sshd + and sf.file.type = u + +- macro: _drop_file_write_from_os_processes_to_pipes + condition: file_write_or_file_opened_for_write + and sf.pproc.exe = /usr/bin/ldd + and sf.file.type = p + +- macro: _drop_file_write_from_tar + condition: file_write_or_file_opened_for_write + and sf.proc.exe = /usr/bin/tar + and sf.file.directory = /var/lib/dpkg/tmp.ci + +- macro: _drop_file_write_from_auditd + condition: file_write_or_file_opened_for_write + and sf.proc.exe = /usr/sbin/auditd + and sf.file.directory = /var/log/audit/ + +- macro: ansible_in_infrastructure_containers + condition: infrastructure_containers + and (sf.proc.exe in (/usr/bin/ansible-playbook, /usr/bin/ansible-runner, /usr/local/bin/ansible-operator) or sf.proc.aexe in (/usr/bin/ansible-playbook, /usr/local/bin/ansible-operator)) + +- macro: _drop_noisy_file_write_from_ansible_in_infrastructure_containers + condition: file_write_or_file_opened_for_write + and ansible_in_infrastructure_containers + and (sf.file.directory startswith /tmp + or sf.file.directory startswith /tmp/ansible-operator/ + or sf.file.directory startswith /opt/ansible/ + or sf.file.directory startswith /dev/pts) + +- macro: _drop_write_from_infrastructure_containers_to_pipe_or_unix_domain_socket + condition: file_write_or_file_opened_for_write and infrastructure_containers + and sf.file.type in (p, u) + +- macro: _drop_file_write_from_infrastructure_containers_to_unix_socket + condition: file_write_or_file_opened_for_write and infrastructure_containers + and sf.file.type = u + and (sf.proc.exe in (/usr/bin/nmstatectl, /usr/bin/lsblk, /usr/bin/virt-launcher, /usr/sbin/libvirtd, /usr/bin/node, /usr/local/bin/cephcsi, /usr/libexec/qemu-kvm, /usr/bin/virt-handler) + or sf.pproc.exe in (/usr/bin/node)) + +- macro: _drop_file_write_from_some_processes_to_unix_socket + condition: file_write_or_file_opened_for_write + and sf.file.type = u + and (sf.proc.exe in (/usr/bin/ldd, /usr/bin/kubelet, /usr/libexec/sssd/sssd_nss) + or sf.pproc.exe in (/usr/bin/ldd, /usr/bin/kubelet)) + +- macro: _drop_file_write_from_nvidia_gpu_operator_to_unix_socket_and_pipes + condition: file_write_or_file_opened_for_write and nvidia_gpu_operator + and sf.file.type in (u, p) + and sf.proc.exe = /usr/bin/gpu-operator + +- macro: _drop_file_write_from_kubelet_specific_file_paths + condition: file_write_or_file_opened_for_write + and sf.proc.exe = /usr/bin/kubelet + and (sf.file.directory startswith /sys/fs/cgroup/ + or sf.file.directory startswith /var/lib/kubelet/pods/) + +- macro: _drop_file_write_from_nginx_ingress_controller + condition: file_write_or_file_opened_for_write and nginx_ingress_controller_container and sf.file.path = /tmp/nginx-status-server.sock + +- macro: _drop_file_write_from_proc_self_exe_part_1 + condition: file_write_or_file_opened_for_write and proc_self_exe_running_in_host_with_cri_grandparents + and ((sf.file.path startswith /proc/self/task/ and sf.file.path contains /attr/exec) + or sf.file.path = /proc/thread-self/attr/exec) + +## the following case 
results in around 60k events /proc/self/exe writing to a unix domain, file details are null + +- macro: _drop_file_write_from_proc_self_exe_part_2 + condition: file_write_or_file_opened_for_write and proc_self_exe_running_in_host_with_cri_grandparents and sf.file.type in (u, p) + +#### Process exit tuning + +- macro: _drop_thread_exit_events + condition: exit_syscall and sf.proc.pid != sf.proc.tid + +- list: _os_level_noisy_process_exit_by_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /proc/self/exe, /usr/bin/crio, /usr/lib/systemd/systemd, /usr/sbin/sshd, /usr/bin/dpkg-deb, /usr/bin/apt-key, /usr/bin/dpkg, /usr/bin/apt-config, /usr/bin/cat, /usr/bin/cmp, /usr/bin/dpkg-split, /usr/lib/systemd/systemd-udevd, /usr/bin/sed, /usr/bin/readlink, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/lsb_release, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/apt/methods/gpgv, /usr/bin/test, /usr/bin/ldd, /usr/libexec/chrony-helper, /usr/lib/systemd/systemd-cgroups-agent, /usr/bin/basename, /usr/bin/logger, /usr/bin/ldd, /usr/bin/ceph, /usr/sbin/lsof, /usr/bin/jq, /usr/bin/sleep, /usr/bin/ls, /usr/bin/rm, /usr/bin/uname, /usr/sbin/haproxy, /usr/sbin/pidof, /var/lib/haproxy/reload-haproxy, /usr/bin/openshift-router] + +- list: _os_level_noisy_process_exit_by_parent_process + items: [/usr/bin/runc, /usr/bin/hyperkube, /usr/bin/apt-key, /usr/bin/dpkg-deb, /usr/bin/dpkg, /usr/bin/apt-config, /usr/bin/apt-get, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/systemd/systemd-udevd, /usr/bin/appstreamcli, /usr/lib/update-notifier/apt-check, /usr/lib/apt/methods/gpgv, /usr/bin/run-parts, /usr/lib/update-notifier/update-motd-updates-available, /usr/lib/ubuntu-release-upgrader/release-upgrade-motd, /var/lib/dpkg/info/vim.postinst, /usr/bin/dpkg-maintscript-helper, /usr/sbin/cron, /usr/bin/ldd, /usr/bin/kubelet, /usr/libexec/nm-dispatcher, /usr/bin/ldd] + +- list: _openshift_infrastructure_container_noisy_process_exit_by_process + items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/sleep, /usr/bin/cat, /usr/bin/curl, /usr/bin/bash, /usr/bin/ovs-vsctl, /usr/bin/sh, /bin/bash, /usr/bin/ovs-appctl, /usr/bin/sed, /usr/bin/ovs-ofctl, /proc/self/exe, /prometheus/sh, /usr/bin/runc, /usr/bin/sed, /usr/bin/cp, /usr/bin/ls, /usr/bin/ceph, /usr/sbin/ldconfig, /usr/bin/grep, /usr/bin/cmp, /usr/bin/dig, /usr/libexec/crio/conmon, /usr/bin/lsblk, /sbin/ldconfig, /usr/bin/uname, /usr/bin/nmstatectl, /usr/bin/grep, /usr/bin/sleep, /usr/bin/stat, /usr/bin/realpath, /usr/bin/mkdir, /usr/bin/date, /usr/bin/ansible-runner, /usr/bin/logger, /usr/sbin/logrotate, /usr/bin/find, /bin/sh, /usr/bin/ansible-playbook, /usr/bin/python2, /usr/bin/python2.7, /usr/bin/chmod, /usr/bin/coreutils] + +- list: _openshift_infrastructure_container_noisy_process_exit_by_parent_process + items: [/usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/runc, /usr/bin/ceph, /usr/local/bin/rook, /usr/libexec/crio/conmon, /usr/bin/openshift-sdn-node, /usr/bin/nmstatectl, /usr/bin/ansible-runner, /usr/bin/node, /usr/local/bin/cephcsi, /usr/bin/python2, /usr/bin/ansible-playbook] + +- macro: _drop_out_noisy_process_exit_events_from_bash_proc_self_exe + condition: exit_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents + +- macro: _drop_out_noisy_process_exit_events + condition: exit_syscall + and ((sf.proc.exe in (_os_level_noisy_process_exit_by_process) or sf.pproc.exe in (_os_level_noisy_process_exit_by_parent_process)) + or (infrastructure_containers and 
(sf.proc.exe in (_openshift_infrastructure_container_noisy_process_exit_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_exit_by_parent_process)))) + +#### setns tuning + +- list: _os_level_noisy_setns_events_by_process + items: [/usr/bin/runc, /var/lib/cni/bin/openshift-sdn, /usr/bin/dockerd] + +- list: _os_level_noisy_setns_events_by_parent_process + items: [/usr/bin/crio, /usr/bin/dockerd] + +- macro: _drop_out_noisy_setns_events_events + condition: setns_syscall and (sf.proc.exe in (_os_level_noisy_setns_events_by_process) or sf.pproc.exe in (_os_level_noisy_setns_events_by_parent_process)) + +- macro: _drop_noisy_setns_events_from_proc_self_exe + condition: setns_syscall and proc_self_exe_running_in_host_with_cri_grandparents + +#### Process execution tuning + +- list: _os_level_noisy_process_execution_by_process + items: [/usr/bin/runc, /usr/libexec/crio/conmon, /usr/sbin/sshd, /usr/bin/dpkg, /usr/bin/dpkg-deb, /usr/bin/dpkg-split, /usr/lib/update-notifier/apt-check, /usr/bin/gpg-connect-agent, /usr/bin/lsb_release, /usr/bin/update-alternatives, /usr/bin/date, /usr/bin/test, /usr/libexec/chrony-helper, /usr/libexec/nm-dispatcher, /usr/lib/systemd/systemd-cgroups-agent, /usr/bin/basename, /usr/bin/logger, /usr/bin/nice, /usr/bin/ceph, /usr/sbin/lsof] + +- list: _os_level_noisy_process_execution_by_parent_process + items: [/usr/bin/hyperkube, /usr/bin/runc, /usr/bin/crio, /usr/libexec/crio/conmon, /usr/bin/dpkg, /usr/bin/dpkg-deb, /usr/bin/apt-key, /usr/bin/apt-get, /usr/bin/apt-config, /usr/share/debconf/frontend, /var/lib/dpkg/info/vim-runtime.postinst, /usr/lib/apt/apt.systemd.daily, /usr/bin/run-parts, /usr/lib/update-notifier/apt-check, /usr/lib/update-notifier/update-motd-fsck-at-reboot, /usr/bin/appstreamcli, /usr/bin/gpgconf, /usr/lib/apt/methods/gpgv, /usr/bin/kubelet, /usr/bin/ldd, /usr/libexec/nm-dispatcher] + +- list: _openshift_infrastructure_container_noisy_process_execution_by_process + items: [/usr/sbin/iptables, /usr/sbin/chroot, /usr/bin/curl, /usr/bin/ovs-vsctl, /usr/share/openvswitch/scripts/ovs-ctl, /usr/bin/sed, /usr/bin/ovs-appctl, /usr/bin/ovs-ofctl, /usr/sbin/iptables-save, /usr/bin/openshift-sdn-node, /sbin/ldconfig, /usr/bin/ceph, /usr/bin/ls, /usr/bin/cp, /usr/sbin/ldconfig, /usr/bin/cmp, /usr/bin/dig, /usr/bin/grep, /proc/self/exe, /usr/bin/lsblk, /sbin/ldconfig, /usr/bin/uname, /usr/bin/nmstatectl, /usr/bin/grep, /usr/bin/sleep, /usr/bin/stat, /usr/bin/realpath, /usr/bin/cat, /usr/bin/df, /usr/bin/mkdir, /usr/bin/systemd-run, /usr/local/bin/kubectl, /usr/bin/ansible-runner, /usr/bin/find, /usr/bin/jq] + +- list: _openshift_infrastructure_container_noisy_process_execution_by_parent_process + items: [/usr/bin/openshift-sdn-node, /usr/share/openvswitch/scripts/ovs-ctl, /usr/local/bin/rook, /usr/bin/ceph, /usr/bin/ceph-mgr, /var/lib/haproxy/reload-haproxy, /usr/bin/openshift-router, /usr/bin/openshift-tuned, /usr/bin/nmstatectl, /usr/bin/node, /usr/bin/ansible-runner, /usr/bin/ansible-playbook] + +- macro: _drop_out_noisy_process_execution_events_from_ansible + condition: exec_syscall and ansible_in_infrastructure_containers + +- macro: _drop_out_noisy_process_execution_events_from_bash_proc_self_exe + condition: exec_syscall and bash_and_parent_process_proc_self_exe_with_cri_grandparents + +- macro: _drop_out_noisy_process_execution_events + condition: exec_syscall + and ((sf.proc.exe in (_os_level_noisy_process_execution_by_process) or sf.pproc.exe in (_os_level_noisy_process_execution_by_parent_process)) + or 
(infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_process_execution_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_process_execution_by_parent_process)))) + +### File delete tuning + +- list: _os_level_noisy_file_delete_by_process + items: [/usr/bin/crio, /usr/bin/hyperkube, /usr/bin/runc, /usr/lib/systemd/systemd-udevd, /usr/lib/systemd/systemd, /usr/libexec/crio/conmon, /lib/systemd/systemd-udevd, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/update-alternatives, /usr/bin/dockerd, /usr/lib/systemd/systemd-networkd, /usr/bin/containerd, /usr/lib/apt/methods/gpgv, /usr/lib/ubuntu-release-upgrader/check-new-release, /usr/bin/containerd-shim, /usr/lib/systemd/systemd-resolved, /usr/lib/systemd/systemd-logind, /usr/lib/systemd/systemd-journald, /usr/bin/dpkg-divert, /usr/bin/appstreamcli, /usr/bin/kubelet, /usr/sbin/logrotate] + +- list: _os_level_noisy_file_delete_by_parent_process + items: [/usr/bin/runc, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/appstreamcli, /usr/lib/update-notifier/update-motd-updates-available, /usr/bin/apt-key] + +- list: _openshift_infrastructure_container_noisy_file_delete_by_process + items: [/usr/bin/appregistry-server, /usr/local/bin/rook, /usr/bin/prometheus, /usr/bin/ceph-mon, /usr/share/grafana/bin/grafana-server, /usr/bin/openshift-tuned, /usr/sbin/tuned-adm, /usr/bin/prometheus-config-reloader, /usr/bin/ansible-runner, /usr/bin/prometheus-config-reloader, /usr/local/bin/galley, /usr/bin/ansible-runner, /usr/bin/ansible-playbook, /usr/bin/prometheus, /usr/local/bin/ansible-operator, /usr/sbin/grafana-server, /usr/sbin/haproxy] + +- macro: _drop_out_noisy_file_deletion_events_from_ansible + condition: unlink_syscall and ansible_in_infrastructure_containers + +- macro: _drop_out_noisy_file_delete_events + condition: unlink_syscall + and ((sf.proc.exe in (_os_level_noisy_file_delete_by_process) or sf.pproc.exe in (_os_level_noisy_file_delete_by_parent_process)) + or (infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_file_delete_by_process))) + +### Network Flows tuning + +- list: _os_level_noisy_network_flows_by_process + items: [/usr/bin/crio, /usr/sbin/haproxy, /usr/bin/hyperkube, /usr/sbin/chronyd, /var/lib/cni/bin/multus, /usr/bin/coredns, /usr/lib/systemd/systemd-resolved, /usr/bin/kubelet, /usr/sbin/unbound-anchor, /usr/sbin/NetworkManager, /usr/sbin/sssd] + +- list: _os_level_noisy_network_flows_by_parent_process + items: [/usr/bin/runc, /usr/bin/apt-get] + +- list: _openshift_infrastructure_container_noisy_network_flows_by_process + items: [/usr/bin/ceph, /usr/bin/oauth-proxy, /usr/bin/thanos, /usr/bin/kube-rbac-proxy, /usr/bin/ceph-osd, /usr/bin/prom-kube-proxy, /usr/bin/alertmanager, /usr/local/bin/rook, /usr/bin/ceph-mds, /usr/bin/cm-adapter, /usr/bin/telemeter-client, /usr/share/grafana/bin/grafana-server, /usr/bin/machine-config-daemon, /usr/bin/node_exporter, /usr/bin/openshift-tuned, /usr/bin/kube-state-metrics, /usr/bin/openshift-state-metrics, /usr/bin/openshift-router, /usr/bin/dig, /usr/bin/node, /usr/bin/nmstatectl, /usr/local/bin/mixs, /usr/sbin/grafana-server, /go/bin/collector-linux, /go/bin/query-linux, /go/bin/all-in-one-linux, /opt/kiali/kiali, /usr/bin/ansible-playbook, /usr/local/bin/ansible-operator, /usr/local/bin/envoy, /usr/src/ovs-cni/bin/ovs-marker, /bridge-marker, /opt/kiali/kiali, /usr/bin/virt-handler, /usr/local/bin/noobaa-operator, /usr/local/bin/kubectl, /usr/bin/virt-operator, /usr/bin/virt-cdi-apiserver, 
/usr/bin/csi-provisioner, /usr/bin/csi-attacher, /usr/bin/diskmaker, /usr/local/bin/ocs-operator, /usr/bin/csi-resizer, /usr/bin/jaeger-operator, /usr/bin/snapshot-controller, /usr/bin/cluster-node-tuning-operator, /usr/bin/virt-cdi-operator, /usr/bin/hostpath-provisioner-operator, /usr/bin/local-storage-operator, /usr/bin/vm-import-operator, /usr/bin/ansible-runner, /usr/bin/virt-cdi-controller, /usr/local/bin/kubernetes-nmstate, /usr/bin/vm-import-controller, /usr/bin/openshift-sdn-node, /usr/bin/radosgw-admin, /usr/local/bin/pilot-discovery, /usr/bin/appregistry-server] + +- list: _openshift_infrastructure_container_noisy_network_flows_by_parent_process + items: [/usr/libexec/crio/conmon, /var/lib/haproxy/reload-haproxy] + +- macro: _drop_out_noisy_network_flows_from_runc_parent_process + condition: NetworkFlow and sf.pproc.exe = /usr/bin/runc and sf.net.dip = 127.0.0.1 and sf.net.dport in (9, 9090, 50051) + +- macro: _drop_out_noisy_network_flows_from_curl + condition: NetworkFlow and infrastructure_containers and sf.proc.exe = /usr/bin/curl and sf.net.dip = 127.0.0.1 and sf.net.dport = 9090 + +- macro: _drop_out_noisy_network_flows_from_ansible + condition: NetworkFlow and ansible_in_infrastructure_containers + +- macro: _drop_out_network_flows_from_log_forwarder_utilities + condition: NetworkFlow and sf.proc.exe in (/usr/sbin/syslog-ng) and sf.net.dport = 514 + +- macro: _drop_out_noisy_network_flows + condition: NetworkFlow + and ( (sf.proc.exe in (_os_level_noisy_network_flows_by_process) or sf.pproc.exe in (_os_level_noisy_network_flows_by_parent_process)) + or (infrastructure_containers and (sf.proc.exe in (_openshift_infrastructure_container_noisy_network_flows_by_process) or sf.pproc.exe in (_openshift_infrastructure_container_noisy_network_flows_by_parent_process)))) + +### setuid tuning + +- list: _os_level_noisy_setuid_events_by_process + items: [/usr/bin/runc, /usr/sbin/sshd, /usr/lib/systemd/systemd, /usr/libexec/crio/conmon] + +- macro: _drop_noisy_setuid_events_from_proc_self_exe + condition: setuid_syscall and proc_self_exe_running_in_host_with_cri_grandparents + +- macro: _drop_out_noisy_setuid_events + condition: setuid_syscall + and sf.proc.exe in (_os_level_noisy_setuid_events_by_process) + +### File rename tuning + +- list: _os_level_noisy_file_rename_events_by_process + items: [/usr/bin/runc, /usr/bin/hyperkube, /lib/systemd/systemd-udevd, /usr/bin/dpkg, /usr/lib/systemd/systemd-udevd, /usr/sbin/NetworkManager, /usr/bin/crio, /usr/libexec/crio/conmon, /usr/lib/systemd/systemd, /usr/lib/systemd/systemd-journald, /usr/sbin/logrotate, /usr/bin/dockerd, /usr/bin/kubelet, /usr/sbin/chronyd] + +- list: _openshift_infrastructure_container_noisy_file_rename_events_by_process + items: [/usr/bin/alertmanager, /usr/bin/prometheus, /usr/bin/prometheus-config-reloader, /usr/bin/ansible-runner, /usr/bin/ansible-playbook, /usr/sbin/ovsdb-server] + +- macro: _drop_out_noisy_file_rename_events_from_ansible + condition: rename_syscall and ansible_in_infrastructure_containers + +- macro: _drop_out_noisy_file_rename_events_from_mongodb + condition: rename_syscall and sf.container.image pmatch (mongodb) and sf.file.path in (/data/mongo/cluster/shard1/diagnostic.data/metrics.interim.temp, /data/mongo/cluster/shard1/WiredTiger.turtle.set) + +- macro: _drop_out_noisy_file_rename_events + condition: rename_syscall + and ((sf.proc.exe in (_os_level_noisy_file_rename_events_by_process)) + or (infrastructure_containers and sf.proc.exe in 
(_openshift_infrastructure_container_noisy_file_rename_events_by_process))) + +### Directory Creation tuning + +- list: _os_level_noisy_directory_creation_by_process + items: [/usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/update-mime-database, /usr/bin/dpkg-deb, /usr/bin/dockerd, /usr/lib/systemd/systemd-logind, /usr/bin/runc, /usr/sbin/update-rc.d, /usr/bin/containerd, /usr/local/bin/docker-compose, /usr/bin/top, /usr/bin/containerd-shim, /usr/bin/kubelet, /usr/local/bin/kubectl, /usr/bin/crio, /usr/lib/systemd/systemd-journald] + +- list: _os_level_noisy_directory_creation_by_parent_process + items: [/usr/bin/dpkg-deb, /var/lib/dpkg/info/shared-mime-info.postinst, /usr/bin/containerd-shim, /usr/bin/apt-get, /usr/bin/containerd-shim, /usr/bin/containerd, /usr/bin/apt-key] + +- list: _openshift_infrastructure_container_noisy_directory_creation_by_process + items: [/usr/bin/oc, /usr/bin/python2, /usr/bin/ansible-runner, /usr/bin/prometheus] + +- macro: _drop_out_noisy_directory_creation_in_infrastructure_containers_list_of_file_paths + condition: mkdir_syscall + and ( sf.file.path in (/opt/ansible/.ansible, /opt/ansible, /opt) + or (sf.file.path startswith /opt/ansible/.ansible or sf.file.path startswith /tmp/ansible-operator)) + +- macro: _drop_out_noisy_directory_creation_events + condition: mkdir_syscall + and ((sf.proc.exe in (_os_level_noisy_directory_creation_by_process) or sf.pproc.exe in (_os_level_noisy_directory_creation_by_parent_process)) + or (infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_directory_creation_by_process))) + +### Directory Removal tuning + +- list: _os_level_noisy_directory_removal_by_process + items: [/usr/bin/appstreamcli, /usr/lib/systemd/systemd, /usr/bin/apt-get, /usr/bin/dpkg, /usr/bin/hyperkube, /usr/bin/prometheus] + +- list: _os_level_noisy_directory_removal_by_parent_process + items: [/usr/bin/dpkg, /usr/lib/systemd/systemd, /usr/bin/apt-get] + +- macro: _drop_out_noisy_directory_removal_from_ansible + condition: rmdir_syscall and ansible_in_infrastructure_containers + +- macro: _drop_out_noisy_directory_removal_events + condition: rmdir_syscall + and (sf.proc.exe in (_os_level_noisy_directory_removal_by_process) or sf.pproc.exe in (_os_level_noisy_directory_removal_by_parent_process)) + +### Soft Link tuning + +- list: _os_level_noisy_soft_link_creation_by_process + items: [/usr/bin/dpkg, /usr/bin/apt-get, /usr/bin/update-alternatives, /usr/lib/systemd/systemd, /usr/bin/dockerd, /usr/sbin/update-rc.d, /usr/bin/hyperkube, /usr/lib/systemd/systemd-udevd, /usr/bin/kubelet, /usr/libexec/crio/conmon, /usr/bin/crio] + +- list: _openshift_infrastructure_container_noisy_soft_link_creation_by_process + items: [/usr/local/bin/ansible-operator, /usr/bin/runc, /usr/bin/virt-operator] + +- macro: _drop_out_noisy_soft_link_creation_events + condition: symlink_syscall + and (sf.proc.exe in (_os_level_noisy_soft_link_creation_by_process) + or (infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_soft_link_creation_by_process))) + +### Hard Link tuning + +- list: _os_level_noisy_hard_link_creation_by_process + items: [/usr/bin/dpkg, /usr/bin/dpkg-divert] + +- list: _openshift_infrastructure_container_noisy_hard_link_creation_by_process + items: [/usr/bin/ansible-runner, /usr/bin/ansible-playbook] + +- macro: _drop_out_noisy_hard_link_creation_events + condition: link_syscall + and (sf.proc.exe in (_os_level_noisy_hard_link_creation_by_process) + or 
(infrastructure_containers and sf.proc.exe in (_openshift_infrastructure_container_noisy_hard_link_creation_by_process))) + +##### Global drop rule + +- drop: __global__ + condition: _drop_out_noisy_process_clone_events + or _drop_thread_clone_events + or _drop_out_noisy_process_clone_events_from_ansible + or _drop_out_noisy_process_clone_events_from_bash_proc_self_exe + or _drop_out_noisy_file_read_events + or _drop_out_noisy_file_read_events_from_nginx_ingress_controller + or _drop_out_noisy_file_read_events_from_nvidia_gpu_operator + or _drop_file_read_list_of_file_paths + or _drop_out_noisy_file_read_events_from_proc_self_exe + or _drop_out_noisy_file_read_events_from_specific_high_level_dirs + or _drop_out_noisy_file_read_events_from_pipes_and_sockets + or _drop_noisy_file_read_from_ansible_in_infrastructure_containers + or _drop_file_read_in_infrastructure_containers_list_of_file_paths + or _drop_out_noisy_file_modify_events + or _drop_file_write_list_of_file_paths + or _drop_file_write_in_infrastructure_containers_list_of_file_paths + or _drop_file_write_from_rsyslogd_and_auditd + or _drop_file_write_from_tar + or _drop_file_write_from_auditd + or _drop_file_write_from_os_processes_to_pipes + or _drop_file_write_from_kubelet_specific_file_paths + or _drop_file_write_from_infrastructure_containers_to_unix_socket + or _drop_file_write_from_some_processes_to_unix_socket + or _drop_file_write_from_sshd_to_unix_domain + or _drop_file_write_from_nginx_ingress_controller + or _drop_file_write_from_nvidia_gpu_operator_to_unix_socket_and_pipes + or _drop_noisy_file_write_from_ansible_in_infrastructure_containers + or _drop_write_from_infrastructure_containers_to_pipe_or_unix_domain_socket + or _drop_file_write_from_proc_self_exe_part_1 + or _drop_file_write_from_proc_self_exe_part_2 + or _drop_thread_exit_events + or _drop_out_noisy_process_exit_events_from_bash_proc_self_exe + or _drop_out_noisy_process_exit_events + or _drop_out_noisy_setns_events_events + or _drop_noisy_setns_events_from_proc_self_exe + or _drop_out_noisy_process_execution_events + or _drop_out_noisy_process_execution_events_from_ansible + or _drop_out_noisy_process_execution_events_from_bash_proc_self_exe + or _drop_out_noisy_file_delete_events + or _drop_out_noisy_file_deletion_events_from_ansible + or _drop_out_noisy_network_flows_from_runc_parent_process + or _drop_out_network_flows_from_log_forwarder_utilities + or _drop_out_noisy_network_flows_from_curl + or _drop_out_noisy_network_flows_from_ansible + or _drop_out_noisy_network_flows + or _drop_out_noisy_setuid_events + or _drop_noisy_setuid_events_from_proc_self_exe + or _drop_out_noisy_file_rename_events + or _drop_out_noisy_file_rename_events_from_ansible + or _drop_out_noisy_file_rename_events_from_mongodb + or _drop_out_noisy_directory_creation_events + or _drop_out_noisy_directory_removal_from_ansible + or _drop_out_noisy_directory_creation_in_infrastructure_containers_list_of_file_paths + or _drop_out_noisy_directory_removal_events + or _drop_out_noisy_soft_link_creation_events + or _drop_out_noisy_hard_link_creation_events + +##### Rules + +- rule: File Opened with Read Permissions + desc: File Opened with Read Permissions + condition: file_open_read and not (file_write_or_file_opened_for_write or file_read or setns_syscall) + priority: low + prefilter: [FF] + +- rule: File Read + desc: File Read + condition: file_read and not file_write + priority: low + prefilter: [FF] + +- rule: File Modified + desc: File Modified + condition: file_write + priority: low 
+ prefilter: [FF] + +- rule: File Opened with Write Permissions + desc: File Opened with Write Permissions + condition: file_open_write and not (file_write or setns_syscall) + priority: low + prefilter: [FF] + +- rule: Directory created + desc: when a directory is created + condition: sf.opflags = MKDIR + priority: low + prefilter: [FE] + +- rule: Directory removed + desc: when a directory is removed + condition: sf.opflags = RMDIR + priority: low + prefilter: [FE] + +- rule: Hard link created + desc: when process creates hard link to an existing file + condition: sf.opflags = LINK + priority: low + prefilter: [FE] + +- rule: Soft link created + desc: when process creates soft link to an existing file + condition: sf.opflags = SYMLINK + priority: low + prefilter: [FE] + +- rule: File deleted + desc: when a file is deleted + condition: sf.opflags = UNLINK + priority: low + prefilter: [FE] + +- rule: File renamed + desc: when a file is renamed + condition: sf.opflags = RENAME + priority: low + prefilter: [FE] + +- rule: UID of process was changed + desc: UID of process was changed + condition: sf.opflags = SETUID + priority: low + prefilter: [PE] + +- rule: Process cloned + desc: Process cloned + condition: clone_syscall + priority: low + prefilter: [PE] + +- rule: Execution of a file + desc: Execution of a file + condition: exec_syscall + priority: low + tags: [test] + prefilter: [PE] + +- rule: Process or thread exit + desc: Process or thread exit + condition: exit_syscall + priority: low + prefilter: [PE] + + +- rule: Process entered namespace + desc: Process entered namespace + condition: setns_syscall + priority: low + prefilter: [FF] + +- rule: Process Created a Network Connection + desc: Process Created a Network Connection + condition: sf.opflags = CONNECT + priority: medium + prefilter: [NF] + +- rule: Process Accepted a Network Connection + desc: Network Flow ingress + condition: sf.opflags = ACCEPT + priority: medium + prefilter: [NF] + +- rule: Process Sending and Receiving Network Data + desc: Network Flow ingress and egress + condition: sf.opflags in (SEND, WRITE) and sf.opflags in (RECV, READ) + priority: medium + prefilter: [NF] + +- rule: Process Sending Network Data + desc: Network Flow egress + condition: sf.opflags in (SEND, WRITE) and not sf.opflags in (RECV, READ) + priority: medium + prefilter: [NF] + +- rule: Process Receiving Network Data + desc: Network Flow ingress + condition: sf.opflags in (RECV, READ) and not sf.opflags in (SEND, WRITE) + priority: medium + prefilter: [NF] + +- rule: Network Connection Closed + desc: Network Connection Closed + condition: sf.opflags = CLOSE + priority: low + prefilter: [NF] diff --git a/resources/policies/runtimeintegrity/path.yaml b/resources/policies/runtimeintegrity/path.yaml new file mode 100644 index 00000000..4bfd88e1 --- /dev/null +++ b/resources/policies/runtimeintegrity/path.yaml @@ -0,0 +1,21 @@ +- rule: New pod in namespace robot-shop + desc: Detect new pod in namespace robot-shop + condition: sf.ke.kind = 'K8S_PODS' and sf.ke.action = 'K8S_COMPONENT_ADDED' and sf.ke.message[items.0.namespace] = 'robot-shop' + priority: high + actions: [podname] + prefilter: [KE] + +#- rule: Service extract +# desc: Extract json path from service list as a test +# condition: sf.pod.services[0.clusterip.0] = '10.109.218.161' +# priority: high +# tags: [found_clusterip_in_services] +# prefilter: [PE] + +#- rule: IP extract +# desc: IP extraction test +# condition: sf.pod.internalip[0] = '192.168.59.100' +# 
priority: high +# tags: [found_internalip] +# prefilter: [NF] + diff --git a/resources/policies/runtimeintegrity/runtimeintegrity.yaml b/resources/policies/runtimeintegrity/runtimeintegrity.yaml index bdfbb8cf..0ee40bec 100644 --- a/resources/policies/runtimeintegrity/runtimeintegrity.yaml +++ b/resources/policies/runtimeintegrity/runtimeintegrity.yaml @@ -500,7 +500,6 @@ and sf.proc.exe != sf.pproc.exe and not sf.pproc.exe = /usr/local/sbin/runc and sf.proc.name pmatch (shell_binaries, compilers, pkg_mgmt_binaries, shell_interpreters, coreutils_mod_binaries) - action: [alert] priority: low tags: [notification, suspicious-process] prefilter: [PE] @@ -511,7 +510,6 @@ and open_write and possibly_webserver and not sf.file.path pmatch (log_paths) - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [FF] @@ -521,7 +519,6 @@ condition: sf.opflags = EXEC and (sf.proc.name pmatch (package_mgmt_binaries) or java_package_installer) and container - action: [alert] priority: medium tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -530,7 +527,6 @@ desc: Downloader is being run inside the container. Could be downloading something malicious condition: sf.opflags = EXEC and sf.proc.name pmatch (downloader_binaries) - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -540,7 +536,6 @@ condition: sf.opflags = EXEC and sf.proc.name in (remote_copy_binaries) and container - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -549,7 +544,6 @@ desc: User/Group was modified, added or deleted condition: sf.opflags = EXEC and sf.proc.name in (modify_passwd_binaries, create_user_binaries, delete_user_binaries, modify_user_binaries, create_grp_binaries, delete_group_binaries, modify_grp_binaries) - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -559,7 +553,6 @@ condition: sf.opflags = EXEC and sf.proc.name pmatch (modify_passwd_binaries, verify_passwd_binaries, user_util_binaries) and container - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -569,7 +562,6 @@ condition: sf.opflags = EXEC and sf.proc.name pmatch (modify_passwd_binaries, verify_passwd_binaries, user_util_binaries) and not container - action: [alert] priority: high tags: [notification, suspicious-process] prefilter: [PE] @@ -577,7 +569,6 @@ - rule: Interactive login detected desc: Interactive login to container detected condition: interactive and container - action: [alert] priority: high tags: [actionable-offense, suspicious-process] prefilter: [PE] @@ -585,7 +576,6 @@ - rule: Interactive login to host system detected desc: Interactive login to host detected condition: interactive and not container - action: [alert] priority: high tags: [notification, suspicious-process] prefilter: [PE] @@ -593,7 +583,6 @@ - rule: Password file modified desc: Password file was modified condition: sf.file.path pmatch (sys_password_files) and open_write - action: [alert] priority: high tags: [actionable-offense, filesystem-tampering] prefilter: [FF,FE] @@ -601,7 +590,6 @@ - rule: History file modified desc: History file modified indicating interactive session condition: sf.file.path pmatch (history_files) and open_write - action: [alert] priority: high tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -609,7 +597,6 @@ - rule: Profile file modified desc: Profile file modified indicating interactive session 
condition: sf.file.path pmatch (profile_files) and open_write - action: [alert] priority: high tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -617,7 +604,6 @@ - rule: Library preload file modified desc: Library preload file modified could indicate a library injection attack condition: sf.file.path pmatch (ld_preload_files) and open_write - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -626,7 +612,6 @@ desc: Crontab file edited condition: (sf.file.path in (scheduler_files) and open_write) or (sf.file.newpath in (scheduler_files) and overwrite) - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF] @@ -634,7 +619,6 @@ - rule: Write below binary dir desc: an attempt to write to any file below a set of binary directories condition: open_write and bin_dir - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -642,7 +626,6 @@ - rule: Write below system dirs desc: an attempt to write to any file below a set of system directories condition: open_write and system_dir and not privileged_execution - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -650,7 +633,6 @@ - rule: Write to init directories desc: an attempt to write to an init directory could indicate a persisted piece of malware condition: open_write and init_dir - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -658,7 +640,6 @@ - rule: Write to scheduler directories desc: an attempt to write to a scheduler directory could indicate a persisted piece of malware condition: open_write and scheduler_dir - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -670,7 +651,6 @@ and not sf.file.path in (known_root_files) and not sf.file.directory in (known_root_directories) and not known_root_conditions - action: [alert] priority: high tags: [actionable-offense, filesystem-tampering] prefilter: [FF,FE] @@ -679,7 +659,6 @@ desc: an attempt to read any sensitive file (e.g. files containing user/password/authentication information). condition: sensitive_files and open_read and not privileged_execution and not auth_execution - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF] @@ -689,7 +668,6 @@ information). condition: (sf.file.path startswith /var/lib/rpm or sf.file.path startswith /var/lib/dpkg) and open_write - action: [alert] priority: medium tags: [unauthorized-package-install, notification] prefilter: [FF,FE] @@ -703,7 +681,6 @@ information). and not sf.proc.aname in (docker_binaries, k8s_binaries, lxd_binaries) and not sf.proc.aname startswith "runc:" and not sf.file.path startswith /var/run/netns/cni - action: [alert] priority: medium tags: [notification, suspicious-process] prefilter: [FF] @@ -718,7 +695,6 @@ information). and sf.proc.username != root and not sf.proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, nomachine_binaries) and not nrpe_becoming_nagios - action: [alert] priority: medium tags: [notification, suspicious-process] prefilter: [FF] @@ -731,7 +707,6 @@ information). and not sf.proc.name in (dev_creation_binaries) and not sf.file.path in (allowed_dev_files) and not sf.file.path startswith /dev/tty - action: [alert] priority: medium tags: [notification, filesystem-tampering] prefilter: [FF,FE] @@ -743,7 +718,6 @@ information). 
and inbound_outbound and not sf.proc.name in (systemd, hostid, id) and not login_doing_dns_lookup - action: [alert] priority: medium tags: [notification, suspicious-process] prefilter: [NF] diff --git a/resources/policies/runtimeintegrity/ttps.yaml b/resources/policies/runtimeintegrity/ttps.yaml new file mode 100644 index 00000000..8014264f --- /dev/null +++ b/resources/policies/runtimeintegrity/ttps.yaml @@ -0,0 +1,1005 @@ +###### Lists #################### + +- list: shell_binaries + items: [bash, csh, ksh, sh, tcsh, zsh, dash] + +- list: shell_mgmt_binaries + items: [add-shell, remove-shell] + +- list: script_interpreters + items: [ruby, python, python2.7, python2, python3, python3.5, java, perl, node, js24] + +- list: shell_interpreters + items: [awk, gawk] + +- list: coreutils_binaries + items: [ + truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, + groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, + basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, + base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, + comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, + tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, + tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, + tac, link, chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, + chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, + readlink, sleep, stty, mkdir, df, dir, rmdir + ] + +- list: coreutils_mod_binaries + items: [ + truncate, who, groups, csplit, expand, printenv, unlink, chcon, + split, whoami, users, stdbuf, unexpand, paste, runcon, env, + install, logname, pinky, nohup, tty, id, mkfifo, shred, + link, chroot, chown, touch, dd, chgrp, chmod, mktemp, mknod, + ln, rm, mv, cp, rmdir + ] + +- list: login_binaries + items: [ + login, systemd, '"(systemd)"', systemd-logind, su, + nologin, faillog, lastlog, newgrp, sg + ] + +- list: repositories + items: [git, svn] + + - list: modify_passwd_binaries + items: [ + chpasswd, chgpasswd, passwd + ] + +- list: verify_passwd_binaries + items: [ unix_chkpwd ] + +- list: create_user_binaries + items: [ useradd, newusers ] + +- list: delete_user_binaries + items: [ userdel ] + +- list: modify_user_binaries + items: [ usermod ] + +- list: create_grp_binaries + items: [ groupadd, newusers ] + +- list: delete_group_binaries + items: [ groupdel ] + +- list: modify_grp_binaries + items: [ groupmod ] + +- list: user_util_binaries + items: [ + shadowconfig, grpck, pwunconv, grpconv, pwck, + vipw, pwconv, cppw, + grpunconv, chage, chsh, + gpasswd, chfn, expiry, vigr, cpgr + ] + +- list: k8s_binaries + items: [hyperkube, skydns, kube2sky, exechealthz, weave-net] + +- list: lxd_binaries + items: [lxd, lxcfs] + +- list: http_server_binaries + items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2, node] + +- list: db_server_binaries + items: [mysqld, postgres, sqlplus] + +- list: mysql_mgmt_binaries + items: [mysql_install_d, mysql_ssl_rsa_s] + +- list: postgres_mgmt_binaries + items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] + +- list: db_mgmt_binaries + items: [mysql_mgmt_binaries, postgres_mgmt_binaries] + +- list: nosql_server_binaries + items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] + +- list: gitlab_binaries + items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] + +- list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, + repoquery, rpmkeys, 
rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] + +- list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit + ] + +- list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client] + +- list: ssl_mgmt_binaries + items: [ca-certificates] + +- list: dhcp_binaries + items: [dhclient, dhclient-script, 11-dhclient] + +- list: mail_binaries + items: [ + sendmail, sendmail-msp, postfix, procmail, exim4, + pickup, showq, mailq, dovecot, imap-login, imap, + mailmng-core, pop3-login, dovecot-lda, pop3 + ] + +- list: vpn_binaries + items: [openvpn] + +- list: sys_password_files + items: [/etc/shadow, /etc/passwd] + +- list: sensitive_file_names + items: [/etc/sudoers, /etc/pam.conf] + +- list: cron_binaries + items: [anacron, cron, crond, crontab] + +- list: system_users + items: [bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data] + +- list: system_directories + items: [/boot, /lib, /lib64, /usr/lib, /usr/local/lib, /usr/local/sbin, /usr/local/bin, /root/.ssh, /etc] + + - list: init_directories + items: [/etc/init.d] + +- list: history_files + items: [".bash_history", ".ash_history"] + +- list: network_config_files + items: ['/etc/resolv.conf', '/etc/hosts'] + +- list: read_sensitive_file_binaries + items: [ + iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, + vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, + pam-auth-update, pam-config, spamd, polkit-agent-he, lsattr, file, sosreport, + scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd + ] + +- list: downloader_binaries + items: [wget, curl] + +- list: remote_copy_binaries + items: [scp, rsync, telnet, ssh, ftp, rcp, sftp] + +- list: known_root_files + items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, + /root/.localstack, /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, + /root/.rnd, /root/.wget-hsts] + +- list: known_root_directories + items: [/root/.oracle_jre_usage, /root/.subversion, /root/.nami] + +- list: profile_files + items: [".bashrc", ".bash_profile", ".profile"] + +- list: ld_preload_files + items: ["/etc/ld.so.preload"] + +- list: scheduler_files + items: [/etc/crontab] + +- list: scheduler_directories + items: ["/etc/cron.d", "/etc/cron.daily", "/etc/cron.monthly", "/etc/cron.hourly", "/etc/cron.weekly", + "/usr/lib/cron/tabs", "/var/cron/tabs", "/var/spool/cron/crontabs", "/var/spool/cron"] + +- list: protected_shell_spawning_binaries + items: [ + http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, + fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 + ] + +- list: allowed_dev_files + items: [ + /dev/null, /dev/stdin, /dev/stdout, /dev/stderr, + /dev/random, /dev/urandom, /dev/console, /dev/kmsg + ] + +- list: mesos_shell_binaries + items: [mesos-docker-ex, mesos-slave, mesos-health-ch] + +- list: misc_tools + items: [calico-node] + +- list: edr_tools + items: [/crowdstrike/cmd.sh, besclient, BESClient] + +- list: log_tools + items: [logdna, splunk, rsyslog] + +- list: log_paths + items: [/var/, 
/tmp/nginx-ingress.private] + +- list: known_setuid_binaries + items: [ + sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, + filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, + start-stop-daem + ] + +- list: userexec_binaries + items: [sudo, su, suexec, critical-stack, dzdo] + +- list: docker_binaries + items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] + +- list: nomachine_binaries + items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] + +- list: compilers + items: ["g++", gcc, clang, javac] + +- list: shadowutils_binaries + items: [ + chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, + groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, + newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd + ] + +- list: user_mgmt_binaries + items: [login_binaries, passwd_binaries, shadowutils_binaries] + +- list: privileged_processes + items: [/usr/bin/sudo, /usr/local/sbin/runc] + +- list: auth_processes + items: [/opt/ibm/java/bin/keytool, /bin/chown, /bin/chmod, /bin/bash] + +- list: netcat_cmds + items: [nc, ncat] + +- list: netcat_shell_args + items: ['-e /bin/sh', '-e /bin/bash'] + +- list: discovery_cmds + items: [cat, strings, nl, head, tail] + +- list: host_files + items: [/etc/hosts, .ssh/config] + +- list: user_discovery_cmds + items: [w, who, whoami, id, last] + +- list: system_discovery_cmds + items: [uname, lsb_release, lscpu, lshw, lsmod, lspci, lsscsi, lsblk, hwinfo, hostname, inxi, uptime, dmidecode] + +- list: fs_discovery_cmds + items: [mount, df, tree, find] + +- list: net_discovery_cmds + items: [netstat, ss, lsof] + +- list: keylogger_cmds + items: [logkeys, lkl] + +- list: netconfig_discovery_cmds + items: [firewall-cmd, ufw, iptables, netstat, ss, ip, ifconfig, systemd-resolve, route] + +- list: at_cmds + items: [at, atd] + +- list: remote_copy_cmds + items: [scp, rsync, sftp] + +- list: remote_copy_inds + items: ['@', ':'] + +- list: cert_cmds + items: [update-ca-certificates, update-ca-trust] + +- list: security_procs + items: [nessusd, td-agent, packetbeat, filebeat, auditbeat, osqueryd, cbagentd, falcond] + +- list: service_cmds + items: [service, chkconfig, systemctl] + +- list: security_services + items: [iptables, ip6tables, firewalld, cbdaemon, falcon-sensor] + +- list: stop_cmds + items: [stop, disable, off] + +- list: write_send + items: [WRITE, SEND] + +- list: read_recv + items: [READ, RECV] + +###### Macros ################### + +- macro: sed_write + condition: (sf.proc.exe contains 'sed' and sf.proc.args contains '-i') + +- macro: overwrite + condition: sf.opflags = RENAME + +- macro: open_write + condition: (sf.opflags in (write_send) or + (sf.opflags = OPEN and sf.file.openflags in (CREAT)) or + sed_write) + +- macro: open_read + condition: (sf.file.is_open_read = true or sf.opflags = read_recv) + +- macro: interactive + condition: > + ((sf.proc.aname = sshd and sf.proc.name != sshd) or + sf.proc.name = systemd-logind or sf.proc.name = login or sf.proc.tty = true) + +- macro: user_ssh_directory + condition: (sf.file.path startswith '/home' and sf.file.path contains '.ssh') + +- macro: system_dir + condition: > + (sf.file.directory in (system_directories) + or user_ssh_directory) + +- macro: init_dir + condition: > + (sf.file.directory in (init_directories)) + +- macro: scheduler_dir + condition: > + (sf.file.directory in (scheduler_directories)) + +- macro: known_root_conditions + condition: 
(sf.file.path startswith /root/orcexec. + or sf.file.path startswith /root/.m2 + or sf.file.path startswith /root/.npm + or sf.file.path startswith /root/.pki + or sf.file.path startswith /root/.ivy2 + or sf.file.path startswith /root/.config/Cypress + or sf.file.path startswith /root/.config/pulse + or sf.file.path startswith /root/.config/configstore + or sf.file.path startswith /root/jenkins/workspace + or sf.file.path startswith /root/.jenkins + or sf.file.path startswith /root/.cache + or sf.file.path startswith /root/.sbt + or sf.file.path startswith /root/.java + or sf.file.path startswith /root/.glide + or sf.file.path startswith /root/.sonar + or sf.file.path startswith /root/.v8flag + or sf.file.path startswith /root/infaagent + or sf.file.path startswith /root/.local/lib/python + or sf.file.path startswith /root/.pm2 + or sf.file.path startswith /root/.gnupg + or sf.file.path startswith /root/.pgpass + or sf.file.path startswith /root/.theano + or sf.file.path startswith /root/.gradle + or sf.file.path startswith /root/.android + or sf.file.path startswith /root/.ansible + or sf.file.path startswith /root/.crashlytics + or sf.file.path startswith /root/.dbus + or sf.file.path startswith /root/.composer + or sf.file.path startswith /root/.gconf + or sf.file.path startswith /root/.nv + or sf.file.path startswith /root/.local/share/jupyter + or sf.file.path startswith /root/oradiag_root + or sf.file.path startswith /root/workspace + or sf.file.path startswith /root/jvm + or sf.file.path startswith /root/.node-gyp) + +- macro: rename + condition: sf.opflags = RENAME + +- macro: mkdir + condition: sf.opflags = MKDIR + +- macro: remove + condition: sf.opflags in (RMDIR, UNLINK) + +- macro: modify + condition: rename or remove + +- macro: bin_dir + condition: (sf.file.directory startswith /bin or + sf.file.directory startswith /sbin or + sf.file.directory startswith /usr/bin or + sf.file.directory startswith /usr/sbin or + sf.file.directory startswith /usr/local/bin or + sf.file.directory startswith /usr/local/sbin) + +- macro: etc_dir + condition: sf.file.path startswith /etc/ + +- macro: root_dir + condition: (sf.file.directory=/ or sf.file.path startswith /root) + +- macro: sensitive_files + condition: (sf.file.path startswith /etc and + sf.file.path in (sys_password_files)) + +- macro: ssh_port + condition: sf.net.sport=22 + +- macro: running_shell_command + condition: sf.proc.cmdline startswith "sh -c" + +- macro: parent_linux_image_upgrade_script + condition: sf.pproc.name startswith linux-image- + +- macro: parent_node_running_npm + condition: (sf.pproc.cmdline startswith "node /usr/local/bin/npm" or + sf.pproc.cmdline startswith "node /usr/local/nodejs/bin/npm" or + sf.pproc.cmdline startswith "node /opt/rh/rh-nodejs6/root/usr/bin/npm") + +- macro: java_package_installer + condition: (sf.proc.name=java and sf.proc.cmdline contains sbt-launch.jar) or (sf.proc.name=mvn) + +- macro: ansible_running_python + condition: (sf.proc.name in (python, pypy) and sf.proc.cmdline contains ansible) + +- macro: python_running_chef + condition: (sf.proc.name=python and (sf.proc.cmdline contains yum-dump.py or sf.proc.cmdline="python /usr/bin/chef-monitor.py")) + +- macro: python_running_get_pip + condition: (sf.proc.cmdline startswith "python get-pip.py") + +- macro: parent_java_running_zookeeper + condition: (sf.pproc.name=java and sf.pproc.cmdline contains org.apache.zookeeper.server) + +- macro: parent_java_running_kafka + condition: (sf.pproc.name=java and sf.pproc.cmdline contains 
kafka.Kafka) + +- macro: parent_java_running_elasticsearch + condition: (sf.pproc.name=java and sf.pproc.cmdline contains org.elasticsearch.bootstrap.Elasticsearch) + +- macro: parent_java_running_activemq + condition: (sf.pproc.name=java and sf.pproc.cmdline contains activemq.jar) + +- macro: parent_java_running_cassandra + condition: (sf.pproc.name=java and (sf.proc.cmdline contains "-Dcassandra.config.loader" or sf.pproc.cmdline contains org.apache.cassandra.service.CassandraDaemon)) + +- macro: parent_java_running_jboss_wildfly + condition: (sf.pproc.name=java and sf.pproc.cmdline contains org.jboss) + +- macro: parent_java_running_glassfish + condition: (sf.pproc.name=java and sf.pproc.cmdline contains com.sun.enterprise.glassfish) + +- macro: parent_java_running_hadoop + condition: (sf.pproc.name=java and sf.pproc.cmdline contains org.apache.hadoop) + +- macro: parent_java_running_datastax + condition: (sf.pproc.name=java and sf.pproc.cmdline contains com.datastax) + +- macro: nginx_starting_nginx + condition: (sf.pproc.name=nginx and sf.proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") + +- macro: nginx_running_aws_s3_cp + condition: (sf.pproc.name=nginx and sf.proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") + +- macro: consul_running_net_scripts + condition: (sf.pproc.name=consul and (sf.proc.cmdline startswith "sh -c curl" or sf.proc.cmdline startswith "sh -c nc")) + +- macro: consul_running_alert_checks + condition: (sf.pproc.name=consul and sf.proc.cmdline startswith "sh -c /bin/consul-alerts") + +- macro: serf_script + condition: (sf.proc.cmdline startswith "sh -c serf") + +- macro: check_process_status + condition: (sf.proc.cmdline startswith "sh -c kill -0 ") + +- macro: possibly_parent_java_running_tomcat + condition: (sf.pproc.name contains java and sf.pproc.cmdline contains org.apache.catalina.startup.Bootstrap) + +- macro: protected_shell_spawner + condition: > + (sf.proc.aname in (protected_shell_spawning_binaries) + or parent_java_running_zookeeper + or parent_java_running_kafka + or parent_java_running_elasticsearch + or parent_java_running_activemq + or parent_java_running_cassandra + or parent_java_running_jboss_wildfly + or parent_java_running_glassfish + or parent_java_running_hadoop + or parent_java_running_datastax + or possibly_parent_java_running_tomcat) + +- macro: nrpe_becoming_nagios + condition: (sf.proc.name=nrpe and sf.proc.username=nagios) + +- macro: container + condition: (sf.container.type != host) + +- macro: known_user_in_container + condition: (container and sf.proc.username != "N/A") + +- macro: system_procs + condition: sf.proc.name in (coreutils_binaries, user_mgmt_binaries) + +- macro: login_doing_dns_lookup + condition: (sf.proc.name=login and sf.net.proto=udp and sf.net.sport=53) + +- macro: inbound_outbound + condition: > + ((sf.opflags in (ACCEPT,CONNECT)) or + (sf.file.typechar = 4 or sf.file.typechar = 6) and + (sf.net.ip != "0.0.0.0" and sf.net.mask != "127.0.0.0/8") and + (sf.ret >= 0)) + +- macro: possibly_webserver + condition: (sf.proc.name pmatch (http_server_binaries) + or possibly_parent_java_running_tomcat) + +- macro: privileged_execution + condition: sf.proc.exe in (privileged_processes) + +- macro: allowed_launchers + condition: sf.pproc.exe in (/usr/local/sbin/runc) + or sf.proc.exe pmatch (cgi-bin) + +- macro: auth_execution + condition: sf.proc.exe in (auth_processes) + +- macro: entrypoint + condition: not sf.pproc.pid exists + +- macro: wl + condition: sf.proc.exe in (/echo/echo) + +- macro: 
parent_sudo + condition: sf.pproc.exe = /usr/bin/sudo + +- macro: ps_discovery_args + condition: (sf.proc.args contains 'e' and sf.proc.args contains 'f') or + (sf.proc.args contains 'a' and sf.proc.args contains 'u' and sf.proc.args contains 'x') + +- macro: home_dir_arg + condition: sf.proc.args endswith '/home' or sf.proc.args endswith '/home/' + +- macro: clear_cmds + condition: ( sf.proc.name = rm or + sf.proc.name = shred or + (sf.proc.name = truncate and sf.proc.args contains '-s0') or + (sf.proc.name = ln and sf.proc.args contains '-sf /dev/null')) + +###### Rules #################### + +- rule: Interactive shell + desc: Interactive shell detected + condition: interactive and not entrypoint + priority: low + tags: [mitre:T1059] + +- rule: Command and Scripting Interpreter + desc: any network activity performed by shell interpreters that are not expected to send or receive any network traffic + condition: sf.proc.name in (shell_binaries) + and inbound_outbound + and not login_doing_dns_lookup + and not entrypoint + priority: medium + tags: [mitre:T1041, mitre:T1059] + +- rule: Privilege escalation + desc: Privilege escalation detected + condition: sf.pproc.uid != 0 and sf.proc.uid = 0 and not entrypoint + priority: high + tags: [mitre:T1068] + +- rule: Untrusted read sensitive file + desc: an attempt to read any sensitive file (e.g. files containing user/password/authentication information) + condition: sensitive_files + and open_read + and not privileged_execution + and not auth_execution + and sf.proc.name in (coreutils_binaries, user_mgmt_binaries) + and not entrypoint + priority: medium + tags: [mitre:T1087] + +- rule: Webserver writing unusual file + desc: Webserver is writing a file other than a log file + condition: sf.file.type = f and + open_write and + possibly_webserver and not sf.file.path pmatch (log_paths) + and not entrypoint + priority: medium + tags: [mitre:T1190] + prefilter: [FF] + +- rule: Suspicious process spawned + desc: Suspicious behavior observed in application spawning another process + condition: sf.opflags = EXEC + and sf.proc.exe != sf.pproc.exe + and not allowed_launchers + and sf.proc.name in (shell_binaries, compilers, package_mgmt_binaries, shell_interpreters, coreutils_mod_binaries) + and not entrypoint + priority: low + tags: [mitre:T1106, mitre:T1574] + prefilter: [PE] + +- rule: Crontab file written + desc: Crontab file edited + condition: (sf.file.path in (scheduler_files) and open_write) or + (sf.file.newpath in (scheduler_files) and overwrite) + priority: medium + tags: [mitre:T1053] + +- rule: Unauthorized installer detected + desc: Use of package installer detected in container + condition: sf.opflags = EXEC and + (sf.proc.name pmatch (package_mgmt_binaries) or java_package_installer) and container + priority: medium + tags: [mitre:T1072] + prefilter: [PE] + +- rule: User/group modified, added or deleted + desc: User/Group was modified, added or deleted + condition: sf.opflags = EXEC and + sf.proc.name in (modify_passwd_binaries, create_user_binaries, delete_user_binaries, modify_user_binaries, create_grp_binaries, delete_group_binaries, modify_grp_binaries) + priority: high + tags: [mitre:T1098, mitre:T1136] + prefilter: [PE] + +- rule: Downloader detected + desc: Downloader is being run inside the container. 
Could be downloading something malicious + condition: sf.opflags = EXEC and + sf.proc.name pmatch (downloader_binaries) + priority: high + tags: [mitre:T1105] + prefilter: [PE] + +- rule: Password file modified + desc: Password file was modified + condition: sf.file.path pmatch (sys_password_files) and open_write + priority: high + tags: [mitre:T1098] + +- rule: Library preload file modified + desc: Library preload file modified could indicate a library injection attack + condition: sf.file.path pmatch (ld_preload_files) and open_write + priority: medium + tags: [mitre:T1547, mitre:T1554] + +- rule: Remote copy program detected + desc: Remote copy is occurring; could be data exfiltration + condition: sf.opflags = EXEC and + sf.proc.name in (remote_copy_binaries) + priority: high + tags: [mitre:T1020] + prefilter: [PE] + + - rule: Password utilities execution + desc: Password utilities were run in the host system + condition: sf.opflags = EXEC and + sf.proc.name pmatch (modify_passwd_binaries, verify_passwd_binaries, user_util_binaries) + priority: high + tags: [mitre:T1098] + prefilter: [PE] + +- rule: History file modified + desc: History file modified indicating interactive session + condition: sf.file.path pmatch (history_files) and open_write + priority: high + tags: [mitre:T1564] + +- rule: Profile file modified + desc: Profile file modified indicating interactive session + condition: sf.file.path pmatch (profile_files) and open_write + priority: high + tags: [mitre:T1098] + +- rule: Write below binary dir + desc: an attempt to write to any file below a set of binary directories + condition: open_write and bin_dir + priority: medium + tags: [mitre:T1574] + +- rule: Write below system dirs + desc: an attempt to write to any file below a set of system directories + condition: open_write and system_dir and not privileged_execution + priority: medium + tags: [mitre:T1574] + +- rule: Write to init directories + desc: an attempt to write to an init directory could indicate a persisted piece of malware + condition: open_write and init_dir + priority: medium + tags: [mitre:T1574] + +- rule: Write to scheduler directories + desc: an attempt to write to a scheduler directory could indicate a persisted piece of malware + condition: open_write and scheduler_dir + priority: medium + tags: [mitre:T1053] + +- rule: Write below root + desc: an attempt to write to any file directly below / or /root + condition: > + root_dir and open_write + and not sf.file.path in (known_root_files) + and not sf.file.directory in (known_root_directories) + and not known_root_conditions + priority: high + tags: [mitre:T1574] + +- rule: Write Below RPM/DPKG Database + desc: an attempt to write to the rpm/dpkg database + condition: (sf.file.path startswith /var/lib/rpm or + sf.file.path startswith /var/lib/dpkg) and + open_write + priority: medium + tags: [mitre:T1574] + +- rule: Change thread namespace + desc: > + an attempt to change a program/thread\'s namespace (commonly done + as a part of creating a container) by calling setns. + condition: > + sf.opflags = SETNS + and not sf.proc.aname in (docker_binaries, k8s_binaries, lxd_binaries) + and not sf.proc.aname startswith "runc:" + and not sf.file.path startswith /var/run/netns/cni + priority: medium + tags: [mitre:T1574, mitre:T1055] + +- rule: Non sudo setuid + desc: > + an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" + suing to itself are also excluded, as setuid calls typically involve dropping privileges. 
+ condition: > + sf.opflags = SETUID + and (known_user_in_container or not container) + and sf.proc.username != root + and not sf.proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, nomachine_binaries) + and not nrpe_becoming_nagios + priority: medium + tags: [mitre:T1068] + +- rule: Create files below dev + desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev. + condition: > + sf.file.directory = /dev + and open_write + and not sf.proc.name in (dev_creation_binaries) + and not sf.file.path in (allowed_dev_files) + and not sf.file.path startswith /dev/tty + priority: medium + tags: [mitre:T1574] + +- rule: System procs network activity + desc: any network activity performed by system binaries that are not expected to send or receive any network traffic + condition: system_procs + and inbound_outbound + and not sf.proc.name in (systemd, hostid, id) + and not login_doing_dns_lookup + and not entrypoint + and not wl + priority: medium + tags: [mitre:T1543, mitre:T1041] + +- rule: Reverse Unix shell started + desc: creation of a reverse shell process via nc + condition: sf.opflags = EXEC and + sf.proc.name in (netcat_cmds) and sf.proc.args pmatch (netcat_shell_args) + priority: high + tags: [mitre:T1059.004] + prefilter: [PE] + +- rule: Linux and Mac File and Directory Permissions Modification + desc: modification of permissions or owner of a file or a directory in a linux system + condition: sf.opflags = EXEC and + sf.proc.name in (chmod, chown) + priority: high + tags: [mitre:T1222.002] + prefilter: [PE] + +- rule: Process Discovery + desc: gather information about running processes on a system + condition: sf.opflags = EXEC and + ((sf.proc.name = ps and ps_discovery_args) or sf.proc.name = top) + priority: high + tags: [mitre:T1057] + prefilter: [PE] + +- rule: Account Discovery: Local Account + desc: attempt to get a listing of local system accounts + condition: sf.opflags = EXEC and + sf.proc.name in (discovery_cmds) and sf.proc.args in (sys_password_files) + priority: high + tags: [mitre:T1087.001] + prefilter: [PE] + +- rule: Remote System Discovery + desc: > + attempt to get a listing of other systems by IP address, hostname, or other logical + identifier on a network that may be used for Lateral Movement + condition: sf.opflags = EXEC and + sf.proc.name in (discovery_cmds) and sf.proc.args pmatch (host_files) + priority: high + tags: [mitre:T1018] + prefilter: [PE] + +- rule: System Owner/User Discovery + desc: > + attempt to identify the primary user, currently logged in user, set of users + that commonly uses a system, or whether a user is actively using the system + condition: sf.opflags = EXEC and + sf.proc.name in (user_discovery_cmds) + priority: high + tags: [mitre:T1033] + prefilter: [PE] + +- rule: Permission Groups Discovery: Local Groups + desc: attempt to find local system groups and permission settings + condition: sf.opflags = EXEC and + (sf.proc.name = groups or + (sf.proc.name in (discovery_cmds) and sf.proc.args = '/etc/groups')) + priority: high + tags: [mitre:T1069.001] + prefilter: [PE] + +- rule: System Information Discovery + desc: > + attempt to get detailed information about the operating system and hardware, + including version, patches, hotfixes, service packs, and architecture + condition: sf.opflags = EXEC and + sf.proc.name in (system_discovery_cmds) + priority: high + tags: [mitre:T1082] + prefilter: [PE] + +# partially from 
https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_file_and_directory_discovery.yml +- rule: File and Directory Discovery + desc: enumerate files, directories and volume information + condition: sf.opflags = EXEC and + ((sf.proc.name = ls and (home_dir_arg or sf.proc.args contains '-R')) or + sf.proc.name in (fs_discovery_cmds)) + priority: high + tags: [mitre:T1083] + prefilter: [PE] + +- rule: System Network Connections Discovery + desc: attempt to get a listing of network connections + condition: sf.opflags = EXEC and + sf.proc.name in (net_discovery_cmds) + priority: high + tags: [mitre:T1049] + prefilter: [PE] + +- rule: Shell started by container entry point + desc: Container entry point "node" starts shell sub-process + condition: sf.opflags = EXEC and + container and sf.pproc.name = node and sf.proc.name in (shell_binaries) + priority: high + tags: [mitre:T1059.004] + prefilter: [PE] + +- rule: Large network data transfer with database endpoint + desc: Large amount of data transferred via network connection with database endpoint + condition: ( sf.opflags contains RECV and sf.net.dport = 3306 and sf.flow.rbytes > 1024 ) or + ( sf.opflags contains SEND and sf.net.sport = 3306 and sf.flow.wbytes > 1024 ) + priority: high + tags: [mitre:T1030] + prefilter: [NF] + +- rule: Active Scanning: Scanning IP Blocks + desc: Use of nmap to scan for ports on a remote machine + condition: sf.proc.name = nmap + priority: medium + tags: [mitre:T1595.001] + prefilter: [PE] + +- rule: Input Capture: Keylogging + desc: Use of keylogger to log user keystrokes + condition: sf.proc.name in (keylogger_cmds) + priority: high + tags: [mitre:T1056.001] + prefilter: [PE] + +- rule: Account Manipulation: SSH Authorized Keys + desc: Attempt to modify the SSH authorized_keys file + condition: user_ssh_directory and (sf.file.path endswith 'authorized_keys') and open_write + priority: high + tags: [mitre:T1098.004] + prefilter: [FF] + +- rule: System Network Configuration Modification + desc: Attempt to modify the system network configuration file + condition: sf.file.path in (network_config_files) and open_write + priority: high + tags: [mitre:T1565.001] + prefilter: [FF] + +# from https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_system_network_discovery.yml +- rule: System Network Configuration Discovery + desc: Attempt to get details about the network configuration + condition: sf.opflags = EXEC and + ((sf.proc.name in (discovery_cmds) and sf.proc.args pmatch (network_config_files)) or + sf.proc.name in (netconfig_discovery_cmds)) + priority: high + tags: [mitre:T1016] + prefilter: [PE] + +- rule: Unsecured Credentials: Bash History + desc: Searching the command history for unprotected credentials + condition: sf.opflags = EXEC and + sf.proc.name in (discovery_cmds) and sf.proc.args pmatch (history_files) + priority: high + tags: [mitre:T1552.003] + prefilter: [PE] + +# partially from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_shell_clear_cmd_history.yml +- rule: Indicator Removal on Host: Clear Linux or Mac System Logs + desc: Attempts to clear system logs to hide evidence of an intrusion + condition: sf.opflags = EXEC and ( + ( sf.proc.args pmatch (history_files) and clear_cmds) or + ( sf.proc.name = history and sf.proc.args = '-c')) + priority: medium + tags: [mitre:T1070.003] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/at_command.yml +- rule: Scheduled Task/Job At + desc: Detects the use of at/atd + condition: 
sf.opflags = EXEC and sf.proc.name in (at_cmds) + priority: low + tags: [mitre:T1053.001] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_base64_decode.yml +- rule: Decode Base64 Encoded Text + desc: Detects usage of base64 utility to decode arbitrary base64-encoded text + condition: sf.opflags = EXEC and sf.proc.name = base64 and sf.proc.args contains '-d' + priority: low + tags: [mitre:T1027] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_file_copy.yml +- rule: Remote File Copy + desc: Detects the use of tools that copy files from or to remote systems + condition: sf.opflags = EXEC and sf.proc.name in (remote_copy_cmds) and sf.proc.args pmatch (remote_copy_inds) + priority: low + tags: [mitre:T1105] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_install_root_certificate.yml +- rule: Install Root Certificate + desc: Detects installation of new root certificates + condition: sf.opflags = EXEC and sf.proc.name in (cert_cmds) + priority: low + tags: [mitre:T1553.004] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_schedule_task_job_cron.yml +- rule: Scheduled Task/Job: Cron + desc: Detects abuse of the cron utility to perform task scheduling for initial or recurring execution + condition: sf.opflags = EXEC and sf.proc.name = cron + priority: low + tags: [mitre:T1053.003] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_security_software_discovery.yml +- rule: Security Software Discovery + desc: Detects usage of system utilities (only grep for now) to discover installed security software + condition: sf.opflags = EXEC and sf.proc.name = grep and sf.proc.args pmatch (security_procs) + priority: low + tags: [mitre:T1518.001] + prefilter: [PE] + +# from Sigma https://github.com/SigmaHQ/sigma/blob/master/rules/linux/lnx_security_tools_disabling.yml +- rule: Impair Defenses: Disable or Modify System Firewall + desc: Detects disabling security tools + condition: sf.opflags = EXEC and + (( sf.proc.name in (service_cmds) and + sf.proc.args pmatch (security_services) and + sf.proc.args pmatch (stop_cmds)) or + ( sf.proc.name = setenforce and sf.proc.args = '0')) + priority: medium + tags: [mitre:T1562.004] + prefilter: [PE] + diff --git a/resources/policies/sigma/config/sysflow.yml b/resources/policies/sigma/config/sysflow.yml new file mode 100644 index 00000000..760c7bb2 --- /dev/null +++ b/resources/policies/sigma/config/sysflow.yml @@ -0,0 +1,23 @@ +title: SysFlow field mapping +order: 1 +backends: + - sf-processor + +logsources: + sysflow: + product: linux + service: sysflow + conditions: + event.provider: sysflow + +fieldmappings: + Image: sf.proc.exe + CommandLine: sf.proc.cmdline + ProcessId: sf.proc.pid + ParentImage: sf.proc.aexe + ParentCommandLine: sf.proc.acmdline + ParentProcessId: sf.pproc.pid + CurrentDirectory: sf.proc.cwd + User: sf.proc.user + DestinationIp: sf.net.dip + TargetFilename: sf.file.path \ No newline at end of file diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_at_command.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_at_command.yml new file mode 100644 index 00000000..8ba08536 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_at_command.yml @@ -0,0 +1,26 @@ +title: Scheduled Task/Job At +id: d2d642d7-b393-43fe-bae4-e81ed5915c4b 
+status: stable +description: | + Detects the use of at/atd which are utilities that are used to schedule tasks. + They are often abused by adversaries to maintain persistence or to perform task scheduling for initial or recurring execution of malicious code +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1053.002/T1053.002.md +author: Ömer Günal, oscd.community +date: 2020/10/06 +modified: 2022/07/07 +tags: + - attack.persistence + - attack.t1053.002 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/at' + - '/atd' + condition: selection +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_decode.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_decode.yml new file mode 100644 index 00000000..62c4a1d6 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_decode.yml @@ -0,0 +1,23 @@ +title: Decode Base64 Encoded Text +id: e2072cab-8c9a-459b-b63c-40ae79e27031 +status: test +description: Detects usage of base64 utility to decode arbitrary base64-encoded text +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1027/T1027.md +author: Daniil Yugoslavskiy, oscd.community +date: 2020/10/19 +modified: 2021/11/27 +tags: + - attack.defense_evasion + - attack.t1027 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/base64' + CommandLine|contains: '-d' + condition: selection +falsepositives: + - Legitimate activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_execution.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_execution.yml new file mode 100644 index 00000000..eef90857 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_execution.yml @@ -0,0 +1,32 @@ +title: Linux Base64 Encoded Pipe to Shell +id: ba592c6d-6888-43c3-b8c6-689b8fe47337 +status: experimental +description: Detects suspicious process command line that uses base64 encoded input for execution with a shell +references: + - https://github.com/arget13/DDexec +author: pH-T +date: 2022/07/26 +tags: + - attack.defense_evasion + - attack.t1140 +logsource: + product: linux + category: process_creation +detection: + selection_base64: + CommandLine|contains: 'base64 -w0 ' + selection_exec: + - CommandLine|contains: + - '| bash ' + - '| sh ' + - '|bash ' + - '|sh ' + - CommandLine|endswith: + - '| bash' + - '| sh' + - '|bash' + - ' |sh' + condition: selection_base64 and selection_exec +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_shebang_cli.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_shebang_cli.yml new file mode 100644 index 00000000..06fd44c9 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_base64_shebang_cli.yml @@ -0,0 +1,27 @@ +title: Linux Base64 Encoded Shebang In CLI +id: fe2f9663-41cb-47e2-b954-8a228f3b9dff +status: experimental +description: Detects the presence of a base64 version of the shebang in the commandline, which could indicate a malicious payload about to be 
decoded +references: + - https://www.trendmicro.com/pl_pl/research/20/i/the-evolution-of-malicious-shell-scripts.html + - https://github.com/carlospolop/PEASS-ng/tree/master/linPEAS +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.defense_evasion + - attack.t1140 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|contains: + - "IyEvYmluL2Jhc2" #!/bin/bash" + - "IyEvYmluL2Rhc2" #!/bin/dash" + - "IyEvYmluL3pza" #!/bin/zsh" + - "IyEvYmluL2Zpc2" #!/bin/fish + - "IyEvYmluL3No" # !/bin/sh" + condition: selection +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_bpftrace_unsafe_option_usage.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_bpftrace_unsafe_option_usage.yml new file mode 100644 index 00000000..0e93ac17 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_bpftrace_unsafe_option_usage.yml @@ -0,0 +1,23 @@ +title: BPFtrace Unsafe Option Usage +id: f8341cb2-ee25-43fa-a975-d8a5a9714b39 +status: experimental +description: Detects the usage of the unsafe bpftrace option +references: + - https://embracethered.com/blog/posts/2021/offensive-bpf-bpftrace/ + - https://bpftrace.org/ +author: Andreas Hunkeler (@Karneades) +date: 2022/02/11 +tags: + - attack.execution + - attack.t1059.004 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: 'bpftrace' + CommandLine|contains: '--unsafe' + condition: selection +falsepositives: + - Legitimate usage of the unsafe option +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cat_sudoers.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cat_sudoers.yml new file mode 100644 index 00000000..205abdda --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cat_sudoers.yml @@ -0,0 +1,28 @@ +title: Cat Sudoers +id: 0f79c4d2-4e1f-4683-9c36-b5469a665e06 +status: test +description: Detects the execution of a cat /etc/sudoers to list all users that have sudo rights +references: + - https://github.com/sleventyeleven/linuxprivchecker/ +author: Florian Roth +date: 2022/06/20 +modified: 2022/09/15 +tags: + - attack.reconnaissance + - attack.t1592.004 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + - '/cat' + - 'grep' + - '/head' + - '/tail' + - '/more' + CommandLine|contains: ' /etc/sudoers' + condition: selection +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_chattr_immutable_removal.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_chattr_immutable_removal.yml new file mode 100644 index 00000000..8836eb23 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_chattr_immutable_removal.yml @@ -0,0 +1,25 @@ +title: Remove Immutable File Attribute +id: 34979410-e4b5-4e5d-8cfb-389fdff05c12 +related: + - id: a5b977d6-8a81-4475-91b9-49dbfcd941f7 + type: derived +status: experimental +description: Detects usage of the 'chattr' utility to remove immutable file attribute. 
+references: + - https://www.trendmicro.com/en_us/research/22/i/how-malicious-actors-abuse-native-linux-tools-in-their-attacks.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.defense_evasion + - attack.t1222.002 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: '/chattr' + CommandLine|contains: ' -i ' + condition: selection +falsepositives: + - Administrator interacting with immutable files (e.g. for instance backups). +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_logs.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_logs.yml new file mode 100644 index 00000000..37aee139 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_logs.yml @@ -0,0 +1,28 @@ +title: Clear Linux Logs +id: 80915f59-9b56-4616-9de0-fd0dea6c12fe +status: stable +description: Detects attempts to clear logs on the system. Adversaries may clear system logs to hide evidence of an intrusion +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1070.002/T1070.002.md +author: Ömer Günal, oscd.community +date: 2020/10/07 +modified: 2022/09/15 +tags: + - attack.defense_evasion + - attack.t1070.002 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/rm' # covers /rmdir as well + - '/shred' + - '/unlink' + CommandLine|contains: + - '/var/log' + - '/var/spool/mail' + condition: selection +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_syslog.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_syslog.yml new file mode 100644 index 00000000..5e217f0c --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clear_syslog.yml @@ -0,0 +1,33 @@ +title: Commands to Clear or Remove the Syslog +id: 3fcc9b35-39e4-44c0-a2ad-9e82b6902b31 +status: experimental +description: Detects specific commands commonly used to remove or empty the syslog. Which is often used by attacker as a method to hide their tracks +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1070.002/T1070.002.md +author: Max Altgelt, Roberto Rodriguez (Cyb3rWard0g), OTR (Open Threat Research), MSTIC +date: 2021/10/15 +modified: 2022/09/15 +tags: + - attack.defense_evasion + - attack.t1070.002 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|contains: + - 'rm /var/log/syslog' + - 'rm -r /var/log/syslog' + - 'rm -f /var/log/syslog' + - 'rm -rf /var/log/syslog' + - 'unlink /var/log/syslog' + - 'unlink -r /var/log/syslog' + - 'unlink -f /var/log/syslog' + - 'unlink -rf /var/log/syslog' + - 'mv /var/log/syslog' + - ' >/var/log/syslog' + - ' > /var/log/syslog' + condition: selection +falsepositives: + - Log rotation. 
+level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clipboard_collection.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clipboard_collection.yml new file mode 100644 index 00000000..cceca4a0 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_clipboard_collection.yml @@ -0,0 +1,28 @@ +title: Clipboard Collection with Xclip Tool +id: ec127035-a636-4b9a-8555-0efd4e59f316 +status: experimental +description: | + Detects attempts to collect data stored in the clipboard from users with the usage of xclip tool. Xclip has to be installed. + Highly recommended using rule on servers, due to high usage of clipboard utilities on user workstations. +references: + - https://www.packetlabs.net/posts/clipboard-data-security/ +author: Pawel Mazur, Roberto Rodriguez (Cyb3rWard0g), OTR (Open Threat Research), MSTIC +date: 2021/10/15 +modified: 2022/09/15 +tags: + - attack.collection + - attack.t1115 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|contains: 'xclip' + CommandLine|contains|all: + - '-sel' + - 'clip' + - '-o' + condition: selection +falsepositives: + - Legitimate usage of xclip tools. +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crontab_removal.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crontab_removal.yml new file mode 100644 index 00000000..9fd7bd00 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crontab_removal.yml @@ -0,0 +1,23 @@ +title: Remove Scheduled Cron Task/Job +id: c2e234de-03a3-41e1-b39a-1e56dc17ba67 +status: experimental +description: | + Detects usage of the 'crontab' utility to remove the current crontab. + This is a common occurrence where cryptocurrency miners compete against each other by removing traces of other miners to hijack the maximum amount of resources possible +references: + - https://www.trendmicro.com/en_us/research/22/i/how-malicious-actors-abuse-native-linux-tools-in-their-attacks.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.defense_evasion +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: 'crontab' + CommandLine|contains: ' -r' + condition: selection +falsepositives: + - Unknown +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crypto_mining.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crypto_mining.yml new file mode 100644 index 00000000..70d6987e --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_crypto_mining.yml @@ -0,0 +1,38 @@ +title: Linux Crypto Mining Indicators +id: 9069ea3c-b213-4c52-be13-86506a227ab1 +status: experimental +description: Detects command line parameters or strings often used by crypto miners +references: + - https://www.poolwatch.io/coin/monero +author: Florian Roth +date: 2021/10/26 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|contains: + - ' --cpu-priority=' + - '--donate-level=0' + - ' -o pool.' 
+ - ' --nicehash' + - ' --algo=rx/0 ' + - 'stratum+tcp://' + - 'stratum+udp://' + # Sub process started by xmrig - the most popular Monero crypto miner - unknown if this causes any false positives + - 'sh -c /sbin/modprobe msr allow_writes=on' + # base64 encoded: --donate-level= + - 'LS1kb25hdGUtbGV2ZWw9' + - '0tZG9uYXRlLWxldmVsP' + - 'tLWRvbmF0ZS1sZXZlbD' + # base64 encoded: stratum+tcp:// and stratum+udp:// + - 'c3RyYXR1bSt0Y3A6Ly' + - 'N0cmF0dW0rdGNwOi8v' + - 'zdHJhdHVtK3RjcDovL' + - 'c3RyYXR1bSt1ZHA6Ly' + - 'N0cmF0dW0rdWRwOi8v' + - 'zdHJhdHVtK3VkcDovL' + condition: selection +falsepositives: + - Legitimate use of crypto miners +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_curl_usage.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_curl_usage.yml new file mode 100644 index 00000000..770a0b51 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_curl_usage.yml @@ -0,0 +1,22 @@ +title: Curl Usage on Linux +id: ea34fb97-e2c4-4afb-810f-785e4459b194 +status: experimental +description: Detects a curl process start on linux, which indicates a file download from a remote location or a simple web request to a remote server +references: + - https://www.trendmicro.com/en_us/research/22/i/how-malicious-actors-abuse-native-linux-tools-in-their-attacks.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.command_and_control + - attack.t1105 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/curl' + condition: selection +falsepositives: + - Scripts created by developers and admins + - Administrative activity +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_26134_atlassian_confluence.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_26134_atlassian_confluence.yml new file mode 100644 index 00000000..d43cd9c1 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_26134_atlassian_confluence.yml @@ -0,0 +1,40 @@ +title: Atlassian Confluence CVE-2022-26134 +id: 7fb14105-530e-4e2e-8cfb-99f7d8700b66 +related: + - id: 245f92e3-c4da-45f1-9070-bc552e06db11 + type: derived +status: experimental +description: Detects spawning of suspicious child processes by Atlassian Confluence server which may indicate successful exploitation of CVE-2022-26134 +references: + - https://www.volexity.com/blog/2022/06/02/zero-day-exploitation-of-atlassian-confluence/ +author: Nasreddine Bencherchali +date: 2022/06/03 +tags: + - attack.initial_access + - attack.execution + - attack.t1190 + - attack.t1059 + - cve.2022.26134 +logsource: + category: process_creation + product: linux +detection: + selection: + # Monitor suspicious child processes spawned by Confluence + ParentImage|startswith: '/opt/atlassian/confluence/' + ParentImage|endswith: '/java' + CommandLine|contains: + - '/bin/sh' + - 'bash' + - 'dash' + - 'ksh' + - 'zsh' + - 'csh' + - 'fish' + - 'curl' + - 'wget' + - 'python' + condition: selection +falsepositives: + - Unknown +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_33891_spark_shell_command_injection.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_33891_spark_shell_command_injection.yml new file mode 100644 index 00000000..d6ad54f0 --- /dev/null +++ 
b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_cve_2022_33891_spark_shell_command_injection.yml @@ -0,0 +1,27 @@ +title: Apache Spark Shell Command Injection - ProcessCreation +id: c8a5f584-cdc8-42cc-8cce-0398e4265de3 +status: experimental +description: Detects attempts to exploit an Apache Spark server via CVE-2022-33891 from a command line perspective +references: + - https://github.com/W01fh4cker/cve-2022-33891/blob/fd973b56e78bca8822caa3a2e3cf1b5aff5d0950/cve_2022_33891_poc.py + - https://sumsec.me/2022/CVE-2022-33891%20Apache%20Spark%20shell%20command%20injection.html + - https://github.com/apache/spark/pull/36315/files +author: Nasreddine Bencherchali +date: 2022/07/20 +tags: + - attack.initial_access + - attack.t1190 + - cve.2022.33891 +logsource: + product: linux + category: process_creation +detection: + selection: + ParentImage|endswith: '/bash' + CommandLine|contains: + - 'id -Gn `' + - "id -Gn '" + condition: selection +falsepositives: + - Unlikely +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_dd_file_overwrite.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_dd_file_overwrite.yml new file mode 100644 index 00000000..4998c16c --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_dd_file_overwrite.yml @@ -0,0 +1,30 @@ +title: DD File Overwrite +id: 2953194b-e33c-4859-b9e8-05948c167447 +status: experimental +description: Detects potential overwriting and deletion of a file using DD. +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1485/T1485.md#atomic-test-2---macoslinux---overwrite-file-with-dd +author: Roberto Rodriguez (Cyb3rWard0g), OTR (Open Threat Research), MSTIC +date: 2021/10/15 +modified: 2022/07/07 +tags: + - attack.impact + - attack.t1485 +logsource: + product: linux + category: process_creation +detection: + selection1: + Image: + - '/bin/dd' + - '/usr/bin/dd' + selection2: + CommandLine|contains: 'of=' + selection3: + CommandLine|contains: + - 'if=/dev/zero' + - 'if=/dev/null' + condition: all of selection* +falsepositives: + - Any user deleting files that way. +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_doas_execution.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_doas_execution.yml new file mode 100644 index 00000000..564c37a3 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_doas_execution.yml @@ -0,0 +1,22 @@ +title: Linux Doas Tool Execution +id: 067d8238-7127-451c-a9ec-fa78045b618b +status: stable +description: Detects execution of the doas tool on a Linux host platform. This utility allows standard users to perform tasks as root, the same way sudo does.
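+# Illustrative, assumed usage (not part of the original rule): doas is typically invoked as
+#   doas -u root /bin/sh
+# and any process whose image path ends in '/doas' will match the selection below.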
+references: + - https://research.splunk.com/endpoint/linux_doas_tool_execution/ + - https://www.makeuseof.com/how-to-install-and-use-doas/ +author: Sittikorn S, Teoderick Contreras +date: 2022/01/20 +tags: + - attack.privilege_escalation + - attack.t1548 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: '/doas' + condition: selection +falsepositives: + - Unlikely +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_and_directory_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_and_directory_discovery.yml new file mode 100644 index 00000000..50bd11e9 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_and_directory_discovery.yml @@ -0,0 +1,30 @@ +title: File and Directory Discovery +id: d3feb4ee-ff1d-4d3d-bd10-5b28a238cc72 +status: test +description: Detects usage of system utilities to discover files and directories +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1083/T1083.md +author: Daniil Yugoslavskiy, oscd.community +date: 2020/10/19 +modified: 2021/11/27 +tags: + - attack.discovery + - attack.t1083 +logsource: + category: process_creation + product: linux +detection: + select_file_with_asterisk: + Image|endswith: '/file' + CommandLine|re: '(.){200,}' # execution of the 'file */* *>> /tmp/output.txt' will produce huge commandline + select_recursive_ls: + Image|endswith: '/ls' + CommandLine|contains: '-R' + select_find_execution: + Image|endswith: '/find' + select_tree_execution: + Image|endswith: '/tree' + condition: 1 of select* +falsepositives: + - Legitimate activities +level: informational diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_deletion.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_deletion.yml new file mode 100644 index 00000000..47adf83d --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_file_deletion.yml @@ -0,0 +1,25 @@ +title: File Deletion +id: 30aed7b6-d2c1-4eaf-9382-b6bc43e50c57 +status: stable +description: Detects file deletion using "rm", "shred" or "unlink" commands which are used often by adversaries to delete files left behind by the actions of their intrusion activity +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1070.004/T1070.004.md +author: Ömer Günal, oscd.community +date: 2020/10/07 +modified: 2022/09/15 +tags: + - attack.defense_evasion + - attack.t1070.004 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/rm' # covers /rmdir as well + - '/shred' + - '/unlink' + condition: selection +falsepositives: + - Legitimate administration activities +level: informational diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_install_root_certificate.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_install_root_certificate.yml new file mode 100644 index 00000000..ea8e6b83 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_install_root_certificate.yml @@ -0,0 +1,24 @@ +title: Install Root Certificate +id: 78a80655-a51e-4669-bc6b-e9d206a462ee +status: test +description: Detects installation of new certificate on the system which attackers may use to avoid warnings 
when connecting to controlled web servers or C2s +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1553.004/T1553.004.md +author: Ömer Günal, oscd.community +date: 2020/10/05 +modified: 2022/07/07 +tags: + - attack.defense_evasion + - attack.t1553.004 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/update-ca-certificates' + - '/update-ca-trust' + condition: selection +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_account.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_account.yml new file mode 100644 index 00000000..42244a2f --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_account.yml @@ -0,0 +1,39 @@ +title: Local System Accounts Discovery +id: b45e3d6f-42c6-47d8-a478-df6bd6cf534c +status: test +description: Detects enumeration of local system accounts. This information can help adversaries determine which local accounts exist on a system to aid in follow-on behavior. +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1087.001/T1087.001.md +author: Alejandro Ortuno, oscd.community +date: 2020/10/08 +modified: 2022/09/15 +tags: + - attack.discovery + - attack.t1087.001 +logsource: + category: process_creation + product: linux +detection: + selection_1: + Image|endswith: '/lastlog' + selection_2: + CommandLine|contains: '''x:0:''' + selection_3: + Image|endswith: + - '/cat' + - '/head' + - '/tail' + - '/more' + CommandLine|contains: + - '/etc/passwd' + - '/etc/shadow' + - '/etc/sudoers' + selection_4: + Image|endswith: '/id' + selection_5: + Image|endswith: '/lsof' + CommandLine|contains: '-u' + condition: 1 of selection* +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_groups.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_groups.yml new file mode 100644 index 00000000..17d1a389 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_local_groups.yml @@ -0,0 +1,29 @@ +title: Local Groups Discovery +id: 676381a6-15ca-4d73-a9c8-6a22e970b90d +status: test +description: Detects enumeration of local system groups.
Adversaries may attempt to find local system groups and permission settings +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1069.001/T1069.001.md +author: Ömer Günal, Alejandro Ortuno, oscd.community +date: 2020/10/11 +modified: 2022/09/15 +tags: + - attack.discovery + - attack.t1069.001 +logsource: + category: process_creation + product: linux +detection: + selection_1: + Image|endswith: '/groups' + selection_2: + Image|endswith: + - '/cat' + - '/head' + - '/tail' + - '/more' + CommandLine|contains: '/etc/group' + condition: 1 of selection* +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_network_service_scanning.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_network_service_scanning.yml new file mode 100644 index 00000000..15b4cbea --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_network_service_scanning.yml @@ -0,0 +1,31 @@ +title: Linux Network Service Scanning +id: 3e102cd9-a70d-4a7a-9508-403963092f31 +status: test +description: Detects enumeration of local or remote network services. +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1046/T1046.md +author: Alejandro Ortuno, oscd.community +date: 2020/10/21 +modified: 2022/10/09 +tags: + - attack.discovery + - attack.t1046 +logsource: + category: process_creation + product: linux + definition: 'Detect netcat and filter out listening mode' +detection: + netcat: + Image|endswith: + - '/nc' + - '/netcat' + network_scanning_tools: + Image|endswith: + - '/telnet' # could be wget, curl, ssh, many things. basically everything that is able to make a network connection.
consider fine tuning + - '/nmap' + netcat_listen_flag: + CommandLine|contains: 'l' + condition: (netcat and not netcat_listen_flag) or network_scanning_tools +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_nohup.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_nohup.yml new file mode 100644 index 00000000..4c9de4ee --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_nohup.yml @@ -0,0 +1,20 @@ +title: Nohup Execution +id: e4ffe466-6ff8-48d4-94bd-e32d1a6061e2 +status: experimental +description: Detects usage of nohup which could be leveraged by an attacker to keep a process running or break out from restricted environments +references: + - https://gtfobins.github.io/gtfobins/nohup/ + - https://en.wikipedia.org/wiki/Nohup + - https://www.computerhope.com/unix/unohup.htm +author: 'Christopher Peacock @SecurePeacock, SCYTHE @scythe_io' +date: 2022/06/06 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: '/nohup' + condition: selection +falsepositives: + - Administrators or installed processes that leverage nohup +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executescript.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executescript.yml new file mode 100644 index 00000000..01823c9f --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executescript.yml @@ -0,0 +1,35 @@ +title: OMIGOD SCX RunAsProvider ExecuteScript +id: 6eea1bf6-f8d2-488a-a742-e6ef6c1b67db +status: experimental +description: | + Rule to detect the use of the SCX RunAsProvider ExecuteScript to execute any UNIX/Linux script using the /bin/sh shell. + Script being executed gets created as a temp file in /tmp folder with a scx* prefix. + Then it is invoked from the following directory /etc/opt/microsoft/scx/conf/tmpdir/. + The file in that directory has the same prefix scx*. SCXcore, started as the Microsoft Operations Manager UNIX/Linux Agent, is now used in a host of products including + Microsoft Operations Manager, Microsoft Azure, and Microsoft Operations Management Suite. +references: + - https://www.wiz.io/blog/omigod-critical-vulnerabilities-in-omi-azure + - https://github.com/Azure/Azure-Sentinel/pull/3059 +author: Roberto Rodriguez (Cyb3rWard0g), OTR (Open Threat Research), MSTIC +date: 2021/10/15 +modified: 2022/10/05 +tags: + - attack.privilege_escalation + - attack.initial_access + - attack.execution + - attack.t1068 + - attack.t1190 + - attack.t1203 +logsource: + product: linux + category: process_creation +detection: + selection: + User: root + LogonId: 0 + CurrentDirectory: '/var/opt/microsoft/scx/tmp' + CommandLine|contains: '/etc/opt/microsoft/scx/conf/tmpdir/scx' + condition: selection +falsepositives: + - Legitimate use of SCX RunAsProvider ExecuteScript. 
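+# Assumed illustration (the scx1234 file name is hypothetical): a matching event would be a root
+# process with CurrentDirectory /var/opt/microsoft/scx/tmp running something like
+#   /bin/sh /etc/opt/microsoft/scx/conf/tmpdir/scx1234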
+level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executeshellcommand.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executeshellcommand.yml new file mode 100644 index 00000000..49b0d940 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_omigod_scx_runasprovider_executeshellcommand.yml @@ -0,0 +1,33 @@ +title: OMIGOD SCX RunAsProvider ExecuteShellCommand +id: 21541900-27a9-4454-9c4c-3f0a4240344a +status: experimental +description: | + Rule to detect the use of the SCX RunAsProvider Invoke_ExecuteShellCommand to execute any UNIX/Linux command using the /bin/sh shell. + SCXcore, started as the Microsoft Operations Manager UNIX/Linux Agent, is now used in a host of products including + Microsoft Operations Manager, Microsoft Azure, and Microsoft Operations Management Suite. +references: + - https://www.wiz.io/blog/omigod-critical-vulnerabilities-in-omi-azure + - https://github.com/Azure/Azure-Sentinel/pull/3059 +author: Roberto Rodriguez (Cyb3rWard0g), OTR (Open Threat Research), MSTIC +date: 2021/10/15 +modified: 2022/10/05 +tags: + - attack.privilege_escalation + - attack.initial_access + - attack.execution + - attack.t1068 + - attack.t1190 + - attack.t1203 +logsource: + product: linux + category: process_creation +detection: + selection: + User: root + LogonId: 0 + CurrentDirectory: '/var/opt/microsoft/scx/tmp' + CommandLine|contains: '/bin/sh' + condition: selection +falsepositives: + - Legitimate use of SCX RunAsProvider Invoke_ExecuteShellCommand. +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_process_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_process_discovery.yml new file mode 100644 index 00000000..f61dbffc --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_process_discovery.yml @@ -0,0 +1,26 @@ +title: Process Discovery +id: 4e2f5868-08d4-413d-899f-dc2f1508627b +status: stable +description: | + Detects process discovery commands. Adversaries may attempt to get information about running processes on a system. 
+ Information obtained could be used to gain an understanding of common software/applications running on systems within the network +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1057/T1057.md +author: Ömer Günal, oscd.community +date: 2020/10/06 +modified: 2022/07/07 +tags: + - attack.discovery + - attack.t1057 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/ps' + - '/top' + condition: selection +falsepositives: + - Legitimate administration activities +level: informational diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_proxy_connection.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_proxy_connection.yml new file mode 100644 index 00000000..76707098 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_proxy_connection.yml @@ -0,0 +1,24 @@ +title: Connection Proxy +id: 72f4ab3f-787d-495d-a55d-68c2ff46cf4c +status: test +description: Detects setting proxy configuration +references: + - https://attack.mitre.org/techniques/T1090/ +author: Ömer Günal +date: 2020/06/17 +modified: 2022/10/05 +tags: + - attack.defense_evasion + - attack.t1090 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|contains: + - 'http_proxy=' + - 'https_proxy=' + condition: selection +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_python_pty_spawn.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_python_pty_spawn.yml new file mode 100644 index 00000000..f8bb8e7f --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_python_pty_spawn.yml @@ -0,0 +1,29 @@ +title: Python Spawning Pretty TTY +id: c4042d54-110d-45dd-a0e1-05c47822c937 +status: experimental +description: Detects python spawning a pretty tty +references: + - https://www.volexity.com/blog/2022/06/02/zero-day-exploitation-of-atlassian-confluence/ +author: Nextron Systems +date: 2022/06/03 +tags: + - attack.execution + - attack.t1059 +logsource: + category: process_creation + product: linux +detection: + selection_image: + Image|contains: + - '/python2.' # python image is always of the form ../python3.10; ../python is just a symlink + - '/python3.' + selection_cli1: + CommandLine|contains|all: + - 'import pty' + - '.spawn(' + selection_cli2: + CommandLine|contains: 'from pty import spawn' + condition: selection_image and 1 of selection_cli* +falsepositives: + - Unknown +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_remote_system_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_remote_system_discovery.yml new file mode 100644 index 00000000..a8f917a0 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_remote_system_discovery.yml @@ -0,0 +1,46 @@ +title: Linux Remote System Discovery +id: 11063ec2-de63-4153-935e-b1a8b9e616f1 +status: test +description: Detects the enumeration of other remote systems. 
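+# Illustrative, assumed examples (not from the rule): commands such as
+#   arp -a
+#   ping 192.168.1.10
+# would satisfy selection_1 and selection_2 below, respectively.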
+references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1018/T1018.md +author: Alejandro Ortuno, oscd.community +date: 2020/10/22 +modified: 2021/11/27 +tags: + - attack.discovery + - attack.t1018 +logsource: + category: process_creation + product: linux +detection: + selection_1: + Image|endswith: '/arp' + CommandLine|contains: '-a' + selection_2: + Image|endswith: '/ping' + CommandLine|contains: + - ' 10.' #10.0.0.0/8 + - ' 192.168.' #192.168.0.0/16 + - ' 172.16.' #172.16.0.0/12 + - ' 172.17.' + - ' 172.18.' + - ' 172.19.' + - ' 172.20.' + - ' 172.21.' + - ' 172.22.' + - ' 172.23.' + - ' 172.24.' + - ' 172.25.' + - ' 172.26.' + - ' 172.27.' + - ' 172.28.' + - ' 172.29.' + - ' 172.30.' + - ' 172.31.' + - ' 127.' #127.0.0.0/8 + - ' 169.254.' #169.254.0.0/16 + condition: 1 of selection* +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_schedule_task_job_cron.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_schedule_task_job_cron.yml new file mode 100644 index 00000000..b5aeb291 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_schedule_task_job_cron.yml @@ -0,0 +1,25 @@ +title: Scheduled Cron Task/Job +id: 6b14bac8-3e3a-4324-8109-42f0546a347f +status: test +description: Detects abuse of the cron utility to perform task scheduling for initial or recurring execution of malicious code. Detection will focus on crontab jobs uploaded from the tmp folder. +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1053.003/T1053.003.md +author: Alejandro Ortuno, oscd.community +date: 2020/10/06 +modified: 2021/11/27 +tags: + - attack.execution + - attack.persistence + - attack.privilege_escalation + - attack.t1053.003 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: 'crontab' + CommandLine|contains: '/tmp/' + condition: selection +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_software_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_software_discovery.yml new file mode 100644 index 00000000..b5c8e862 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_software_discovery.yml @@ -0,0 +1,34 @@ +title: Security Software Discovery +id: c9d8b7fd-78e4-44fe-88f6-599135d46d60 +status: test +description: Detects usage of system utilities (only grep and egrep for now) to discover installed security software +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1518.001/T1518.001.md +author: Daniil Yugoslavskiy, oscd.community +date: 2020/10/19 +modified: 2022/09/15 +tags: + - attack.discovery + - attack.t1518.001 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + # You can add more grep variations such as fgrep, rgrep...etc + - '/grep' + - '/egrep' + CommandLine|contains: + - 'nessusd' # nessus vulnerability scanner + - 'td-agent' # fluentd log shipper + - 'packetbeat' # elastic network logger/shipper + - 'filebeat' # elastic log file shipper + - 'auditbeat' # elastic auditing agent/log shipper + - 'osqueryd' # facebook osquery + -
'cbagentd' # carbon black + - 'falcond' # crowdstrike falcon + condition: selection +falsepositives: + - Legitimate activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_tools_disabling.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_tools_disabling.yml new file mode 100644 index 00000000..fa83e7f3 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_security_tools_disabling.yml @@ -0,0 +1,83 @@ +title: Disabling Security Tools +id: e3a8a052-111f-4606-9aee-f28ebeb76776 +status: test +description: Detects disabling security tools +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1562.004/T1562.004.md +author: Ömer Günal, Alejandro Ortuno, oscd.community +date: 2020/06/17 +modified: 2022/10/09 +tags: + - attack.defense_evasion + - attack.t1562.004 +logsource: + category: process_creation + product: linux +detection: + selection_iptables_1: + Image|endswith: '/service' + CommandLine|contains|all: + - 'iptables' + - 'stop' + selection_iptables_2: + Image|endswith: '/service' + CommandLine|contains|all: + - 'ip6tables' + - 'stop' + selection_iptables_3: + Image|endswith: '/chkconfig' + CommandLine|contains|all: + - 'iptables' + - 'stop' + selection_iptables_4: + Image|endswith: '/chkconfig' + CommandLine|contains|all: + - 'ip6tables' + - 'stop' + selection_firewall_1: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'firewalld' + - 'stop' + selection_firewall_2: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'firewalld' + - 'disable' + selection_carbonblack_1: + Image|endswith: '/service' + CommandLine|contains|all: + - 'cbdaemon' + - 'stop' + selection_carbonblack_2: + Image|endswith: '/chkconfig' + CommandLine|contains|all: + - 'cbdaemon' + - 'off' + selection_carbonblack_3: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'cbdaemon' + - 'stop' + selection_carbonblack_4: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'cbdaemon' + - 'disable' + selection_selinux: + Image|endswith: '/setenforce' + CommandLine|contains: '0' + selection_crowdstrike_1: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'stop' + - 'falcon-sensor' + selection_crowdstrike_2: + Image|endswith: '/systemctl' + CommandLine|contains|all: + - 'disable' + - 'falcon-sensor' + condition: 1 of selection* +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_services_stop_and_disable.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_services_stop_and_disable.yml new file mode 100644 index 00000000..b780d436 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_services_stop_and_disable.yml @@ -0,0 +1,26 @@ +title: Disable Or Stop Services +id: de25eeb8-3655-4643-ac3a-b662d3f26b6b +status: experimental +description: Detects the usage of utilities such as 'systemctl', 'service'...etc to stop or disable tools and services +references: + - https://www.trendmicro.com/pl_pl/research/20/i/the-evolution-of-malicious-shell-scripts.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.defense_evasion +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + - '/service' + - '/systemctl' + - '/chkconfig' + CommandLine|contains: + - 
'stop' + - 'disable' + condition: selection +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_setgid_setuid.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_setgid_setuid.yml new file mode 100644 index 00000000..4f77b1cf --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_setgid_setuid.yml @@ -0,0 +1,26 @@ +title: Setuid and Setgid +id: c21c4eaa-ba2e-419a-92b2-8371703cbe21 +status: test +description: Detects suspicious change of file privileges with chown and chmod commands +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1548.001/T1548.001.md + - https://attack.mitre.org/techniques/T1548/001/ +author: Ömer Günal +date: 2020/06/16 +modified: 2022/10/05 +tags: + - attack.persistence +logsource: + product: linux + category: process_creation +detection: + selection_root: + CommandLine|contains: 'chown root' + selection_perm: + CommandLine|contains: + - ' chmod u+s' + - ' chmod g+s' + condition: all of selection_* +falsepositives: + - Legitimate administration activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_sudo_cve_2019_14287.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_sudo_cve_2019_14287.yml new file mode 100644 index 00000000..ab79e370 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_sudo_cve_2019_14287.yml @@ -0,0 +1,26 @@ +title: Sudo Privilege Escalation CVE-2019-14287 +id: f74107df-b6c6-4e80-bf00-4170b658162b +status: experimental +description: Detects users trying to exploit sudo vulnerability reported in CVE-2019-14287 +references: + - https://www.openwall.com/lists/oss-security/2019/10/14/1 + - https://access.redhat.com/security/cve/cve-2019-14287 + - https://twitter.com/matthieugarin/status/1183970598210412546 +author: Florian Roth +date: 2019/10/15 +modified: 2022/10/05 +tags: + - attack.privilege_escalation + - attack.t1068 + - attack.t1548.003 + - cve.2019.14287 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|contains: ' -u#' + condition: selection +falsepositives: + - Unlikely +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_chmod_directories.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_chmod_directories.yml new file mode 100644 index 00000000..3aab4ebb --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_chmod_directories.yml @@ -0,0 +1,27 @@ +title: Chmod Suspicious Directory +id: 6419afd1-3742-47a5-a7e6-b50386cd15f8 +status: experimental +description: Detects chmod targeting files in abnormal directory paths. +references: + - https://www.intezer.com/blog/malware-analysis/new-backdoor-sysjoker/ + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1222.002/T1222.002.md +author: 'Christopher Peacock @SecurePeacock, SCYTHE @scythe_io' +date: 2022/06/03 +tags: + - attack.defense_evasion + - attack.t1222.002 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: '/chmod' + CommandLine|contains: + - '/tmp/' + - '/.Library/' + - '/etc/' + - '/opt/' + condition: selection +falsepositives: + - Admin changing file permissions. 
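+# Assumed, illustrative matches (the target paths are hypothetical): 'chmod +x /tmp/payload' or
+# 'chmod 0777 /etc/cron.d/job' both place the target under one of the listed directories above.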
+level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_fileupload.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_fileupload.yml new file mode 100644 index 00000000..8bb4c2be --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_fileupload.yml @@ -0,0 +1,37 @@ +title: Suspicious Curl File Upload - Linux +id: 00b90cc1-17ec-402c-96ad-3a8117d7a582 +related: + - id: 00bca14a-df4e-4649-9054-3f2aa676bc04 + type: derived +status: experimental +description: Detects a suspicious curl process start that adds a file to a web request +references: + - https://twitter.com/d1r4c/status/1279042657508081664 + - https://medium.com/@petehouston/upload-files-with-curl-93064dcccc76 + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1105/T1105.md#atomic-test-19---curl-upload-file + - https://curl.se/docs/manpage.html + - https://www.trendmicro.com/en_us/research/22/i/how-malicious-actors-abuse-native-linux-tools-in-their-attacks.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.exfiltration + - attack.t1567 + - attack.t1105 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/curl' + CommandLine|contains: + - ' -F ' + - ' --form' # Also covers the "--form-string" + - ' -T ' + - ' --upload-file ' + - ' -d ' + - ' --data ' + - ' --data-' # For flags like: "--data-ascii", "--data-binary", "--data-raw", "--data-urlencode" + condition: selection +falsepositives: + - Scripts created by developers and admins +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_useragent.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_useragent.yml new file mode 100644 index 00000000..901400b7 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_curl_useragent.yml @@ -0,0 +1,28 @@ +title: Suspicious Curl Change User Agents - Linux +id: b86d356d-6093-443d-971c-9b07db583c68 +related: + - id: 3286d37a-00fd-41c2-a624-a672dcd34e60 + type: derived +status: experimental +description: Detects a suspicious curl process start on linux with a user agent option set +references: + - https://curl.se/docs/manpage.html +author: Nasreddine Bencherchali +date: 2022/09/15 +tags: + - attack.command_and_control + - attack.t1071.001 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/curl' + CommandLine|contains: + - ' -A ' + - ' --user-agent ' + condition: selection +falsepositives: + - Scripts created by developers and admins + - Administrative activity +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_delete.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_delete.yml new file mode 100644 index 00000000..33398ced --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_delete.yml @@ -0,0 +1,34 @@ +title: History File Deletion +id: 1182f3b3-e716-4efa-99ab-d2685d04360f +status: experimental +description: Detects events in which a history file gets deleted, e.g.
the ~/bash_history to remove traces of malicious activity +references: + - https://github.com/sleventyeleven/linuxprivchecker/ + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1552.003/T1552.003.md +author: Florian Roth +date: 2022/06/20 +modified: 2022/09/15 +tags: + - attack.impact + - attack.t1565.001 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + - '/rm' + - '/unlink' + - '/shred' + selection_history: + - CommandLine|contains: + - '/.bash_history' + - '/.zsh_history' + - CommandLine|endswith: + - '_history' + - '.history' + - 'zhistory' + condition: all of selection* +falsepositives: + - Legitimate administration activities +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_recon.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_recon.yml new file mode 100644 index 00000000..ace28ef4 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_history_recon.yml @@ -0,0 +1,35 @@ +title: Print History File Contents +id: d7821ff1-4527-4e33-9f84-d0d57fa2fb66 +status: experimental +description: Detects events in which someone prints the contents of history files to the commandline or redirects it to a file for reconnaissance +references: + - https://github.com/sleventyeleven/linuxprivchecker/ + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1552.003/T1552.003.md +author: Florian Roth +date: 2022/06/20 +modified: 2022/09/15 +tags: + - attack.reconnaissance + - attack.t1592.004 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + - '/cat' + - '/head' + - '/tail' + - '/more' + selection_history: + - CommandLine|contains: + - '/.bash_history' + - '/.zsh_history' + - CommandLine|endswith: + - '_history' + - '.history' + - 'zhistory' + condition: all of selection* +falsepositives: + - Legitimate administration activities +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_interactive_bash.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_interactive_bash.yml new file mode 100644 index 00000000..6b79735c --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_interactive_bash.yml @@ -0,0 +1,30 @@ +title: Interactive Bash Suspicious Children +id: ea3ecad2-db86-4a89-ad0b-132a10d2db55 +status: experimental +description: Detects suspicious interactive bash as a parent to rather uncommon child processes +references: + - Internal Research +author: Florian Roth +date: 2022/03/14 +logsource: + product: linux + category: process_creation +detection: + selection: + ParentCommandLine: 'bash -i' + anomaly1: + CommandLine|contains: + - '-c import ' + - 'base64' + - 'pty.spawn' + anomaly2: + Image|endswith: + - 'whoami' + - 'iptables' + - '/ncat' + - '/nc' + - '/netcat' + condition: selection and 1 of anomaly* +falsepositives: + - Legitimate software that uses these patterns +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_java_children.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_java_children.yml new file mode 100644 index 00000000..cad38873 --- /dev/null +++ 
b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_java_children.yml @@ -0,0 +1,32 @@ +title: Suspicious Java Children Processes +id: d292e0af-9a18-420c-9525-ec0ac3936892 +status: experimental +description: Detects java process spawning suspicious children +references: + - https://www.tecmint.com/different-types-of-linux-shells/ +author: Nasreddine Bencherchali +date: 2022/06/03 +tags: + - attack.execution + - attack.t1059 +logsource: + category: process_creation + product: linux +detection: + selection: + ParentImage|endswith: '/java' + CommandLine|contains: + - '/bin/sh' + - 'bash' + - 'dash' + - 'ksh' + - 'zsh' + - 'csh' + - 'fish' + - 'curl' + - 'wget' + - 'python' + condition: selection +falsepositives: + - Unknown +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_pipe_shell.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_pipe_shell.yml new file mode 100644 index 00000000..0d2659fa --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_pipe_shell.yml @@ -0,0 +1,35 @@ +title: Linux Shell Pipe to Shell +id: 880973f3-9708-491c-a77b-2a35a1921158 +status: experimental +description: Detects suspicious process command line that starts with a shell that executes something and finally gets piped into another shell +references: + - Internal Research +author: Florian Roth +date: 2022/03/14 +modified: 2022/07/26 +tags: + - attack.defense_evasion + - attack.t1140 +logsource: + product: linux + category: process_creation +detection: + selection: + CommandLine|startswith: + - 'sh -c ' + - 'bash -c ' + selection_exec: + - CommandLine|contains: + - '| bash ' + - '| sh ' + - '|bash ' + - '|sh ' + - CommandLine|endswith: + - '| bash' + - '| sh' + - '|bash' + - ' |sh' + condition: all of selection* +falsepositives: + - Legitimate software that uses these patterns +level: medium diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_recon_indicators.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_recon_indicators.yml new file mode 100644 index 00000000..7eeba539 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_susp_recon_indicators.yml @@ -0,0 +1,25 @@ +title: Linux Recon Indicators +id: 0cf7a157-8879-41a2-8f55-388dd23746b7 +status: experimental +description: Detects events with patterns found in commands used for reconnaissance on linux systems +references: + - https://github.com/sleventyeleven/linuxprivchecker/blob/0d701080bbf92efd464e97d71a70f97c6f2cd658/linuxprivchecker.py +author: Florian Roth +date: 2022/06/20 +tags: + - attack.reconnaissance + - attack.t1592.004 + - attack.credential_access + - attack.t1552.001 +logsource: + category: process_creation + product: linux +detection: + selection: + CommandLine|contains: + - ' -name .htpasswd' + - ' -perm -4000 ' + condition: selection +falsepositives: + - Legitimate administration activities +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_info_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_info_discovery.yml new file mode 100644 index 00000000..f45de992 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_info_discovery.yml @@ -0,0 +1,29 @@ +title: System Information Discovery +id: 42df45e7-e6e9-43b5-8f26-bec5b39cc239 +status: stable 
+description: Detects system information discovery commands +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1082/T1082.md +author: Ömer Günal, oscd.community +date: 2020/10/08 +modified: 2021/09/14 +tags: + - attack.discovery + - attack.t1082 +logsource: + product: linux + category: process_creation +detection: + selection: + Image|endswith: + - '/uname' + - '/hostname' + - '/uptime' + - '/lspci' + - '/dmidecode' + - '/lscpu' + - '/lsmod' + condition: selection +falsepositives: + - Legitimate administration activities +level: informational diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_connections_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_connections_discovery.yml new file mode 100644 index 00000000..3413ecc7 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_connections_discovery.yml @@ -0,0 +1,27 @@ +title: System Network Connections Discovery +id: 4c519226-f0cd-4471-bd2f-6fbb2bb68a79 +status: test +description: Detects usage of system utilities to discover system network connections +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1049/T1049.md +author: Daniil Yugoslavskiy, oscd.community +date: 2020/10/19 +modified: 2021/11/27 +tags: + - attack.discovery + - attack.t1049 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: + - '/who' + - '/w' + - '/last' + - '/lsof' + - '/netstat' + condition: selection +falsepositives: + - Legitimate activities +level: low diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_discovery.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_discovery.yml new file mode 100644 index 00000000..a3516089 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_system_network_discovery.yml @@ -0,0 +1,32 @@ +title: System Network Discovery - Linux +id: e7bd1cfa-b446-4c88-8afb-403bcd79e3fa +status: test +description: Detects enumeration of local network configuration +references: + - https://github.com/redcanaryco/atomic-red-team/blob/f339e7da7d05f6057fdfcdd3742bfcf365fee2a9/atomics/T1016/T1016.md +author: Ömer Günal and remotephone, oscd.community +date: 2020/10/06 +modified: 2022/09/15 +tags: + - attack.discovery + - attack.t1016 +logsource: + category: process_creation + product: linux +detection: + selection: + - Image|endswith: + - '/firewall-cmd' + - '/ufw' + - '/iptables' + - '/netstat' + - '/ss' + - '/ip' + - '/ifconfig' + - '/systemd-resolve' + - '/route' + - CommandLine|contains: '/etc/resolv.conf' + condition: selection +falsepositives: + - Legitimate administration activities +level: informational diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_execve_hijack.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_execve_hijack.yml new file mode 100644 index 00000000..8c915ef5 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_execve_hijack.yml @@ -0,0 +1,22 @@ +title: Triple Cross eBPF Rootkit Execve Hijack +id: 0326c3c8-7803-4a0f-8c5c-368f747f7c3e +status: experimental +description: Detects execution of the file
"execve_hijack" which is used by the Triple Cross rootkit as a way to elevate privileges +references: + - https://github.com/h3xduck/TripleCross/blob/1f1c3e0958af8ad9f6ebe10ab442e75de33e91de/src/helpers/execve_hijack.c#L275 +author: Nasreddine Bencherchali +date: 2022/07/05 +tags: + - attack.defense_evasion + - attack.privilege_escalation +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/sudo' + CommandLine|contains: 'execve_hijack' + condition: selection +falsepositives: + - Unlikely +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_install.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_install.yml new file mode 100644 index 00000000..d3d5d5d3 --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_triple_cross_rootkit_install.yml @@ -0,0 +1,27 @@ +title: Triple Cross eBPF Rootkit Install Commands +id: 22236d75-d5a0-4287-bf06-c93b1770860f +status: experimental +description: Detects default install commands of the Triple Cross eBPF rootkit based on the "deployer.sh" script +references: + - https://github.com/h3xduck/TripleCross/blob/1f1c3e0958af8ad9f6ebe10ab442e75de33e91de/apps/deployer.sh +author: Nasreddine Bencherchali +date: 2022/07/05 +tags: + - attack.defense_evasion + - attack.t1014 +logsource: + category: process_creation + product: linux +detection: + selection: + Image|endswith: '/sudo' + CommandLine|contains|all: + - ' tc ' + - ' enp0s3 ' + CommandLine|contains: + - ' qdisc ' + - ' filter ' + condition: selection +falsepositives: + - Unlikely +level: high diff --git a/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_webshell_detection.yml b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_webshell_detection.yml new file mode 100644 index 00000000..7d6452ab --- /dev/null +++ b/resources/policies/sigma/rules/linux/process_creation/proc_creation_lnx_webshell_detection.yml @@ -0,0 +1,53 @@ +title: Linux Webshell Indicators +id: 818f7b24-0fba-4c49-a073-8b755573b9c7 +status: experimental +description: Detects suspicious sub processes of web server processes +references: + - https://www.acunetix.com/blog/articles/web-shells-101-using-php-introduction-web-shells-part-2/ + - https://media.defense.gov/2020/Jun/09/2002313081/-1/-1/0/CSI-DETECT-AND-PREVENT-WEB-SHELL-MALWARE-20200422.PDF +author: Florian Roth, Nasreddine Bencherchali (update) +date: 2021/10/15 +modified: 2022/08/01 +tags: + - attack.persistence + - attack.t1505.003 +logsource: + product: linux + category: process_creation +detection: + selection_general: + ParentImage|contains: + - '/httpd' + - '/lighttpd' + - '/nginx' + - '/apache2' + - '/node' + - '/caddy' + selection_tomcat: + ParentCommandLine|contains|all: + - '/bin/java' + - 'tomcat' + selection_websphere: # ? 
just guessing + ParentCommandLine|contains|all: + - '/bin/java' + - 'websphere' + sub_processes: + Image|contains: + - '/whoami' + - '/ifconfig' + - '/usr/bin/ip' + - '/usr/sbin/ip' + - '/bin/uname' + - '/bin/cat' + - '/bin/sh' + - '/bin/bash' + - '/bin/crontab' + - '/hostname' + - '/iptables' + - '/netstat' + - '/pwd' + - '/route' + condition: 1 of selection_* and sub_processes +falsepositives: + - Web applications that invoke Linux command line tools +level: high diff --git a/resources/policies/tests/falco.yaml b/resources/policies/tests/falco.yaml new file mode 100644 index 00000000..043764ab --- /dev/null +++ b/resources/policies/tests/falco.yaml @@ -0,0 +1,3062 @@ +# +# For tests only. +# + +# See xxx for details on falco engine and rules versioning. Currently, +# this specific rules file is compatible with engine version 0 +# (e.g. falco releases <= 0.13.1), so we'll keep the +# required_engine_version lines commented out, so maintain +# compatibility with older falco releases. With the first incompatible +# change to this rules file, we'll uncomment this line and set it to +# the falco engine version in use at the time. +# +- required_engine_version: 7 + +# Currently disabled as read/write are ignored syscalls. The nearly +# similar open_write/open_read check for files being opened for +# reading/writing. +# - macro: write +# condition: (syscall.type=write and fd.type in (file, directory)) +# - macro: read +# condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) + +- macro: open_write + condition: (evt.type=open or evt.type=openat) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0 + +- macro: open_read + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0 + +- macro: open_directory + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='d' and fd.num>=0 + +- macro: never_true + condition: (evt.num=0) + +- macro: always_true + condition: (evt.num>=0) + +# In some cases, such as dropped system call events, information about +# the process name may be missing. For some rules that really depend +# on the identity of the process performing an action such as opening +# a file, etc., we require that the process name be known. 
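+# Assumed usage note (illustration, not taken from this file): macros like the one below are
+# referenced by name inside rule conditions and combined with boolean operators, e.g.
+#   condition: open_write and proc_name_exists and sensitive_files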
+- macro: proc_name_exists + condition: (proc.name!="") + +- macro: rename + condition: evt.type in (rename, renameat, renameat2) + +- macro: mkdir + condition: evt.type in (mkdir, mkdirat) + +- macro: remove + condition: evt.type in (rmdir, unlink, unlinkat) + +- macro: modify + condition: rename or remove + +- macro: spawned_process + condition: evt.type = execve and evt.dir=< + +- macro: create_symlink + condition: evt.type in (symlink, symlinkat) and evt.dir=< + +- macro: chmod + condition: (evt.type in (chmod, fchmod, fchmodat) and evt.dir=<) + +# File categories +- macro: bin_dir + condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin) + +- macro: bin_dir_mkdir + condition: > + (evt.arg.path startswith /bin/ or + evt.arg.path startswith /sbin/ or + evt.arg.path startswith /usr/bin/ or + evt.arg.path startswith /usr/sbin/) + +- macro: bin_dir_rename + condition: > + (evt.arg.path startswith /bin/ or + evt.arg.path startswith /sbin/ or + evt.arg.path startswith /usr/bin/ or + evt.arg.path startswith /usr/sbin/ or + evt.arg.name startswith /bin/ or + evt.arg.name startswith /sbin/ or + evt.arg.name startswith /usr/bin/ or + evt.arg.name startswith /usr/sbin/ or + evt.arg.oldpath startswith /bin/ or + evt.arg.oldpath startswith /sbin/ or + evt.arg.oldpath startswith /usr/bin/ or + evt.arg.oldpath startswith /usr/sbin/ or + evt.arg.newpath startswith /bin/ or + evt.arg.newpath startswith /sbin/ or + evt.arg.newpath startswith /usr/bin/ or + evt.arg.newpath startswith /usr/sbin/) + +- macro: etc_dir + condition: fd.name startswith /etc/ + +# This detects writes immediately below / or any write anywhere below /root +- macro: root_dir + condition: (fd.directory=/ or fd.name startswith /root/) + +- list: shell_binaries + items: [ash, bash, csh, ksh, sh, tcsh, zsh, dash] + +- list: ssh_binaries + items: [ + sshd, sftp-server, ssh-agent, + ssh, scp, sftp, + ssh-keygen, ssh-keysign, ssh-keyscan, ssh-add + ] + +- list: shell_mgmt_binaries + items: [add-shell, remove-shell] + +- macro: shell_procs + condition: proc.name in (shell_binaries) + +- list: coreutils_binaries + items: [ + truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, + groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, + basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, + base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, + comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, + tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, + tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, + tac, link, chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, + chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, + readlink, sleep, stty, mkdir, df, dir, rmdir, touch + ] + +# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: login_binaries + items: [ + login, systemd, '"(systemd)"', systemd-logind, su, + nologin, faillog, lastlog, newgrp, sg + ] + +# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: passwd_binaries + items: [ + shadowconfig, grpck, pwunconv, grpconv, pwck, + groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, + groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, + gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, deluser, delgroup + ] + +# repoquery -l shadow-utils | grep bin | xargs ls 
-ld | grep -v '^d' | +# awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: shadowutils_binaries + items: [ + chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, + groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, + newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd + ] + +- list: sysdigcloud_binaries + items: [setup-backend, dragent, sdchecks] + +- list: docker_binaries + items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] + +- list: k8s_binaries + items: [hyperkube, skydns, kube2sky, exechealthz, weave-net, loopback, bridge, openshift-sdn, openshift] + +- list: lxd_binaries + items: [lxd, lxcfs] + +- list: http_server_binaries + items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] + +- list: db_server_binaries + items: [mysqld, postgres, sqlplus] + +- list: mysql_mgmt_binaries + items: [mysql_install_d, mysql_ssl_rsa_s] + +- list: postgres_mgmt_binaries + items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] + +- list: db_mgmt_binaries + items: [mysql_mgmt_binaries, postgres_mgmt_binaries] + +- list: nosql_server_binaries + items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] + +- list: gitlab_binaries + items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] + +- list: interpreted_binaries + items: [lua, node, perl, perl5, perl6, php, python, python2, python3, ruby, tcl] + +- macro: interpreted_procs + condition: > + (proc.name in (interpreted_binaries)) + +- macro: server_procs + condition: proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd) + +# The explicit quotes are needed to avoid the - characters being +# interpreted by the filter expression. +- list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] + +- list: openscap_rpm_binaries + items: [probe_rpminfo, probe_rpmverify, probe_rpmverifyfile, probe_rpmverifypackage] + +- macro: rpm_procs + condition: (proc.name in (rpm_binaries, openscap_rpm_binaries) or proc.name in (salt-minion)) + +- list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit, apt-config, apt-cache + ] + +# The truncated dpkg-preconfigu is intentional, process names are +# truncated at the sysdig level. 
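+# (Process names come from the kernel's fixed-size comm field, which holds at
+# most 15 characters, so, for example, "dpkg-preconfigure" is reported here as
+# "dpkg-preconfigu".)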
+- list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client, apk, snapd] + +- macro: package_mgmt_procs + condition: proc.name in (package_mgmt_binaries) + +- macro: package_mgmt_ancestor_procs + condition: proc.pname in (package_mgmt_binaries) or + proc.aname[2] in (package_mgmt_binaries) or + proc.aname[3] in (package_mgmt_binaries) or + proc.aname[4] in (package_mgmt_binaries) + +- macro: coreos_write_ssh_dir + condition: (proc.name=update-ssh-keys and fd.name startswith /home/core/.ssh) + +- macro: run_by_package_mgmt_binaries + condition: proc.aname in (package_mgmt_binaries, needrestart) + +- list: ssl_mgmt_binaries + items: [ca-certificates] + +- list: dhcp_binaries + items: [dhclient, dhclient-script, 11-dhclient] + +# A canonical set of processes that run other programs with different +# privileges or as a different user. +- list: userexec_binaries + items: [sudo, su, suexec, critical-stack, dzdo] + +- list: known_setuid_binaries + items: [ + sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, + filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, + start-stop-daem + ] + +- list: user_mgmt_binaries + items: [login_binaries, passwd_binaries, shadowutils_binaries] + +- list: dev_creation_binaries + items: [blkid, rename_device, update_engine, sgdisk] + +- list: hids_binaries + items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, osqueryd, ossec-syscheckd] + +- list: vpn_binaries + items: [openvpn] + +- list: nomachine_binaries + items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] + +- macro: system_procs + condition: proc.name in (coreutils_binaries, user_mgmt_binaries) + +- list: mail_binaries + items: [ + sendmail, sendmail-msp, postfix, procmail, exim4, + pickup, showq, mailq, dovecot, imap-login, imap, + mailmng-core, pop3-login, dovecot-lda, pop3 + ] + +- list: mail_config_binaries + items: [ + update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, + update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., + postfix.config, postfix-script, postconf + ] + +- list: sensitive_file_names + items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf] + +- list: sensitive_directory_names + items: [/, /etc, /etc/, /root, /root/] + +- macro: sensitive_files + condition: > + fd.name startswith /etc and + (fd.name in (sensitive_file_names) + or fd.directory in (/etc/sudoers.d, /etc/pam.d)) + +# Indicates that the process is new. Currently detected using time +# since process was started, using a threshold of 5 seconds. 
+- macro: proc_is_new + condition: proc.duration <= 5000000000 + +# Network +- macro: inbound + condition: > + (((evt.type in (accept,listen) and evt.dir=<) or + (evt.type in (recvfrom,recvmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +# RFC1918 addresses were assigned for private network usage +- list: rfc_1918_addresses + items: ['"10.0.0.0/8"', '"172.16.0.0/12"', '"192.168.0.0/16"'] + +- macro: outbound + condition: > + (((evt.type = connect and evt.dir=<) or + (evt.type in (sendto,sendmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +# Very similar to inbound/outbound, but combines the tests together +# for efficiency. +- macro: inbound_outbound + condition: > + ((((evt.type in (accept,listen,connect) and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6)) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +- macro: ssh_port + condition: fd.sport=22 + +# In a local/user rules file, you could override this macro to +# enumerate the servers for which ssh connections are allowed. For +# example, you might have a ssh gateway host for which ssh connections +# are allowed. +# +# In the main falco rules file, there isn't any way to know the +# specific hosts for which ssh access is allowed, so this macro just +# repeats ssh_port, which effectively allows ssh from all hosts. In +# the overridden macro, the condition would look something like +# "fd.sip="a.b.c.d" or fd.sip="e.f.g.h" or ..." +- macro: allowed_ssh_hosts + condition: ssh_port + +- rule: Disallowed SSH Connection + desc: Detect any new ssh connection to a host other than those in an allowed group of hosts + condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts + output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_remote_service] + +# These rules and supporting macros are more of an example for how to +# use the fd.*ip and fd.*ip.name fields to match connection +# information against ips, netmasks, and complete domain names. +# +# To use this rule, you should modify consider_all_outbound_conns and +# populate allowed_{source,destination}_{ipaddrs,networks,domains} with the +# values that make sense for your environment. 
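+#
+# As a purely illustrative sketch (kept commented out so the shipped defaults
+# stay unchanged; 192.0.2.10 is a documentation-range placeholder address), a
+# local/user rules file might enable the rule and pin destinations like this:
+#
+# - macro: consider_all_outbound_conns
+#   condition: (evt.num>=0)   # upstream Falco's "always true" idiom
+#
+# - list: allowed_outbound_destination_ipaddrs
+#   items: ['"192.0.2.10"', '"8.8.8.8"']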
+- macro: consider_all_outbound_conns + condition: (never_true) + +# Note that this can be either individual IPs or netmasks +- list: allowed_outbound_destination_ipaddrs + items: ['"127.0.0.1"', '"8.8.8.8"'] + +- list: allowed_outbound_destination_networks + items: ['"127.0.0.1/8"'] + +- list: allowed_outbound_destination_domains + items: [google.com, www.yahoo.com] + +- rule: Unexpected outbound connection destination + desc: Detect any outbound connection to a destination outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_outbound_conns and outbound and not + ((fd.sip in (allowed_outbound_destination_ipaddrs)) or + (fd.snet in (allowed_outbound_destination_networks)) or + (fd.sip.name in (allowed_outbound_destination_domains))) + output: Disallowed outbound connection destination (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + +- macro: consider_all_inbound_conns + condition: (never_true) + +- list: allowed_inbound_source_ipaddrs + items: ['"127.0.0.1"'] + +- list: allowed_inbound_source_networks + items: ['"127.0.0.1/8"', '"10.0.0.0/8"'] + +- list: allowed_inbound_source_domains + items: [google.com] + +- rule: Unexpected inbound connection source + desc: Detect any inbound connection from a source outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_inbound_conns and inbound and not + ((fd.cip in (allowed_inbound_source_ipaddrs)) or + (fd.cnet in (allowed_inbound_source_networks)) or + (fd.cip.name in (allowed_inbound_source_domains))) + output: Disallowed inbound connection source (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + +- list: bash_config_filenames + items: [.bashrc, .bash_profile, .bash_history, .bash_login, .bash_logout, .inputrc, .profile] + +- list: bash_config_files + items: [/etc/profile, /etc/bashrc] + +# Covers both csh and tcsh +- list: csh_config_filenames + items: [.cshrc, .login, .logout, .history, .tcshrc, .cshdirs] + +- list: csh_config_files + items: [/etc/csh.cshrc, /etc/csh.login] + +- list: zsh_config_filenames + items: [.zshenv, .zprofile, .zshrc, .zlogin, .zlogout] + +- list: shell_config_filenames + items: [bash_config_filenames, csh_config_filenames, zsh_config_filenames] + +- list: shell_config_files + items: [bash_config_files, csh_config_files] + +- list: shell_config_directories + items: [/etc/zsh] + +- rule: Modify Shell Configuration File + desc: Detect attempt to modify shell configuration files + condition: > + open_write and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) + and not proc.name in (shell_binaries) + and not exe_running_docker_save + output: > + a shell configuration file has been modified (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_persistence] + +# This rule is not enabled by default, as there are many legitimate +# readers of shell config files. If you want to enable it, modify the +# following macro. 
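+#
+# For illustration only (commented out here), a local/user rules file could
+# switch the rule on by overriding the macro with an always-true condition,
+# e.g. using upstream Falco's evt.num idiom:
+#
+# - macro: consider_shell_config_reads
+#   condition: (evt.num>=0)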
+ +- macro: consider_shell_config_reads + condition: (never_true) + +- rule: Read Shell Configuration File + desc: Detect attempts to read shell configuration files by non-shell programs + condition: > + open_read and + consider_shell_config_reads and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) and + (not proc.name in (shell_binaries)) + output: > + a shell configuration file was read by a non-shell program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_discovery] + +- macro: consider_all_cron_jobs + condition: (never_true) + +- macro: user_known_cron_jobs + condition: (never_true) + +- rule: Schedule Cron Jobs + desc: Detect cron jobs scheduled + condition: > + ((open_write and fd.name startswith /etc/cron) or + (spawned_process and proc.name = "crontab")) and + consider_all_cron_jobs and + not user_known_cron_jobs + output: > + Cron jobs were scheduled to run (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + file=%fd.name container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tags: [file, mitre_persistence] + +# Use this to test whether the event occurred within a container. + +# When displaying container information in the output field, use +# %container.info, without any leading term (file=%fd.name +# %container.info user=%user.name user_loginuid=%user.loginuid, and not file=%fd.name +# container=%container.info user=%user.name user_loginuid=%user.loginuid). The output will change +# based on the context and whether or not -pk/-pm/-pc was specified on +# the command line. +- macro: container + condition: (container.id != host) + +- macro: container_started + condition: > + ((evt.type = container or + (spawned_process and proc.vpid=1)) and + container.image.repository != incomplete) + +- macro: interactive + condition: > + ((proc.aname=sshd and proc.name != sshd) or + proc.name=systemd-logind or proc.name=login) + +- list: cron_binaries + items: [anacron, cron, crond, crontab] + +# https://github.com/liske/needrestart +- list: needrestart_binaries + items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] + +# Possible scripts run by sshkit +- list: sshkit_script_binaries + items: [10_etc_sudoers., 10_passwd_group] + +- list: plesk_binaries + items: [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng] + +# System users that should never log into a system. Consider adding your own +# service users (e.g. 'apache' or 'mysqld') here. +- macro: system_users + condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data) + +# These macros will be removed soon. Only keeping them to maintain +# compatiblity with some widely used rules files. 
+# Begin Deprecated +- macro: parent_ansible_running_python + condition: (proc.pname in (python, pypy, python3) and proc.pcmdline contains ansible) + +- macro: parent_bro_running_python + condition: (proc.pname=python and proc.cmdline contains /usr/share/broctl) + +- macro: parent_python_running_denyhosts + condition: > + (proc.cmdline startswith "denyhosts.py /usr/bin/denyhosts.py" or + (proc.pname=python and + (proc.pcmdline contains /usr/sbin/denyhosts or + proc.pcmdline contains /usr/local/bin/denyhosts.py))) + +- macro: parent_python_running_sdchecks + condition: > + (proc.pname in (python, python2.7) and + (proc.pcmdline contains /opt/draios/bin/sdchecks)) + +- macro: python_running_sdchecks + condition: > + (proc.name in (python, python2.7) and + (proc.cmdline contains /opt/draios/bin/sdchecks)) + +- macro: parent_linux_image_upgrade_script + condition: proc.pname startswith linux-image- + +- macro: parent_java_running_echo + condition: (proc.pname=java and proc.cmdline startswith "sh -c echo") + +- macro: parent_scripting_running_builds + condition: > + (proc.pname in (php,php5-fpm,php-fpm7.1,python,ruby,ruby2.3,ruby2.1,node,conda) and ( + proc.cmdline startswith "sh -c git" or + proc.cmdline startswith "sh -c date" or + proc.cmdline startswith "sh -c /usr/bin/g++" or + proc.cmdline startswith "sh -c /usr/bin/gcc" or + proc.cmdline startswith "sh -c gcc" or + proc.cmdline startswith "sh -c if type gcc" or + proc.cmdline startswith "sh -c cd '/var/www/edi/';LC_ALL=en_US.UTF-8 git" or + proc.cmdline startswith "sh -c /var/www/edi/bin/sftp.sh" or + proc.cmdline startswith "sh -c /usr/src/app/crxlsx/bin/linux/crxlsx" or + proc.cmdline startswith "sh -c make parent" or + proc.cmdline startswith "node /jenkins/tools" or + proc.cmdline startswith "sh -c '/usr/bin/node'" or + proc.cmdline startswith "sh -c stty -a |" or + proc.pcmdline startswith "node /opt/nodejs/bin/yarn" or + proc.pcmdline startswith "node /usr/local/bin/yarn" or + proc.pcmdline startswith "node /root/.config/yarn" or + proc.pcmdline startswith "node /opt/yarn/bin/yarn.js")) + + +- macro: httpd_writing_ssl_conf + condition: > + (proc.pname=run-httpd and + (proc.cmdline startswith "sed -ri" or proc.cmdline startswith "sed -i") and + (fd.name startswith /etc/httpd/conf.d/ or fd.name startswith /etc/httpd/conf)) + +- macro: userhelper_writing_etc_security + condition: (proc.name=userhelper and fd.name startswith /etc/security) + +- macro: parent_Xvfb_running_xkbcomp + condition: (proc.pname=Xvfb and proc.cmdline startswith 'sh -c "/usr/bin/xkbcomp"') + +- macro: parent_nginx_running_serf + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c serf") + +- macro: parent_node_running_npm + condition: (proc.pcmdline startswith "node /usr/local/bin/npm" or + proc.pcmdline startswith "node /usr/local/nodejs/bin/npm" or + proc.pcmdline startswith "node /opt/rh/rh-nodejs6/root/usr/bin/npm") + +- macro: parent_java_running_sbt + condition: (proc.pname=java and proc.pcmdline contains sbt-launch.jar) + +- list: known_container_shell_spawn_cmdlines + items: [] + +- list: known_shell_spawn_binaries + items: [] + +## End Deprecated + +- macro: ansible_running_python + condition: (proc.name in (python, pypy, python3) and proc.cmdline contains ansible) + +- macro: python_running_chef + condition: (proc.name=python and (proc.cmdline contains yum-dump.py or proc.cmdline="python /usr/bin/chef-monitor.py")) + +- macro: python_running_denyhosts + condition: > + (proc.name=python and + (proc.cmdline contains /usr/sbin/denyhosts or + 
proc.cmdline contains /usr/local/bin/denyhosts.py)) + +# Qualys seems to run a variety of shell subprocesses, at various +# levels. This checks at a few levels without the cost of a full +# proc.aname, which traverses the full parent heirarchy. +- macro: run_by_qualys + condition: > + (proc.pname=qualys-cloud-ag or + proc.aname[2]=qualys-cloud-ag or + proc.aname[3]=qualys-cloud-ag or + proc.aname[4]=qualys-cloud-ag) + +- macro: run_by_sumologic_securefiles + condition: > + ((proc.cmdline="usermod -a -G sumologic_collector" or + proc.cmdline="groupadd sumologic_collector") and + (proc.pname=secureFiles.sh and proc.aname[2]=java)) + +- macro: run_by_yum + condition: ((proc.pname=sh and proc.aname[2]=yum) or + (proc.aname[2]=sh and proc.aname[3]=yum)) + +- macro: run_by_ms_oms + condition: > + (proc.aname[3] startswith omsagent- or + proc.aname[3] startswith scx-) + +- macro: run_by_google_accounts_daemon + condition: > + (proc.aname[1] startswith google_accounts or + proc.aname[2] startswith google_accounts or + proc.aname[3] startswith google_accounts) + +# Chef is similar. +- macro: run_by_chef + condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or + proc.aname[2]=chef-client or proc.aname[3]=chef-client or + proc.name=chef-client) + +- macro: run_by_adclient + condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient) + +- macro: run_by_centrify + condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) + +- macro: run_by_puppet + condition: (proc.aname[2]=puppet or proc.aname[3]=puppet) + +# Also handles running semi-indirectly via scl +- macro: run_by_foreman + condition: > + (user.name=foreman and + ((proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or + (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby)))) + +- macro: java_running_sdjagent + condition: proc.name=java and proc.cmdline contains sdjagent.jar + +- macro: kubelet_running_loopback + condition: (proc.pname=kubelet and proc.name=loopback) + +- macro: python_mesos_marathon_scripting + condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") + +- macro: splunk_running_forwarder + condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") + +- macro: parent_supervise_running_multilog + condition: (proc.name=multilog and proc.pname=supervise) + +- macro: supervise_writing_status + condition: (proc.name in (supervise,svc) and fd.name startswith "/etc/sb/") + +- macro: pki_realm_writing_realms + condition: (proc.cmdline startswith "bash /usr/local/lib/pki/pki-realm" and fd.name startswith /etc/pki/realms) + +- macro: htpasswd_writing_passwd + condition: (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd) + +- macro: lvprogs_writing_conf + condition: > + (proc.name in (dmeventd,lvcreate,pvscan,lvs) and + (fd.name startswith /etc/lvm/archive or + fd.name startswith /etc/lvm/backup or + fd.name startswith /etc/lvm/cache)) + +- macro: ovsdb_writing_openvswitch + condition: (proc.name=ovsdb-server and fd.directory=/etc/openvswitch) + +- macro: perl_running_plesk + condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or + proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") + +- macro: perl_running_updmap + condition: (proc.cmdline startswith "perl /usr/bin/updmap") + +- macro: perl_running_centrifydc + condition: (proc.cmdline startswith "perl /usr/share/centrifydc") + +- macro: runuser_reading_pam + condition: 
(proc.name=runuser and fd.directory=/etc/pam.d) + +# CIS Linux Benchmark program +- macro: linux_bench_reading_etc_shadow + condition: ((proc.aname[2]=linux-bench and + proc.name in (awk,cut,grep)) and + (fd.name=/etc/shadow or + fd.directory=/etc/pam.d)) + +- macro: parent_ucf_writing_conf + condition: (proc.pname=ucf and proc.aname[2]=frontend) + +- macro: consul_template_writing_conf + condition: > + ((proc.name=consul-template and fd.name startswith /etc/haproxy) or + (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name startswith /etc/ssl)) + +- macro: countly_writing_nginx_conf + condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) + +- list: ms_oms_binaries + items: [omi.postinst, omsconfig.posti, scx.postinst, omsadmin.sh, omiagent] + +- macro: ms_oms_writing_conf + condition: > + ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor,dsc_host) + or proc.pname in (ms_oms_binaries) + or proc.aname[2] in (ms_oms_binaries)) + and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) + +- macro: ms_scx_writing_conf + condition: (proc.name in (GetLinuxOS.sh) and fd.name startswith /etc/opt/microsoft/scx) + +- macro: azure_scripts_writing_conf + condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) + +- macro: azure_networkwatcher_writing_conf + condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) + +- macro: couchdb_writing_conf + condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) + +- macro: update_texmf_writing_conf + condition: (proc.name=update-texmf and fd.name startswith /etc/texmf) + +- macro: slapadd_writing_conf + condition: (proc.name=slapadd and fd.name startswith /etc/ldap) + +- macro: openldap_writing_conf + condition: (proc.pname=run-openldap.sh and fd.name startswith /etc/openldap) + +- macro: ucpagent_writing_conf + condition: (proc.name=apiserver and container.image.repository=docker/ucp-agent and fd.name=/etc/authorization_config.cfg) + +- macro: iscsi_writing_conf + condition: (proc.name=iscsiadm and fd.name startswith /etc/iscsi) + +- macro: istio_writing_conf + condition: (proc.name=pilot-agent and fd.name startswith /etc/istio) + +- macro: symantec_writing_conf + condition: > + ((proc.name=symcfgd and fd.name startswith /etc/symantec) or + (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf)) + +- macro: liveupdate_writing_conf + condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) + +- macro: rancher_agent + condition: (proc.name=agent and container.image.repository contains "rancher/agent") + +- macro: rancher_network_manager + condition: (proc.name=rancher-bridge and container.image.repository contains "rancher/network-manager") + +- macro: sosreport_writing_files + condition: > + (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + +- macro: pkgmgmt_progs_writing_pki + condition: > + (proc.name=urlgrabber-ext- and proc.pname in (yum, yum-cron, repoquery) and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + +- macro: update_ca_trust_writing_pki + condition: (proc.pname=update-ca-trust and proc.name=trust and fd.name startswith /etc/pki) + +- macro: brandbot_writing_os_release + condition: proc.name=brandbot and 
fd.name=/etc/os-release + +- macro: selinux_writing_conf + condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) + +- list: veritas_binaries + items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] + +- macro: veritas_driver_script + condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") + +- macro: veritas_progs + condition: (proc.name in (veritas_binaries) or veritas_driver_script) + +- macro: veritas_writing_config + condition: (veritas_progs and (fd.name startswith /etc/vx or fd.name startswith /etc/opt/VRTS or fd.name startswith /etc/vom)) + +- macro: nginx_writing_conf + condition: (proc.name in (nginx,nginx-ingress-c,nginx-ingress) and (fd.name startswith /etc/nginx or fd.name startswith /etc/ingress-controller)) + +- macro: nginx_writing_certs + condition: > + (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs) + +- macro: chef_client_writing_conf + condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) + +- macro: centrify_writing_krb + condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) + +- macro: cockpit_writing_conf + condition: > + ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) + and fd.name startswith /etc/cockpit) + +- macro: ipsec_writing_conf + condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) + +- macro: exe_running_docker_save + condition: > + proc.name = "exe" + and (proc.cmdline contains "/var/lib/docker" + or proc.cmdline contains "/var/run/docker") + and proc.pname in (dockerd, docker, dockerd-current, docker-current) + +# Ideally we'd have a length check here as well but sysdig +# filterchecks don't have operators like len() +- macro: sed_temporary_file + condition: (proc.name=sed and fd.name startswith "/etc/sed") + +- macro: python_running_get_pip + condition: (proc.cmdline startswith "python get-pip.py") + +- macro: python_running_ms_oms + condition: (proc.cmdline startswith "python /var/lib/waagent/") + +- macro: gugent_writing_guestagent_log + condition: (proc.name=gugent and fd.name=GuestAgent.log) + +- macro: dse_writing_tmp + condition: (proc.name=dse-entrypoint and fd.name=/root/tmp__) + +- macro: zap_writing_state + condition: (proc.name=java and proc.cmdline contains "jar /zap" and fd.name startswith /root/.ZAP) + +- macro: airflow_writing_state + condition: (proc.name=airflow and fd.name startswith /root/airflow) + +- macro: rpm_writing_root_rpmdb + condition: (proc.name=rpm and fd.directory=/root/.rpmdb) + +- macro: maven_writing_groovy + condition: (proc.name=java and proc.cmdline contains "classpath /usr/local/apache-maven" and fd.name startswith /root/.groovy) + +- macro: chef_writing_conf + condition: (proc.name=chef-client and fd.name startswith /root/.chef) + +- macro: kubectl_writing_state + condition: (proc.name in (kubectl,oc) and fd.name startswith /root/.kube) + +- macro: java_running_cassandra + condition: (proc.name=java and proc.cmdline contains "cassandra.jar") + +- macro: cassandra_writing_state + condition: (java_running_cassandra and fd.directory=/root/.cassandra) + +# Istio +- macro: galley_writing_state + condition: (proc.name=galley and fd.name in (known_istio_files)) + +- list: known_istio_files + items: [/healthready, /healthliveness] + +- macro: calico_writing_state + condition: (proc.name=kube-controller and fd.name startswith /status.json and 
k8s.pod.name startswith calico) + +- macro: calico_writing_envvars + condition: (proc.name=start_runit and fd.name startswith "/etc/envvars" and container.image.repository endswith "calico/node") + +- list: repository_files + items: [sources.list] + +- list: repository_directories + items: [/etc/apt/sources.list.d, /etc/yum.repos.d, /etc/apt] + +- macro: access_repositories + condition: (fd.directory in (repository_directories) or + (fd.name pmatch (repository_directories) and + fd.filename in (repository_files))) + +- macro: modify_repositories + condition: (evt.arg.newpath pmatch (repository_directories)) + +- macro: user_known_update_package_registry + condition: (never_true) + +- rule: Update Package Repository + desc: Detect package repositories get updated + condition: > + ((open_write and access_repositories) or (modify and modify_repositories)) + and not package_mgmt_procs + and not package_mgmt_ancestor_procs + and not exe_running_docker_save + and not user_known_update_package_registry + output: > + Repository files get updated (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name newpath=%evt.arg.newpath container_id=%container.id image=%container.image.repository) + priority: + NOTICE + tags: [filesystem, mitre_persistence] + +# Users should overwrite this macro to specify conditions under which a +# write under the binary dir is ignored. For example, it may be okay to +# install a binary in the context of a ci/cd build. +- macro: user_known_write_below_binary_dir_activities + condition: (never_true) + +- rule: Write below binary dir + desc: an attempt to write to any file below a set of binary directories + condition: > + bin_dir and evt.dir = < and open_write + and not package_mgmt_procs + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + and not user_known_write_below_binary_dir_activities + output: > + File below a known binary directory opened for writing (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + +# If you'd like to generally monitor a wider set of directories on top +# of the ones covered by the rule Write below binary dir, you can use +# the following rule and lists. + +- list: monitored_directories + items: [/boot, /lib, /lib64, /usr/lib, /usr/local/lib, /usr/local/sbin, /usr/local/bin, /root/.ssh, /etc/cardserver] + +# Until https://github.com/draios/sysdig/pull/1153, which fixes +# https://github.com/draios/sysdig/issues/1152, is widely available, +# we can't use glob operators to match pathnames. Until then, we do a +# looser check to match ssh directories. 
+# When fixed, we will use "fd.name glob '/home/*/.ssh/*'" +- macro: user_ssh_directory + condition: (fd.name startswith '/home' and fd.name contains '.ssh') + +# google_accounts_(daemon) +- macro: google_accounts_daemon_writing_ssh + condition: (proc.name=google_accounts and user_ssh_directory) + +- macro: cloud_init_writing_ssh + condition: (proc.name=cloud-init and user_ssh_directory) + +- macro: mkinitramfs_writing_boot + condition: (proc.pname in (mkinitramfs, update-initramf) and fd.directory=/boot) + +- macro: monitored_dir + condition: > + (fd.directory in (monitored_directories) + or user_ssh_directory) + and not mkinitramfs_writing_boot + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs writing below monitored directories. +# +# Its default value is an expression that always is false, which +# becomes true when the "not ..." in the rule is applied. +- macro: user_known_write_monitored_dir_conditions + condition: (never_true) + +- rule: Write below monitored dir + desc: an attempt to write to any file below a set of binary directories + condition: > + evt.dir = < and open_write and monitored_dir + and not package_mgmt_procs + and not coreos_write_ssh_dir + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + and not google_accounts_daemon_writing_ssh + and not cloud_init_writing_ssh + and not user_known_write_monitored_dir_conditions + output: > + File below a monitored directory opened for writing (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + +# This rule is disabled by default as many system management tools +# like ansible, etc can read these files/paths. Enable it using this macro. 
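+#
+# A hypothetical local/user override (commented out; "backup-agent" is a
+# placeholder program name) that enables the rule and whitelists one known
+# reader might look like:
+#
+# - macro: consider_ssh_reads
+#   condition: (evt.num>=0)
+#
+# - macro: user_known_read_ssh_information_activities
+#   condition: (proc.name=backup-agent)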
+ +- macro: consider_ssh_reads + condition: (never_true) + +- macro: user_known_read_ssh_information_activities + condition: (never_true) + +- rule: Read ssh information + desc: Any attempt to read files below ssh directories by non-ssh programs + condition: > + ((open_read or open_directory) and + consider_ssh_reads and + (user_ssh_directory or fd.name startswith /root/.ssh) and + not user_known_read_ssh_information_activities and + not proc.name in (ssh_binaries)) + output: > + ssh-related file/directory read by non-ssh program (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_discovery] + +- list: safe_etc_dirs + items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig, /etc/fluent/configs.d] + +- macro: fluentd_writing_conf_files + condition: (proc.name=start-fluentd and fd.name in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf)) + +- macro: qualys_writing_conf_files + condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf) + +- macro: git_writing_nssdb + condition: (proc.name=git-remote-http and fd.directory=/etc/pki/nssdb) + +- macro: plesk_writing_keys + condition: (proc.name in (plesk_binaries) and fd.name startswith /etc/sw/keys) + +- macro: plesk_install_writing_apache_conf + condition: (proc.cmdline startswith "bash -hB /usr/lib/plesk-9.0/services/webserver.apache configure" + and fd.name="/etc/apache2/apache2.conf.tmp") + +- macro: plesk_running_mktemp + condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries)) + +- macro: networkmanager_writing_resolv_conf + condition: proc.aname[2]=nm-dispatcher and fd.name=/etc/resolv.conf + +- macro: add_shell_writing_shells_tmp + condition: (proc.name=add-shell and fd.name=/etc/shells.tmp) + +- macro: duply_writing_exclude_files + condition: (proc.name=touch and proc.pcmdline startswith "bash /usr/bin/duply" and fd.name startswith "/etc/duply") + +- macro: xmlcatalog_writing_files + condition: (proc.name=update-xmlcatal and fd.directory=/etc/xml) + +- macro: datadog_writing_conf + condition: ((proc.cmdline startswith "python /opt/datadog-agent" or + proc.cmdline startswith "entrypoint.sh /entrypoint.sh datadog start" or + proc.cmdline startswith "agent.py /opt/datadog-agent") + and fd.name startswith "/etc/dd-agent") + +- macro: rancher_writing_conf + condition: ((proc.name in (healthcheck, lb-controller, rancher-dns)) and + (container.image.repository contains "rancher/healthcheck" or + container.image.repository contains "rancher/lb-service-haproxy" or + container.image.repository contains "rancher/dns") and + (fd.name startswith "/etc/haproxy" or fd.name startswith "/etc/rancher-dns")) + +- macro: rancher_writing_root + condition: (proc.name=rancher-metadat and + (container.image.repository contains "rancher/metadata" or container.image.repository contains "rancher/lb-service-haproxy") and + fd.name startswith "/answers.json") + +- macro: checkpoint_writing_state + condition: (proc.name=checkpoint and + container.image.repository contains "coreos/pod-checkpointer" and + fd.name startswith "/etc/kubernetes") + +- macro: jboss_in_container_writing_passwd + condition: > + ((proc.cmdline="run-java.sh /opt/jboss/container/java/run/run-java.sh" + or proc.cmdline="run-java.sh /opt/run-java/run-java.sh") + and container + and fd.name=/etc/passwd) + 
+- macro: curl_writing_pki_db + condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) + +- macro: haproxy_writing_conf + condition: ((proc.name in (update-haproxy-,haproxy_reload.) or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) + and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) + +- macro: java_writing_conf + condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) + +- macro: rabbitmq_writing_conf + condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq) + +- macro: rook_writing_conf + condition: (proc.name=toolbox.sh and container.image.repository=rook/toolbox + and fd.directory=/etc/ceph) + +- macro: httpd_writing_conf_logs + condition: (proc.name=httpd and fd.name startswith /etc/httpd/) + +- macro: mysql_writing_conf + condition: > + ((proc.name in (start-mysql.sh, run-mysqld) or proc.pname=start-mysql.sh) and + (fd.name startswith /etc/mysql or fd.directory=/etc/my.cnf.d)) + +- macro: redis_writing_conf + condition: > + (proc.name in (run-redis, redis-launcher.) and (fd.name=/etc/redis.conf or fd.name startswith /etc/redis)) + +- macro: openvpn_writing_conf + condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) + +- macro: php_handlers_writing_conf + condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) + +- macro: sed_writing_temp_file + condition: > + ((proc.aname[3]=cron_start.sh and fd.name startswith /etc/security/sed) or + (proc.name=sed and (fd.name startswith /etc/apt/sources.list.d/sed or + fd.name startswith /etc/apt/sed or + fd.name startswith /etc/apt/apt.conf.d/sed))) + +- macro: cron_start_writing_pam_env + condition: (proc.cmdline="bash /usr/sbin/start-cron" and fd.name=/etc/security/pam_env.conf) + +# In some cases dpkg-reconfigur runs commands that modify /etc. Not +# putting the full set of package management programs yet. 
+- macro: dpkg_scripting + condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu)) + +- macro: ufw_writing_conf + condition: (proc.name=ufw and fd.directory=/etc/ufw) + +- macro: calico_writing_conf + condition: > + (((proc.name = calico-node) or + (container.image.repository=gcr.io/projectcalico-org/node and proc.name in (start_runit, cp)) or + (container.image.repository=gcr.io/projectcalico-org/cni and proc.name=sed)) + and fd.name startswith /etc/calico) + +- macro: prometheus_conf_writing_conf + condition: (proc.name=prometheus-conf and fd.name startswith /etc/prometheus/config_out) + +- macro: openshift_writing_conf + condition: (proc.name=oc and fd.name startswith /etc/origin/node) + +- macro: keepalived_writing_conf + condition: (proc.name=keepalived and fd.name=/etc/keepalived/keepalived.conf) + +- macro: etcd_manager_updating_dns + condition: (container and proc.name=etcd-manager and fd.name=/etc/hosts) + +- macro: automount_using_mtab + condition: (proc.pname = automount and fd.name startswith /etc/mtab) + +- macro: mcafee_writing_cma_d + condition: (proc.name=macompatsvc and fd.directory=/etc/cma.d) + +- macro: avinetworks_supervisor_writing_ssh + condition: > + (proc.cmdline="se_supervisor.p /opt/avi/scripts/se_supervisor.py -d" and + (fd.name startswith /etc/ssh/known_host_ or + fd.name startswith /etc/ssh/ssh_monitor_config_ or + fd.name startswith /etc/ssh/ssh_config_)) + +- macro: multipath_writing_conf + condition: (proc.name = multipath and fd.name startswith /etc/multipath/) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs writing below specific directories below +# /etc. fluentd_writing_conf_files is a good example to follow, as it +# specifies both the program doing the writing as well as the specific +# files it is allowed to modify. +# +# In this file, it just takes one of the programs in the base macro +# and repeats it. 
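+#
+# A hypothetical local override (program name and path are placeholders) that
+# additionally lets one specific agent write its own config under /etc:
+#
+# - macro: user_known_write_etc_conditions
+#   condition: (proc.name=confd or (proc.name=my-agent and fd.name startswith /etc/my-agent))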
+ +- macro: user_known_write_etc_conditions + condition: proc.name=confd + +# This is a placeholder for user to extend the whitelist for write below etc rule +- macro: user_known_write_below_etc_activities + condition: (never_true) + +- macro: write_etc_common + condition: > + etc_dir and evt.dir = < and open_write + and proc_name_exists + and not proc.name in (passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, + package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, + dev_creation_binaries, shell_mgmt_binaries, + mail_config_binaries, + sshkit_script_binaries, + ldconfig.real, ldconfig, confd, gpg, insserv, + apparmor_parser, update-mime, tzdata.config, tzdata.postinst, + systemd, systemd-machine, systemd-sysuser, + debconf-show, rollerd, bind9.postinst, sv, + gen_resolvconf., update-ca-certi, certbot, runsv, + qualys-cloud-ag, locales.postins, nomachine_binaries, + adclient, certutil, crlutil, pam-auth-update, parallels_insta, + openshift-launc, update-rc.d, puppet) + and not (container and proc.cmdline in ("cp /run/secrets/kubernetes.io/serviceaccount/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt")) + and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) + and not fd.name pmatch (safe_etc_dirs) + and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) + and not sed_temporary_file + and not exe_running_docker_save + and not ansible_running_python + and not python_running_denyhosts + and not fluentd_writing_conf_files + and not user_known_write_etc_conditions + and not run_by_centrify + and not run_by_adclient + and not qualys_writing_conf_files + and not git_writing_nssdb + and not plesk_writing_keys + and not plesk_install_writing_apache_conf + and not plesk_running_mktemp + and not networkmanager_writing_resolv_conf + and not run_by_chef + and not add_shell_writing_shells_tmp + and not duply_writing_exclude_files + and not xmlcatalog_writing_files + and not parent_supervise_running_multilog + and not supervise_writing_status + and not pki_realm_writing_realms + and not htpasswd_writing_passwd + and not lvprogs_writing_conf + and not ovsdb_writing_openvswitch + and not datadog_writing_conf + and not curl_writing_pki_db + and not haproxy_writing_conf + and not java_writing_conf + and not dpkg_scripting + and not parent_ucf_writing_conf + and not rabbitmq_writing_conf + and not rook_writing_conf + and not php_handlers_writing_conf + and not sed_writing_temp_file + and not cron_start_writing_pam_env + and not httpd_writing_conf_logs + and not mysql_writing_conf + and not openvpn_writing_conf + and not consul_template_writing_conf + and not countly_writing_nginx_conf + and not ms_oms_writing_conf + and not ms_scx_writing_conf + and not azure_scripts_writing_conf + and not azure_networkwatcher_writing_conf + and not couchdb_writing_conf + and not update_texmf_writing_conf + and not slapadd_writing_conf + and not symantec_writing_conf + and not liveupdate_writing_conf + and not sosreport_writing_files + and not selinux_writing_conf + and not veritas_writing_config + and not nginx_writing_conf + and not nginx_writing_certs + and not chef_client_writing_conf + and not centrify_writing_krb + and not cockpit_writing_conf + and not ipsec_writing_conf + and not httpd_writing_ssl_conf + and not userhelper_writing_etc_security + and not pkgmgmt_progs_writing_pki + and not update_ca_trust_writing_pki + and not 
brandbot_writing_os_release + and not redis_writing_conf + and not openldap_writing_conf + and not ucpagent_writing_conf + and not iscsi_writing_conf + and not istio_writing_conf + and not ufw_writing_conf + and not calico_writing_conf + and not calico_writing_envvars + and not prometheus_conf_writing_conf + and not openshift_writing_conf + and not keepalived_writing_conf + and not rancher_writing_conf + and not checkpoint_writing_state + and not jboss_in_container_writing_passwd + and not etcd_manager_updating_dns + and not user_known_write_below_etc_activities + and not automount_using_mtab + and not mcafee_writing_cma_d + and not avinetworks_supervisor_writing_ssh + and not multipath_writing_conf + +- rule: Write below etc + desc: an attempt to write to any file below /etc + condition: write_etc_common + output: "File below /etc opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + +- list: known_root_files + items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, + /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd, /root/.wget-hsts, /health, /exec.fifo] + +- list: known_root_directories + items: [/root/.oracle_jre_usage, /root/.ssh, /root/.subversion, /root/.nami] + +- macro: known_root_conditions + condition: (fd.name startswith /root/orcexec. + or fd.name startswith /root/.m2 + or fd.name startswith /root/.npm + or fd.name startswith /root/.pki + or fd.name startswith /root/.ivy2 + or fd.name startswith /root/.config/Cypress + or fd.name startswith /root/.config/pulse + or fd.name startswith /root/.config/configstore + or fd.name startswith /root/jenkins/workspace + or fd.name startswith /root/.jenkins + or fd.name startswith /root/.cache + or fd.name startswith /root/.sbt + or fd.name startswith /root/.java + or fd.name startswith /root/.glide + or fd.name startswith /root/.sonar + or fd.name startswith /root/.v8flag + or fd.name startswith /root/infaagent + or fd.name startswith /root/.local/lib/python + or fd.name startswith /root/.pm2 + or fd.name startswith /root/.gnupg + or fd.name startswith /root/.pgpass + or fd.name startswith /root/.theano + or fd.name startswith /root/.gradle + or fd.name startswith /root/.android + or fd.name startswith /root/.ansible + or fd.name startswith /root/.crashlytics + or fd.name startswith /root/.dbus + or fd.name startswith /root/.composer + or fd.name startswith /root/.gconf + or fd.name startswith /root/.nv + or fd.name startswith /root/.local/share/jupyter + or fd.name startswith /root/oradiag_root + or fd.name startswith /root/workspace + or fd.name startswith /root/jvm + or fd.name startswith /root/.node-gyp) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs writing below specific directories below +# / or /root. +# +# In this file, it just takes one of the condition in the base macro +# and repeats it. 
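+#
+# e.g. a hypothetical local override (the ".my-tool" directory is a
+# placeholder) that also tolerates a specific state directory under /root:
+#
+# - macro: user_known_write_root_conditions
+#   condition: (fd.name=/root/.bash_history or fd.directory=/root/.my-tool)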
+- macro: user_known_write_root_conditions + condition: fd.name=/root/.bash_history + +# This is a placeholder for user to extend the whitelist for write below root rule +- macro: user_known_write_below_root_activities + condition: (never_true) + +- macro: runc_writing_exec_fifo + condition: (proc.cmdline="runc:[1:CHILD] init" and fd.name=/exec.fifo) + +- macro: runc_writing_var_lib_docker + condition: (proc.cmdline="runc:[1:CHILD] init" and evt.arg.filename startswith /var/lib/docker) + +- macro: mysqlsh_writing_state + condition: (proc.name=mysqlsh and fd.directory=/root/.mysqlsh) + +- rule: Write below root + desc: an attempt to write to any file directly below / or /root + condition: > + root_dir and evt.dir = < and open_write + and proc_name_exists + and not fd.name in (known_root_files) + and not fd.directory pmatch (known_root_directories) + and not exe_running_docker_save + and not gugent_writing_guestagent_log + and not dse_writing_tmp + and not zap_writing_state + and not airflow_writing_state + and not rpm_writing_root_rpmdb + and not maven_writing_groovy + and not chef_writing_conf + and not kubectl_writing_state + and not cassandra_writing_state + and not galley_writing_state + and not calico_writing_state + and not rancher_writing_root + and not runc_writing_exec_fifo + and not mysqlsh_writing_state + and not known_root_conditions + and not user_known_write_root_conditions + and not user_known_write_below_root_activities + output: "File below / or /root opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + +- macro: cmp_cp_by_passwd + condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts) + +- macro: user_known_read_sensitive_files_activities + condition: (never_true) + +- rule: Read sensitive file trusted after startup + desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information) by a trusted program after startup. Trusted programs might read these files + at startup to load initial state, but not afterwards. + condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd" and not user_known_read_sensitive_files_activities + output: > + Sensitive file opened for reading by trusted program after startup (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access] + +- list: read_sensitive_file_binaries + items: [ + iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, + vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, + pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, + scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd + ] + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs accessing sensitive files. +# fluentd_writing_conf_files is a good example to follow, as it +# specifies both the program doing the writing as well as the specific +# files it is allowed to modify. +# +# In this file, it just takes one of the macros in the base rule +# and repeats it. 
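+#
+# A hypothetical local override ("backup-agent" is a placeholder) that also
+# allows one specific program to read /etc/shadow:
+#
+# - macro: user_read_sensitive_file_conditions
+#   condition: (cmp_cp_by_passwd or (proc.name=backup-agent and fd.name=/etc/shadow))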
+ +- macro: user_read_sensitive_file_conditions + condition: cmp_cp_by_passwd + +- list: read_sensitive_file_images + items: [] + +- macro: user_read_sensitive_file_containers + condition: (container and container.image.repository in (read_sensitive_file_images)) + +- rule: Read sensitive file untrusted + desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information). Exceptions are made for known trusted programs. + condition: > + sensitive_files and open_read + and proc_name_exists + and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, + cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, + vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, + in.proftpd, mandb, salt-minion, postgres_mgmt_binaries, + google_oslogin_ + ) + and not cmp_cp_by_passwd + and not ansible_running_python + and not proc.cmdline contains /usr/bin/mandb + and not run_by_qualys + and not run_by_chef + and not run_by_google_accounts_daemon + and not user_read_sensitive_file_conditions + and not perl_running_plesk + and not perl_running_updmap + and not veritas_driver_script + and not perl_running_centrifydc + and not runuser_reading_pam + and not linux_bench_reading_etc_shadow + and not user_known_read_sensitive_files_activities + and not user_read_sensitive_file_containers + output: > + Sensitive file opened for reading by non-trusted program (user=%user.name user_loginuid=%user.loginuid program=%proc.name + command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access, mitre_discovery] + +- macro: amazon_linux_running_python_yum + condition: > + (proc.name = python and + proc.pcmdline = "python -m amazon_linux_extras system_motd" and + proc.cmdline startswith "python -c import yum;") + +- macro: user_known_write_rpm_database_activities + condition: (never_true) + +# Only let rpm-related programs write to the rpm database +- rule: Write below rpm database + desc: an attempt to write to the rpm database by any non-rpm related program + condition: > + fd.name startswith /var/lib/rpm and open_write + and not rpm_procs + and not ansible_running_python + and not python_running_chef + and not exe_running_docker_save + and not amazon_linux_running_python_yum + and not user_known_write_rpm_database_activities + output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, software_mgmt, mitre_persistence] + +- macro: postgres_running_wal_e + condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") + +- macro: redis_running_prepost_scripts + condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) + +- macro: rabbitmq_running_scripts + condition: > + (proc.pname=beam.smp and + (proc.cmdline startswith "sh -c exec ps" or + proc.cmdline startswith "sh -c exec inet_gethost" or + proc.cmdline= "sh -s unix:cmd" or + proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) + +- macro: rabbitmqctl_running_scripts + condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") + +- 
macro: run_by_appdynamics + condition: (proc.pname=java and proc.pcmdline startswith "java -jar -Dappdynamics") + +- macro: user_known_db_spawned_processes + condition: (never_true) + +- rule: DB program spawned process + desc: > + a database-server related program spawned a new process other than itself. + This shouldn\'t occur and is a follow on from some SQL injection attacks. + condition: > + proc.pname in (db_server_binaries) + and spawned_process + and not proc.name in (db_server_binaries) + and not postgres_running_wal_e + and not user_known_db_spawned_processes + output: > + Database-related program spawned process other than itself (user=%user.name user_loginuid=%user.loginuid + program=%proc.cmdline parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [process, database, mitre_execution] + +- macro: user_known_modify_bin_dir_activities + condition: (never_true) + +- rule: Modify binary dirs + desc: an attempt to modify any file below a set of binary directories. + condition: bin_dir_rename and modify and not package_mgmt_procs and not exe_running_docker_save and not user_known_modify_bin_dir_activities + output: > + File below known binary directory renamed/removed (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + pcmdline=%proc.pcmdline operation=%evt.type file=%fd.name %evt.args container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + +- macro: user_known_mkdir_bin_dir_activities + condition: (never_true) + +- rule: Mkdir binary dirs + desc: an attempt to create a directory below a set of binary directories. + condition: mkdir and bin_dir_mkdir and not package_mgmt_procs and not user_known_mkdir_bin_dir_activities + output: > + Directory below known binary directory created (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline directory=%evt.arg.path container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + +# This list allows for easy additions to the set of commands allowed +# to change thread namespace without having to copy and override the +# entire change thread namespace rule. +- list: user_known_change_thread_namespace_binaries + items: [] + +- macro: user_known_change_thread_namespace_activities + condition: (never_true) + +- list: network_plugin_binaries + items: [aws-cni, azure-vnet] + +- macro: calico_node + condition: (container.image.repository endswith calico/node and proc.name=calico-node) + +- macro: weaveworks_scope + condition: (container.image.repository endswith weaveworks/scope and proc.name=scope) + +- rule: Change thread namespace + desc: > + an attempt to change a program/thread\'s namespace (commonly done + as a part of creating a container) by calling setns. 
+ condition: > + evt.type=setns and evt.dir=< + and proc_name_exists + and not (container.id=host and proc.name in (docker_binaries, k8s_binaries, lxd_binaries, nsenter)) + and not proc.name in (sysdigcloud_binaries, sysdig, calico, oci-umount, cilium-cni, network_plugin_binaries) + and not proc.name in (user_known_change_thread_namespace_binaries) + and not proc.name startswith "runc" + and not proc.cmdline startswith "containerd" + and not proc.pname in (sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws) + and not python_running_sdchecks + and not java_running_sdjagent + and not kubelet_running_loopback + and not rancher_agent + and not rancher_network_manager + and not calico_node + and not weaveworks_scope + and not user_known_change_thread_namespace_activities + output: > + Namespace change (setns) by unexpected program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + parent=%proc.pname %container.info container_id=%container.id image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [process, mitre_privilege_escalation, mitre_lateral_movement] + +# The binaries in this list and their descendents are *not* allowed +# spawn shells. This includes the binaries spawning shells directly as +# well as indirectly. For example, apache -> php/perl for +# mod_{php,perl} -> some shell is also not allowed, because the shell +# has apache as an ancestor. + +- list: protected_shell_spawning_binaries + items: [ + http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, + fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 + ] + +- macro: parent_java_running_zookeeper + condition: (proc.pname=java and proc.pcmdline contains org.apache.zookeeper.server) + +- macro: parent_java_running_kafka + condition: (proc.pname=java and proc.pcmdline contains kafka.Kafka) + +- macro: parent_java_running_elasticsearch + condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch) + +- macro: parent_java_running_activemq + condition: (proc.pname=java and proc.pcmdline contains activemq.jar) + +- macro: parent_java_running_cassandra + condition: (proc.pname=java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon)) + +- macro: parent_java_running_jboss_wildfly + condition: (proc.pname=java and proc.pcmdline contains org.jboss) + +- macro: parent_java_running_glassfish + condition: (proc.pname=java and proc.pcmdline contains com.sun.enterprise.glassfish) + +- macro: parent_java_running_hadoop + condition: (proc.pname=java and proc.pcmdline contains org.apache.hadoop) + +- macro: parent_java_running_datastax + condition: (proc.pname=java and proc.pcmdline contains com.datastax) + +- macro: nginx_starting_nginx + condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") + +- macro: nginx_running_aws_s3_cp + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") + +- macro: consul_running_net_scripts + condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) + +- macro: consul_running_alert_checks + condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") + +- macro: serf_script + condition: (proc.cmdline startswith "sh -c serf") + +- macro: check_process_status + condition: (proc.cmdline startswith "sh -c kill -0 ") + +# In some cases, you may want to consider 
node processes run directly
+# in containers as protected shell spawners. Examples include using
+# pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct
+# entrypoint of the container, and when the node app is a long-lived
+# server using something like express.
+#
+# However, there are other uses of node related to build pipelines for
+# which node is not really a server but instead a general scripting
+# tool. In these cases, shells are very likely, and you don't want to
+# consider node processes protected shell spawners.
+#
+# We have to choose one of these cases, so we consider node processes
+# as unprotected by default. If you want to consider any node process
+# run in a container as a protected shell spawner, override the macro
+# below and remove the "never_true" clause so that it takes effect.
+- macro: possibly_node_in_container
+  condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe))
+
+# Similarly, you may want to consider any shell spawned by apache
+# tomcat as suspect. The famous apache struts attack (CVE-2017-5638)
+# could be exploited to do things like spawn shells.
+#
+# However, many applications *do* use tomcat to run arbitrary shells,
+# as a part of build pipelines, etc.
+#
+# Like for node, we make this case opt-in.
+- macro: possibly_parent_java_running_tomcat
+  condition: (never_true and proc.pname=java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap)
+
+- macro: protected_shell_spawner
+  condition: >
+    (proc.aname in (protected_shell_spawning_binaries)
+    or parent_java_running_zookeeper
+    or parent_java_running_kafka
+    or parent_java_running_elasticsearch
+    or parent_java_running_activemq
+    or parent_java_running_cassandra
+    or parent_java_running_jboss_wildfly
+    or parent_java_running_glassfish
+    or parent_java_running_hadoop
+    or parent_java_running_datastax
+    or possibly_parent_java_running_tomcat
+    or possibly_node_in_container)
+
+- list: mesos_shell_binaries
+  items: [mesos-docker-ex, mesos-slave, mesos-health-ch]
+
+# Note that runsv is both in protected_shell_spawner and the
+# exclusions by pname. This means that runsv can itself spawn shells
+# (the ./run and ./finish scripts), but the processes runsv starts can not
+# spawn shells.
+- rule: Run shell untrusted
+  desc: an attempt to spawn a shell below a non-shell application. Specific applications are monitored.
+ condition: > + spawned_process + and shell_procs + and proc.pname exists + and protected_shell_spawner + and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, + needrestart_binaries, + mesos_shell_binaries, + erl_child_setup, exechealthz, + PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, + lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, + "puma reactor") + and not proc.cmdline in (known_shell_spawn_cmdlines) + and not proc.aname in (unicorn_launche) + and not consul_running_net_scripts + and not consul_running_alert_checks + and not nginx_starting_nginx + and not nginx_running_aws_s3_cp + and not run_by_package_mgmt_binaries + and not serf_script + and not check_process_status + and not run_by_foreman + and not python_mesos_marathon_scripting + and not splunk_running_forwarder + and not postgres_running_wal_e + and not redis_running_prepost_scripts + and not rabbitmq_running_scripts + and not rabbitmqctl_running_scripts + and not run_by_appdynamics + and not user_shell_container_exclusions + output: > + Shell spawned by untrusted binary (user=%user.name user_loginuid=%user.loginuid shell=%proc.name parent=%proc.pname + cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] + aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] container_id=%container.id image=%container.image.repository) + priority: DEBUG + tags: [shell, mitre_execution] + +- macro: allowed_openshift_registry_root + condition: > + (container.image.repository startswith openshift3/ or + container.image.repository startswith registry.redhat.io/openshift3/ or + container.image.repository startswith registry.access.redhat.com/openshift3/) + +# Source: https://docs.openshift.com/enterprise/3.2/install_config/install/disconnected_install.html +- macro: openshift_image + condition: > + (allowed_openshift_registry_root and + (container.image.repository endswith /logging-deployment or + container.image.repository endswith /logging-elasticsearch or + container.image.repository endswith /logging-kibana or + container.image.repository endswith /logging-fluentd or + container.image.repository endswith /logging-auth-proxy or + container.image.repository endswith /metrics-deployer or + container.image.repository endswith /metrics-hawkular-metrics or + container.image.repository endswith /metrics-cassandra or + container.image.repository endswith /metrics-heapster or + container.image.repository endswith /ose-haproxy-router or + container.image.repository endswith /ose-deployer or + container.image.repository endswith /ose-sti-builder or + container.image.repository endswith /ose-docker-builder or + container.image.repository endswith /ose-pod or + container.image.repository endswith /ose-node or + container.image.repository endswith /ose-docker-registry or + container.image.repository endswith /prometheus-node-exporter or + container.image.repository endswith /image-inspector)) + +# These images are allowed both to run with --privileged and to mount +# sensitive paths from the host filesystem. +# +# NOTE: This list is only provided for backwards compatibility with +# older local falco rules files that may have been appending to +# trusted_images. To make customizations, it's better to add images to +# either privileged_images or falco_sensitive_mount_images. 
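+#
+# For illustration only (not part of the shipped policy): rather than appending
+# to trusted_images, a local rules file such as falco_rules.local.yaml could
+# extend the preferred lists directly. The image name below is hypothetical:
+# - list: falco_privileged_images
+#   items: [example.com/monitoring/agent]
+#   append: true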
+- list: trusted_images + items: [] + +# NOTE: This macro is only provided for backwards compatibility with +# older local falco rules files that may have been appending to +# trusted_images. To make customizations, it's better to add containers to +# user_trusted_containers, user_privileged_containers or user_sensitive_mount_containers. +- macro: trusted_containers + condition: (container.image.repository in (trusted_images)) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to specify additional containers that are +# trusted and therefore allowed to run privileged *and* with sensitive +# mounts. +# +# Like trusted_images, this is deprecated in favor of +# user_privileged_containers and user_sensitive_mount_containers and +# is only provided for backwards compatibility. +# +# In this file, it just takes one of the images in trusted_containers +# and repeats it. +- macro: user_trusted_containers + condition: (never_true) + +- list: sematext_images + items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent, + registry.access.redhat.com/sematext/sematext-agent-docker, + registry.access.redhat.com/sematext/agent, + registry.access.redhat.com/sematext/logagent] + +# These container images are allowed to run with --privileged +- list: falco_privileged_images + items: [ + docker.io/calico/node, + calico/node, + docker.io/cloudnativelabs/kube-router, + docker.io/docker/ucp-agent, + docker.io/falcosecurity/falco, + docker.io/mesosphere/mesos-slave, + docker.io/rook/toolbox, + docker.io/sysdig/falco, + docker.io/sysdig/sysdig, + falcosecurity/falco, + gcr.io/google_containers/kube-proxy, + gcr.io/google-containers/startup-script, + gcr.io/projectcalico-org/node, + gke.gcr.io/kube-proxy, + gke.gcr.io/gke-metadata-server, + gke.gcr.io/netd-amd64, + gcr.io/google-containers/prometheus-to-sd, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/kube-proxy, + k8s.gcr.io/prometheus-to-sd, + quay.io/calico/node, + sysdig/falco, + sysdig/sysdig, + sematext_images + ] + +- macro: falco_privileged_containers + condition: (openshift_image or + user_trusted_containers or + container.image.repository in (trusted_images) or + container.image.repository in (falco_privileged_images) or + container.image.repository startswith istio/proxy_ or + container.image.repository startswith quay.io/sysdig/) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to specify additional containers that are +# allowed to run privileged +# +# In this file, it just takes one of the images in falco_privileged_images +# and repeats it. +- macro: user_privileged_containers + condition: (never_true) + +- list: rancher_images + items: [ + rancher/network-manager, rancher/dns, rancher/agent, + rancher/lb-service-haproxy, rancher/metadata, rancher/healthcheck + ] + +# These container images are allowed to mount sensitive paths from the +# host filesystem. 
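+#
+# For local customizations it is usually preferable to override the
+# user_sensitive_mount_containers macro (defined further below) instead of
+# editing this list. A sketch, using a hypothetical repository name:
+# - macro: user_sensitive_mount_containers
+#   condition: (container.image.repository = example.com/ops/node-exporter)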
+- list: falco_sensitive_mount_images + items: [ + docker.io/sysdig/falco, docker.io/sysdig/sysdig, sysdig/falco, sysdig/sysdig, + docker.io/falcosecurity/falco, falcosecurity/falco, + gcr.io/google_containers/hyperkube, + gcr.io/google_containers/kube-proxy, docker.io/calico/node, + docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul, + docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout, + docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter, + amazon/amazon-ecs-agent + ] + +- macro: falco_sensitive_mount_containers + condition: (user_trusted_containers or + container.image.repository in (trusted_images) or + container.image.repository in (falco_sensitive_mount_images) or + container.image.repository startswith quay.io/sysdig/) + +# These container images are allowed to run with hostnetwork=true +- list: falco_hostnetwork_images + items: [ + gcr.io/google-containers/prometheus-to-sd, + gcr.io/projectcalico-org/typha, + gcr.io/projectcalico-org/node, + gke.gcr.io/gke-metadata-server, + gke.gcr.io/kube-proxy, + gke.gcr.io/netd-amd64, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/prometheus-to-sd, + ] + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to specify additional containers that are +# allowed to perform sensitive mounts. +# +# In this file, it just takes one of the images in falco_sensitive_mount_images +# and repeats it. +- macro: user_sensitive_mount_containers + condition: (never_true) + +- rule: Launch Privileged Container + desc: Detect the initial process started in a privileged container. Exceptions are made for known trusted images. + condition: > + container_started and container + and container.privileged=true + and not falco_privileged_containers + and not user_privileged_containers + output: Privileged container started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: INFO + tags: [container, cis, mitre_privilege_escalation, mitre_lateral_movement] + +# For now, only considering a full mount of /etc as +# sensitive. Ideally, this would also consider all subdirectories +# below /etc as well, but the globbing mechanism used by sysdig +# doesn't allow exclusions of a full pattern, only single characters. +- macro: sensitive_mount + condition: (container.mount.dest[/proc*] != "N/A" or + container.mount.dest[/var/run/docker.sock] != "N/A" or + container.mount.dest[/var/run/crio/crio.sock] != "N/A" or + container.mount.dest[/var/lib/kubelet] != "N/A" or + container.mount.dest[/var/lib/kubelet/pki] != "N/A" or + container.mount.dest[/] != "N/A" or + container.mount.dest[/home/admin] != "N/A" or + container.mount.dest[/etc] != "N/A" or + container.mount.dest[/etc/kubernetes] != "N/A" or + container.mount.dest[/etc/kubernetes/manifests] != "N/A" or + container.mount.dest[/root*] != "N/A") + +# The steps libcontainer performs to set up the root program for a container are: +# - clone + exec self to a program runc:[0:PARENT] +# - clone a program runc:[1:CHILD] which sets up all the namespaces +# - clone a second program runc:[2:INIT] + exec to the root program. +# The parent of runc:[2:INIT] is runc:0:PARENT] +# As soon as 1:CHILD is created, 0:PARENT exits, so there's a race +# where at the time 2:INIT execs the root program, 0:PARENT might have +# already exited, or might still be around. So we handle both. 
+# We also let runc:[1:CHILD] count as the parent process, which can occur +# when we lose events and lose track of state. + +- macro: container_entrypoint + condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe, docker-runc-cur)) + +- rule: Launch Sensitive Mount Container + desc: > + Detect the initial process started by a container that has a mount from a sensitive host directory + (i.e. /proc). Exceptions are made for known trusted images. + condition: > + container_started and container + and sensitive_mount + and not falco_sensitive_mount_containers + and not user_sensitive_mount_containers + output: Container with sensitive mount started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag mounts=%container.mounts) + priority: INFO + tags: [container, cis, mitre_lateral_movement] + +# In a local/user rules file, you could override this macro to +# explicitly enumerate the container images that you want to run in +# your environment. In this main falco rules file, there isn't any way +# to know all the containers that can run, so any container is +# allowed, by using a filter that is guaranteed to evaluate to true. +# In the overridden macro, the condition would look something like +# (container.image.repository = vendor/container-1 or +# container.image.repository = vendor/container-2 or ...) + +- macro: allowed_containers + condition: (container.id exists) + +- rule: Launch Disallowed Container + desc: > + Detect the initial process started by a container that is not in a list of allowed containers. + condition: container_started and container and not allowed_containers + output: Container started and not in allowed list (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [container, mitre_lateral_movement] + +- macro: user_known_system_user_login + condition: (never_true) + +# Anything run interactively by root +# - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive +# output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)" +# priority: WARNING + +- rule: System user interactive + desc: an attempt to run interactive commands by a system (i.e. non-login) user + condition: spawned_process and system_users and interactive and not user_known_system_user_login + output: "System user ran an interactive command (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id image=%container.image.repository)" + priority: INFO + tags: [users, mitre_remote_access_tools] + +# In some cases, a shell is expected to be run in a container. For example, configuration +# management software may do this, which is expected. +- macro: user_expected_terminal_shell_in_container_conditions + condition: (never_true) + +- rule: Terminal shell in container + desc: A shell was used as the entrypoint/exec point into a container with an attached terminal. 
+ condition: > + spawned_process and container + and shell_procs and proc.tty != 0 + and container_entrypoint + and not user_expected_terminal_shell_in_container_conditions + output: > + A shell was spawned in a container with an attached terminal (user=%user.name user_loginuid=%user.loginuid %container.info + shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [container, shell, mitre_execution] + +# For some container types (mesos), there isn't a container image to +# work with, and the container name is autogenerated, so there isn't +# any stable aspect of the software to work with. In this case, we +# fall back to allowing certain command lines. + +- list: known_shell_spawn_cmdlines + items: [ + '"sh -c uname -p 2> /dev/null"', + '"sh -c uname -s 2>&1"', + '"sh -c uname -r 2>&1"', + '"sh -c uname -v 2>&1"', + '"sh -c uname -a 2>&1"', + '"sh -c ruby -v 2>&1"', + '"sh -c getconf CLK_TCK"', + '"sh -c getconf PAGESIZE"', + '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c /sbin/ldconfig -p 2>/dev/null"', + '"sh -c stty -a 2>/dev/null"', + '"sh -c stty -a < /dev/tty"', + '"sh -c stty -g < /dev/tty"', + '"sh -c node index.js"', + '"sh -c node index"', + '"sh -c node ./src/start.js"', + '"sh -c node app.js"', + '"sh -c node -e \"require(''nan'')\""', + '"sh -c node -e \"require(''nan'')\")"', + '"sh -c node $NODE_DEBUG_OPTION index.js "', + '"sh -c crontab -l 2"', + '"sh -c lsb_release -a"', + '"sh -c lsb_release -is 2>/dev/null"', + '"sh -c whoami"', + '"sh -c node_modules/.bin/bower-installer"', + '"sh -c /bin/hostname -f 2> /dev/null"', + '"sh -c locale -a"', + '"sh -c -t -i"', + '"sh -c openssl version"', + '"bash -c id -Gn kafadmin"', + '"sh -c /bin/sh -c ''date +%%s''"' + ] + +# This list allows for easy additions to the set of commands allowed +# to run shells in containers without having to without having to copy +# and override the entire run shell in container macro. Once +# https://github.com/draios/falco/issues/255 is fixed this will be a +# bit easier, as someone could append of any of the existing lists. +- list: user_known_shell_spawn_binaries + items: [] + +# This macro allows for easy additions to the set of commands allowed +# to run shells in containers without having to override the entire +# rule. Its default value is an expression that always is false, which +# becomes true when the "not ..." in the rule is applied. 
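+#
+# For example (illustrative only), a local override that allows shells spawned
+# under a hypothetical CI builder image could look like:
+# - macro: user_shell_container_exclusions
+#   condition: (container.image.repository = example.com/ci/builder)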
+- macro: user_shell_container_exclusions + condition: (never_true) + +- macro: login_doing_dns_lookup + condition: (proc.name=login and fd.l4proto=udp and fd.sport=53) + +# sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets +# systemd can listen on ports to launch things like sshd on demand +- rule: System procs network activity + desc: any network activity performed by system binaries that are not expected to send or receive any network traffic + condition: > + (fd.sockfamily = ip and (system_procs or proc.name in (shell_binaries))) + and (inbound_outbound) + and not proc.name in (known_system_procs_network_activity_binaries) + and not login_doing_dns_lookup + and not user_expected_system_procs_network_activity_conditions + output: > + Known system binary sent/received network traffic + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + +# This list allows easily whitelisting system proc names that are +# expected to communicate on the network. +- list: known_system_procs_network_activity_binaries + items: [systemd, hostid, id] + +# This macro allows specifying conditions under which a system binary +# is allowed to communicate on the network. For instance, only specific +# proc.cmdline values could be allowed to be more granular in what is +# allowed. +- macro: user_expected_system_procs_network_activity_conditions + condition: (never_true) + +# When filled in, this should look something like: +# (proc.env contains "HTTP_PROXY=http://my.http.proxy.com ") +# The trailing space is intentional so avoid matching on prefixes of +# the actual proxy. +- macro: allowed_ssh_proxy_env + condition: (always_true) + +- list: http_proxy_binaries + items: [curl, wget] + +- macro: http_proxy_procs + condition: (proc.name in (http_proxy_binaries)) + +- rule: Program run with disallowed http proxy env + desc: An attempt to run a program with a disallowed HTTP_PROXY environment variable + condition: > + spawned_process and + http_proxy_procs and + not allowed_ssh_proxy_env and + proc.env icontains HTTP_PROXY + output: > + Program run with disallowed HTTP_PROXY environment variable + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline env=%proc.env parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [host, users] + +# In some environments, any attempt by a interpreted program (perl, +# python, ruby, etc) to listen for incoming connections or perform +# outgoing connections might be suspicious. These rules are not +# enabled by default, but you can modify the following macros to +# enable them. + +- macro: consider_interpreted_inbound + condition: (never_true) + +- macro: consider_interpreted_outbound + condition: (never_true) + +- rule: Interpreted procs inbound network activity + desc: Any inbound network activity performed by any interpreted program (perl, python, ruby, etc.) 
+ condition: > + (inbound and consider_interpreted_inbound + and interpreted_procs) + output: > + Interpreted program received/listened for network traffic + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + +- rule: Interpreted procs outbound network activity + desc: Any outbound network activity performed by any interpreted program (perl, python, ruby, etc.) + condition: > + (outbound and consider_interpreted_outbound + and interpreted_procs) + output: > + Interpreted program performed outgoing network connection + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + +- list: openvpn_udp_ports + items: [1194, 1197, 1198, 8080, 9201] + +- list: l2tp_udp_ports + items: [500, 1701, 4500, 10000] + +- list: statsd_ports + items: [8125] + +- list: ntp_ports + items: [123] + +# Some applications will connect a udp socket to an address only to +# test connectivity. Assuming the udp connect works, they will follow +# up with a tcp connect that actually sends/receives data. +# +# With that in mind, we listed a few commonly seen ports here to avoid +# some false positives. In addition, we make the main rule opt-in, so +# it's disabled by default. + +- list: test_connect_ports + items: [0, 9, 80, 3306] + +- macro: do_unexpected_udp_check + condition: (never_true) + +- list: expected_udp_ports + items: [53, openvpn_udp_ports, l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports] + +- macro: expected_udp_traffic + condition: fd.port in (expected_udp_ports) + +- rule: Unexpected UDP Traffic + desc: UDP traffic not on port 53 (DNS) or other commonly used ports + condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic + output: > + Unexpected UDP Traffic Seen + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + +# With the current restriction on system calls handled by falco +# (e.g. excluding read/write/sendto/recvfrom/etc, this rule won't +# trigger). +# - rule: Ssh error in syslog +# desc: any ssh errors (failed logins, disconnects, ...) sent to syslog +# condition: syslog and ssh_error_message and evt.dir = < +# output: "sshd sent error message to syslog (error=%evt.buffer)" +# priority: WARNING + +- macro: somebody_becoming_themself + condition: ((user.name=nobody and evt.arg.uid=nobody) or + (user.name=www-data and evt.arg.uid=www-data) or + (user.name=_apt and evt.arg.uid=_apt) or + (user.name=postfix and evt.arg.uid=postfix) or + (user.name=pki-agent and evt.arg.uid=pki-agent) or + (user.name=pki-acme and evt.arg.uid=pki-acme) or + (user.name=nfsnobody and evt.arg.uid=nfsnobody) or + (user.name=postgres and evt.arg.uid=postgres)) + +- macro: nrpe_becoming_nagios + condition: (proc.name=nrpe and evt.arg.uid=nagios) + +# In containers, the user name might be for a uid that exists in the +# container but not on the host. (See +# https://github.com/draios/sysdig/issues/954). So in that case, allow +# a setuid. 
+- macro: known_user_in_container + condition: (container and user.name != "N/A") + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs changing users by calling setuid. +# +# In this file, it just takes one of the condition in the base macro +# and repeats it. +- macro: user_known_non_sudo_setuid_conditions + condition: user.name=root + +# sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs +- rule: Non sudo setuid + desc: > + an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" + suing to itself are also excluded, as setuid calls typically involve dropping privileges. + condition: > + evt.type=setuid and evt.dir=> + and (known_user_in_container or not container) + and not user.name=root + and not somebody_becoming_themself + and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, + nomachine_binaries) + and not proc.name startswith "runc:" + and not java_running_sdjagent + and not nrpe_becoming_nagios + and not user_known_non_sudo_setuid_conditions + output: > + Unexpected setuid call by non-sudo, non-root program (user=%user.name user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname + command=%proc.cmdline uid=%evt.arg.uid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [users, mitre_privilege_escalation] + +- macro: user_known_user_management_activities + condition: (never_true) + +- macro: chage_list + condition: (proc.name=chage and (proc.cmdline contains "-l" or proc.cmdline contains "--list")) + +- rule: User mgmt binaries + desc: > + activity by any programs that can manage users, passwords, or permissions. sudo and su are excluded. + Activity in containers is also excluded--some containers create custom users on top + of a base linux distribution at startup. + Some innocuous commandlines that don't actually change anything are excluded. + condition: > + spawned_process and proc.name in (user_mgmt_binaries) and + not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and + not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and + not proc.cmdline startswith "passwd -S" and + not proc.cmdline startswith "useradd -D" and + not proc.cmdline startswith "systemd --version" and + not run_by_qualys and + not run_by_sumologic_securefiles and + not run_by_yum and + not run_by_ms_oms and + not run_by_google_accounts_daemon and + not chage_list and + not user_known_user_management_activities + output: > + User management binary command run outside of container + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) + priority: NOTICE + tags: [host, users, mitre_persistence] + +- list: allowed_dev_files + items: [ + /dev/null, /dev/stdin, /dev/stdout, /dev/stderr, + /dev/random, /dev/urandom, /dev/console, /dev/kmsg + ] + +- macro: user_known_create_files_below_dev_activities + condition: (never_true) + +# (we may need to add additional checks against false positives, see: +# https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153) +- rule: Create files below dev + desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev. 
+
+  condition: >
+    fd.directory = /dev and
+    (evt.type = creat or ((evt.type = open or evt.type = openat) and evt.arg.flags contains O_CREAT))
+    and not proc.name in (dev_creation_binaries)
+    and not fd.name in (allowed_dev_files)
+    and not fd.name startswith /dev/tty
+    and not user_known_create_files_below_dev_activities
+  output: "File created below /dev by untrusted program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)"
+  priority: ERROR
+  tags: [filesystem, mitre_persistence]
+
+
+# In a local/user rules file, you could override this macro to
+# explicitly enumerate the container images that you want to allow
+# access to EC2 metadata. In this main falco rules file, there isn't
+# any way to know all the containers that should have access, so any
+# container is allowed, by repeating the "container" macro. In the
+# overridden macro, the condition would look something like
+# (container.image.repository = vendor/container-1 or
+# container.image.repository = vendor/container-2 or ...)
+- macro: ec2_metadata_containers
+  condition: container
+
+# On EC2 instances, 169.254.169.254 is a special IP used to fetch
+# metadata about the instance. It may be desirable to prevent access
+# to this IP from containers.
+- rule: Contact EC2 Instance Metadata Service From Container
+  desc: Detect attempts to contact the EC2 Instance Metadata Service from a container
+  condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers
+  output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag)
+  priority: NOTICE
+  tags: [network, aws, container, mitre_discovery]
+
+
+# This rule is not enabled by default, since it applies to cloud environments (GCP, AWS and Azure) only.
+# If you want to enable this rule, overwrite the first macro below.
+# You can also allow specific containers to access the metadata service by overwriting the second macro.
+- macro: consider_metadata_access
+  condition: (never_true)
+
+- macro: user_known_metadata_access
+  condition: (k8s.ns.name = "kube-system")
+
+# On GCP, AWS and Azure, 169.254.169.254 is a special IP used to fetch
+# metadata about the instance. The metadata could be used by attackers to obtain credentials.
+- rule: Contact cloud metadata service from container
+  desc: Detect attempts to contact the Cloud Instance Metadata Service from a container
+  condition: outbound and fd.sip="169.254.169.254" and container and consider_metadata_access and not user_known_metadata_access
+  output: Outbound connection to cloud instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag)
+  priority: NOTICE
+  tags: [network, container, mitre_discovery]
+
+
+# In a local/user rules file, list the namespace or container images that are
+# allowed to contact the K8s API Server from within a container. This
+# might cover cases where the K8s infrastructure itself is running
+# within a container.
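+#
+# A sketch of such an override; the example.com image is hypothetical, the
+# other entries already appear in the default macro below:
+# - macro: k8s_containers
+#   condition: >
+#     (container.image.repository in (gcr.io/google_containers/hyperkube-amd64,
+#      docker.io/falcosecurity/falco, example.com/infra/cluster-operator) or
+#      (k8s.ns.name = "kube-system"))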
+- macro: k8s_containers + condition: > + (container.image.repository in (gcr.io/google_containers/hyperkube-amd64, + gcr.io/google_containers/kube2sky, docker.io/sysdig/falco, + docker.io/sysdig/sysdig, docker.io/falcosecurity/falco, + sysdig/falco, sysdig/sysdig, falcosecurity/falco) or (k8s.ns.name = "kube-system")) + +- macro: k8s_api_server + condition: (fd.sip.name="kubernetes.default.svc.cluster.local") + +- macro: user_known_contact_k8s_api_server_activities + condition: (never_true) + +- rule: Contact K8S API Server From Container + desc: Detect attempts to contact the K8S API Server from a container + condition: > + evt.type=connect and evt.dir=< and + (fd.typechar=4 or fd.typechar=6) and + container and + not k8s_containers and + k8s_api_server and + not user_known_contact_k8s_api_server_activities + output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag connection=%fd.name) + priority: NOTICE + tags: [network, k8s, container, mitre_discovery] + +# In a local/user rules file, list the container images that are +# allowed to contact NodePort services from within a container. This +# might cover cases where the K8s infrastructure itself is running +# within a container. +# +# By default, all containers are allowed to contact NodePort services. +- macro: nodeport_containers + condition: container + +- rule: Unexpected K8s NodePort Connection + desc: Detect attempts to use K8s NodePorts from a container + condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 and container and not nodeport_containers + output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, k8s, container, mitre_port_knocking] + +- list: network_tool_binaries + items: [nc, ncat, nmap, dig, tcpdump, tshark, ngrep, telnet, mitmproxy, socat, zmap] + +- macro: network_tool_procs + condition: (proc.name in (network_tool_binaries)) + +# In a local/user rules file, create a condition that matches legitimate uses +# of a package management process inside a container. +# +# For example: +# - macro: user_known_package_manager_in_container +# condition: proc.cmdline="dpkg -l" +- macro: user_known_package_manager_in_container + condition: (never_true) + +# Container is supposed to be immutable. Package management should be done in building the image. 
+- rule: Launch Package Management Process in Container + desc: Package management process ran inside container + condition: > + spawned_process + and container + and user.name != "_apt" + and package_mgmt_procs + and not package_mgmt_ancestor_procs + and not user_known_package_manager_in_container + output: > + Package management process launched in container (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: ERROR + tags: [process, mitre_persistence] + +- rule: Netcat Remote Code Execution in Container + desc: Netcat Program runs inside container that allows remote code execution + condition: > + spawned_process and container and + ((proc.name = "nc" and (proc.args contains "-e" or proc.args contains "-c")) or + (proc.name = "ncat" and (proc.args contains "--sh-exec" or proc.args contains "--exec" or proc.args contains "-e " + or proc.args contains "-c " or proc.args contains "--lua-exec")) + ) + output: > + Netcat runs inside container that allows remote code execution (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [network, process, mitre_execution] + +- macro: user_known_network_tool_activities + condition: (never_true) + +- rule: Launch Suspicious Network Tool in Container + desc: Detect network tools launched inside container + condition: > + spawned_process and container and network_tool_procs and not user_known_network_tool_activities + output: > + Network tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname + container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, process, mitre_discovery, mitre_exfiltration] + +# This rule is not enabled by default, as there are legitimate use +# cases for these tools on hosts. If you want to enable it, modify the +# following macro. +- macro: consider_network_tools_on_host + condition: (never_true) + +- rule: Launch Suspicious Network Tool on Host + desc: Detect network tools launched on the host + condition: > + spawned_process and + not container and + consider_network_tools_on_host and + network_tool_procs and + not user_known_network_tool_activities + output: > + Network tool launched on host (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname) + priority: NOTICE + tags: [network, process, mitre_discovery, mitre_exfiltration] + +- list: grep_binaries + items: [grep, egrep, fgrep] + +- macro: grep_commands + condition: (proc.name in (grep_binaries)) + +# a less restrictive search for things that might be passwords/ssh/user etc. +- macro: grep_more + condition: (never_true) + +- macro: private_key_or_password + condition: > + (proc.args icontains "BEGIN PRIVATE" or + proc.args icontains "BEGIN RSA PRIVATE" or + proc.args icontains "BEGIN DSA PRIVATE" or + proc.args icontains "BEGIN EC PRIVATE" or + (grep_more and + (proc.args icontains " pass " or + proc.args icontains " ssh " or + proc.args icontains " user ")) + ) + +- rule: Search Private Keys or Passwords + desc: > + Detect grep private keys or passwords activity. 
+ condition: > + (spawned_process and + ((grep_commands and private_key_or_password) or + (proc.name = "find" and (proc.args contains "id_rsa" or proc.args contains "id_dsa"))) + ) + output: > + Grep private keys or passwords activities found + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id container_name=%container.name + image=%container.image.repository:%container.image.tag) + priority: + WARNING + tags: [process, mitre_credential_access] + +- list: log_directories + items: [/var/log, /dev/log] + +- list: log_files + items: [syslog, auth.log, secure, kern.log, cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log] + +- macro: access_log_files + condition: (fd.directory in (log_directories) or fd.filename in (log_files)) + +# a placeholder for whitelist log files that could be cleared. Recommend the macro as (fd.name startswith "/var/log/app1*") +- macro: allowed_clear_log_files + condition: (never_true) + +- macro: trusted_logging_images + condition: (container.image.repository endswith "splunk/fluentd-hec" or + container.image.repository endswith "fluent/fluentd-kubernetes-daemonset" or + container.image.repository endswith "openshift3/ose-logging-fluentd" or + container.image.repository endswith "containernetworking/azure-npm") + +- rule: Clear Log Activities + desc: Detect clearing of critical log files + condition: > + open_write and + access_log_files and + evt.arg.flags contains "O_TRUNC" and + not trusted_logging_images and + not allowed_clear_log_files + output: > + Log files were tampered (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_defense_evasion] + +- list: data_remove_commands + items: [shred, mkfs, mke2fs] + +- macro: clear_data_procs + condition: (proc.name in (data_remove_commands)) + +- macro: user_known_remove_data_activities + condition: (never_true) + +- rule: Remove Bulk Data from Disk + desc: Detect process running to clear bulk data from disk + condition: spawned_process and clear_data_procs and not user_known_remove_data_activities + output: > + Bulk data has been removed from disk (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [process, mitre_persistence] + +- macro: modify_shell_history + condition: > + (modify and ( + evt.arg.name contains "bash_history" or + evt.arg.name contains "zsh_history" or + evt.arg.name contains "fish_read_history" or + evt.arg.name endswith "fish_history" or + evt.arg.oldpath contains "bash_history" or + evt.arg.oldpath contains "zsh_history" or + evt.arg.oldpath contains "fish_read_history" or + evt.arg.oldpath endswith "fish_history" or + evt.arg.path contains "bash_history" or + evt.arg.path contains "zsh_history" or + evt.arg.path contains "fish_read_history" or + evt.arg.path endswith "fish_history")) + +- macro: truncate_shell_history + condition: > + (open_write and ( + fd.name contains "bash_history" or + fd.name contains "zsh_history" or + fd.name contains "fish_read_history" or + fd.name endswith "fish_history") and evt.arg.flags contains "O_TRUNC") + +- macro: var_lib_docker_filepath + condition: (evt.arg.name startswith /var/lib/docker or fd.name startswith /var/lib/docker) + +- rule: Delete or rename shell history + desc: Detect shell history deletion + condition: > + 
(modify_shell_history or truncate_shell_history) and + not var_lib_docker_filepath and + not proc.name in (docker_binaries) + output: > + Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) + priority: + WARNING + tags: [process, mitre_defense_evasion] + +# This rule is deprecated and will/should never be triggered. Keep it here for backport compatibility. +# Rule Delete or rename shell history is the preferred rule to use now. +- rule: Delete Bash History + desc: Detect bash history deletion + condition: > + ((spawned_process and proc.name in (shred, rm, mv) and proc.args contains "bash_history") or + (open_write and fd.name contains "bash_history" and evt.arg.flags contains "O_TRUNC")) + output: > + Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) + priority: + WARNING + tags: [process, mitre_defense_evasion] + +- macro: consider_all_chmods + condition: (always_true) + +- list: user_known_chmod_applications + items: [hyperkube, kubelet] + +# This macro should be overridden in user rules as needed. This is useful if a given application +# should not be ignored alltogether with the user_known_chmod_applications list, but only in +# specific conditions. +- macro: user_known_set_setuid_or_setgid_bit_conditions + condition: (never_true) + +- rule: Set Setuid or Setgid bit + desc: > + When the setuid or setgid bits are set for an application, + this means that the application will run with the privileges of the owning user or group respectively. + Detect setuid or setgid bits set via chmod + condition: > + consider_all_chmods and chmod and (evt.arg.mode contains "S_ISUID" or evt.arg.mode contains "S_ISGID") + and not proc.name in (user_known_chmod_applications) + and not exe_running_docker_save + and not user_known_set_setuid_or_setgid_bit_conditions + output: > + Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename mode=%evt.arg.mode user=%user.name user_loginuid=%user.loginuid process=%proc.name + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tags: [process, mitre_persistence] + +- list: exclude_hidden_directories + items: [/root/.cassandra] + +# To use this rule, you should modify consider_hidden_file_creation. +- macro: consider_hidden_file_creation + condition: (never_true) + +- macro: user_known_create_hidden_file_activities + condition: (never_true) + +- rule: Create Hidden Files or Directories + desc: Detect hidden files or directories created + condition: > + ((modify and evt.arg.newpath contains "/.") or + (mkdir and evt.arg.path contains "/.") or + (open_write and evt.arg.flags contains "O_CREAT" and fd.name contains "/." 
and not fd.name pmatch (exclude_hidden_directories))) and + consider_hidden_file_creation and + not user_known_create_hidden_file_activities + output: > + Hidden file or directory created (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + file=%fd.name newpath=%evt.arg.newpath container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tags: [file, mitre_persistence] + +- list: remote_file_copy_binaries + items: [rsync, scp, sftp, dcp] + +- macro: remote_file_copy_procs + condition: (proc.name in (remote_file_copy_binaries)) + +# Users should overwrite this macro to specify conditions under which a +# Custom condition for use of remote file copy tool in container +- macro: user_known_remote_file_copy_activities + condition: (never_true) + +- rule: Launch Remote File Copy Tools in Container + desc: Detect remote file copy tools launched in container + condition: > + spawned_process + and container + and remote_file_copy_procs + and not user_known_remote_file_copy_activities + output: > + Remote file copy tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname + container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, process, mitre_lateral_movement, mitre_exfiltration] + +- rule: Create Symlink Over Sensitive Files + desc: Detect symlink created over sensitive files + condition: > + create_symlink and + (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names)) + output: > + Symlinks created over senstivie files (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline target=%evt.arg.target linkpath=%evt.arg.linkpath parent_process=%proc.pname) + priority: NOTICE + tags: [file, mitre_exfiltration] + +- list: miner_ports + items: [ + 25, 3333, 3334, 3335, 3336, 3357, 4444, + 5555, 5556, 5588, 5730, 6099, 6666, 7777, + 7778, 8000, 8001, 8008, 8080, 8118, 8333, + 8888, 8899, 9332, 9999, 14433, 14444, + 45560, 45700 + ] + +- list: miner_domains + items: [ + "asia1.ethpool.org","ca.minexmr.com", + "cn.stratum.slushpool.com","de.minexmr.com", + "eth-ar.dwarfpool.com","eth-asia.dwarfpool.com", + "eth-asia1.nanopool.org","eth-au.dwarfpool.com", + "eth-au1.nanopool.org","eth-br.dwarfpool.com", + "eth-cn.dwarfpool.com","eth-cn2.dwarfpool.com", + "eth-eu.dwarfpool.com","eth-eu1.nanopool.org", + "eth-eu2.nanopool.org","eth-hk.dwarfpool.com", + "eth-jp1.nanopool.org","eth-ru.dwarfpool.com", + "eth-ru2.dwarfpool.com","eth-sg.dwarfpool.com", + "eth-us-east1.nanopool.org","eth-us-west1.nanopool.org", + "eth-us.dwarfpool.com","eth-us2.dwarfpool.com", + "eu.stratum.slushpool.com","eu1.ethermine.org", + "eu1.ethpool.org","fr.minexmr.com", + "mine.moneropool.com","mine.xmrpool.net", + "pool.minexmr.com","pool.monero.hashvault.pro", + "pool.supportxmr.com","sg.minexmr.com", + "sg.stratum.slushpool.com","stratum-eth.antpool.com", + "stratum-ltc.antpool.com","stratum-zec.antpool.com", + "stratum.antpool.com","us-east.stratum.slushpool.com", + "us1.ethermine.org","us1.ethpool.org", + "us2.ethermine.org","us2.ethpool.org", + "xmr-asia1.nanopool.org","xmr-au1.nanopool.org", + "xmr-eu1.nanopool.org","xmr-eu2.nanopool.org", + "xmr-jp1.nanopool.org","xmr-us-east1.nanopool.org", + "xmr-us-west1.nanopool.org","xmr.crypto-pool.fr", + "xmr.pool.minergate.com" + ] + +- list: https_miner_domains + items: [ + "ca.minexmr.com", + 
"cn.stratum.slushpool.com", + "de.minexmr.com", + "fr.minexmr.com", + "mine.moneropool.com", + "mine.xmrpool.net", + "pool.minexmr.com", + "sg.minexmr.com", + "stratum-eth.antpool.com", + "stratum-ltc.antpool.com", + "stratum-zec.antpool.com", + "stratum.antpool.com", + "xmr.crypto-pool.fr" + ] + +- list: http_miner_domains + items: [ + "ca.minexmr.com", + "de.minexmr.com", + "fr.minexmr.com", + "mine.moneropool.com", + "mine.xmrpool.net", + "pool.minexmr.com", + "sg.minexmr.com", + "xmr.crypto-pool.fr" + ] + +# Add rule based on crypto mining IOCs +- macro: minerpool_https + condition: (fd.sport="443" and fd.sip.name in (https_miner_domains)) + +- macro: minerpool_http + condition: (fd.sport="80" and fd.sip.name in (http_miner_domains)) + +- macro: minerpool_other + condition: (fd.sport in (miner_ports) and fd.sip.name in (miner_domains)) + +- macro: net_miner_pool + condition: (evt.type in (sendto, sendmsg) and evt.dir=< and (fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and ((minerpool_http) or (minerpool_https) or (minerpool_other))) + +- macro: trusted_images_query_miner_domain_dns + condition: (container.image.repository in (docker.io/falcosecurity/falco, falcosecurity/falco)) + append: false + +# The rule is disabled by default. +# Note: falco will send DNS request to resolve miner pool domain which may trigger alerts in your environment. +- rule: Detect outbound connections to common miner pool ports + desc: Miners typically connect to miner pools on common ports. + condition: net_miner_pool and not trusted_images_query_miner_domain_dns + enabled: false + output: Outbound connection to IP/Port flagged by cryptoioc.ch (command=%proc.cmdline port=%fd.rport ip=%fd.rip container=%container.info image=%container.image.repository) + priority: CRITICAL + tags: [network, mitre_execution] + +- rule: Detect crypto miners using the Stratum protocol + desc: Miners typically specify the mining pool to connect to with a URI that begins with 'stratum+tcp' + condition: spawned_process and proc.cmdline contains "stratum+tcp" + output: Possible miner running (command=%proc.cmdline container=%container.info image=%container.image.repository) + priority: CRITICAL + tags: [process, mitre_execution] + +- list: k8s_client_binaries + items: [docker, kubectl, crictl] + +- list: user_known_k8s_ns_kube_system_images + items: [ + k8s.gcr.io/fluentd-gcp-scaler, + k8s.gcr.io/node-problem-detector/node-problem-detector + ] + +- list: user_known_k8s_images + items: [ + mcr.microsoft.com/aks/hcp/hcp-tunnel-front + ] + +# Whitelist for known docker client binaries run inside container +# - k8s.gcr.io/fluentd-gcp-scaler in GCP/GKE +- macro: user_known_k8s_client_container + condition: > + (k8s.ns.name="kube-system" and container.image.repository in (user_known_k8s_ns_kube_system_images)) or container.image.repository in (user_known_k8s_images) + +- macro: user_known_k8s_client_container_parens + condition: (user_known_k8s_client_container) + +- rule: The docker client is executed in a container + desc: Detect a k8s client tool executed inside a container + condition: spawned_process and container and not user_known_k8s_client_container_parens and proc.name in (k8s_client_binaries) + output: "Docker or kubernetes client executed in container (user=%user.name user_loginuid=%user.loginuid %container.info parent=%proc.pname cmdline=%proc.cmdline image=%container.image.repository:%container.image.tag)" + priority: WARNING + tags: [container, mitre_execution] + + +# This rule is enabled by default. 
+# If you want to disable it, modify the following macro. +- macro: consider_packet_socket_communication + condition: (always_true) + +- list: user_known_packet_socket_binaries + items: [] + +- rule: Packet socket created in container + desc: Detect new packet socket at the device driver (OSI Layer 2) level in a container. Packet socket could be used for ARP Spoofing and privilege escalation(CVE-2020-14386) by attacker. + condition: evt.type=socket and evt.arg[0]=AF_PACKET and consider_packet_socket_communication and container and not proc.name in (user_known_packet_socket_binaries) + output: Packet socket was created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline socket_info=%evt.args container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, mitre_discovery] + +# Change to (always_true) to enable rule 'Network connection outside local subnet' +- macro: enabled_rule_network_only_subnet + condition: (never_true) + +# Images that are allowed to have outbound traffic +- list: images_allow_network_outside_subnet + items: [] + +# Namespaces where the rule is enforce +- list: namespace_scope_network_only_subnet + items: [] + +- macro: network_local_subnet + condition: > + fd.rnet in (rfc_1918_addresses) or + fd.ip = "0.0.0.0" or + fd.net = "127.0.0.0/8" + +# # How to test: +# # Change macro enabled_rule_network_only_subnet to condition: always_true +# # Add 'default' to namespace_scope_network_only_subnet +# # Run: +# kubectl run --generator=run-pod/v1 -n default -i --tty busybox --image=busybox --rm -- wget google.com -O /var/google.html +# # Check logs running + +- rule: Network Connection outside Local Subnet + desc: Detect traffic to image outside local subnet. + condition: > + enabled_rule_network_only_subnet and + inbound_outbound and + container and + not network_local_subnet and + k8s.ns.name in (namespace_scope_network_only_subnet) + output: > + Network connection outside local subnet + (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id + image=%container.image.repository namespace=%k8s.ns.name + fd.rip.name=%fd.rip.name fd.lip.name=%fd.lip.name fd.cip.name=%fd.cip.name fd.sip.name=%fd.sip.name) + priority: WARNING + tags: [network] + +- macro: allowed_port + condition: (never_true) + +- list: allowed_image + items: [] # add image to monitor, i.e.: bitnami/nginx + +- list: authorized_server_binaries + items: [] # add binary to allow, i.e.: nginx + +- list: authorized_server_port + items: [] # add port to allow, i.e.: 80 + +# # How to test: +# kubectl run --image=nginx nginx-app --port=80 --env="DOMAIN=cluster" +# kubectl expose deployment nginx-app --port=80 --name=nginx-http --type=LoadBalancer +# # On minikube: +# minikube service nginx-http +# # On general K8s: +# kubectl get services +# kubectl cluster-info +# # Visit the Nginx service and port, should not fire. +# # Change rule to different port, then different process name, and test again that it fires. + +- rule: Outbound or Inbound Traffic not to Authorized Server Process and Port + desc: Detect traffic that is not to authorized server process and port. 
+ condition: > + allowed_port and + inbound_outbound and + container and + container.image.repository in (allowed_image) and + not proc.name in (authorized_server_binaries) and + not fd.sport in (authorized_server_port) + output: > + Network connection outside authorized port and binary + (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id + image=%container.image.repository) + priority: WARNING + tags: [network] + +- macro: user_known_stand_streams_redirect_activities + condition: (never_true) + +- rule: Redirect STDOUT/STDIN to Network Connection in Container + desc: Detect redirection of stdout/stdin to a network connection in a container (potential reverse shell). + condition: evt.type=dup and evt.dir=> and container and fd.num in (0, 1, 2) and fd.type in ("ipv4", "ipv6") and not user_known_stand_streams_redirect_activities + output: > + Redirect stdout/stdin to network connection (user=%user.name user_loginuid=%user.loginuid %container.info process=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository fd.name=%fd.name fd.num=%fd.num fd.type=%fd.type fd.sip=%fd.sip) + priority: WARNING + +# The two Container Drift rules below will fire when a new executable is created in a container. +# There are two ways to create executables - a file is created with execute permissions, or the permissions of an existing file are changed. +# We will use a new sysdig filter, is_open_exec, to find all file creations with execute permission, and will trace all chmods in a container. +# The use case we are targeting here is an attempt to execute code that was not shipped as part of a container (drift) - +# an activity that might be malicious or non-compliant. +# Two things to pay attention to: +# 1) In most cases, 'docker cp' will not be identified, but the assumption is that if an attacker gained access to the container runtime daemon, they are already privileged +# 2) Drift rules will be noisy in environments in which containers are built (e.g. docker build) +# These two rules are not enabled by default. Use `never_true` in the macro condition below to enable them.
+ +- macro: user_known_container_drift_activities + condition: (always_true) + +- rule: Container Drift Detected (chmod) + desc: New executable created in a container due to chmod + condition: > + chmod and + consider_all_chmods and + container and + not runc_writing_exec_fifo and + not runc_writing_var_lib_docker and + not user_known_container_drift_activities and + evt.rawres>=0 and + ((evt.arg.mode contains "S_IXUSR") or + (evt.arg.mode contains "S_IXGRP") or + (evt.arg.mode contains "S_IXOTH")) + output: Drift detected (chmod), new executable created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) + priority: ERROR + +# **************************************************************************** +# * "Container Drift Detected (open+create)" requires FALCO_ENGINE_VERSION 6 * +# **************************************************************************** +- rule: Container Drift Detected (open+create) + desc: New executable created in a container due to open+create + condition: > + evt.type in (open,openat,creat) and + evt.is_open_exec=true and + container and + not runc_writing_exec_fifo and + not runc_writing_var_lib_docker and + not user_known_container_drift_activities and + evt.rawres>=0 + output: Drift detected (open+create), new executable created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) + priority: ERROR + +- list: c2_server_ip_list + items: [] + +- rule: Outbound Connection to C2 Servers + desc: Detect outbound connection to command & control servers + condition: outbound and fd.sip in (c2_server_ip_list) + output: Outbound connection to C2 server (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [network] + +- list: white_listed_modules + items: [] + +- rule: Linux Kernel Module Injection Detected + desc: Detect kernel module injection (from a container). + condition: spawned_process and container and proc.name=insmod and not proc.args in (white_listed_modules) + output: Linux Kernel Module injection using insmod detected (user=%user.name user_loginuid=%user.loginuid parent_process=%proc.pname module=%proc.args) + priority: WARNING + tags: [process] + +# Application rules have moved to application_rules.yaml. Please look +# there if you want to enable them by adding to +# falco_rules.local.yaml.
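+# The two Container Drift rules above ship disabled: while user_known_container_drift_activities is (always_true), their "not user_known_container_drift_activities" clause can never match. +# A minimal sketch of one way to enable them, following the note above - append a macro override such as the commented example below to a local rules file (e.g. falco_rules.local.yaml, referenced above; the exact file and path depend on your deployment), keeping in mind that these rules can be noisy where images are built: +# +# - macro: user_known_container_drift_activities +#   condition: (never_true)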
\ No newline at end of file diff --git a/resources/policies/tests/unit_test_contains.yaml b/resources/policies/tests/unit_test_contains.yaml index 0fdf76af..aa87705a 100644 --- a/resources/policies/tests/unit_test_contains.yaml +++ b/resources/policies/tests/unit_test_contains.yaml @@ -1,13 +1,11 @@ - rule: Contains rule desc: unit test Contains rule condition: sf.container.name contains node and sf.proc.exe contains /usr/bin/python - action: [alert] priority: low tags: [test] - rule: iContains rule desc: unit test iContains rule condition: sf.container.name contains node and sf.proc.exe icontains /USR/BIN/PYTHON - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_eq.yaml b/resources/policies/tests/unit_test_eq.yaml index 61861579..3d6fef94 100644 --- a/resources/policies/tests/unit_test_eq.yaml +++ b/resources/policies/tests/unit_test_eq.yaml @@ -1,6 +1,5 @@ - rule: Eq rule desc: unit test eq rule condition: sf.container.name contains node and sf.proc.exe=/usr/bin/python and sf.ts=1548965940603299403 - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_fileflow.yaml b/resources/policies/tests/unit_test_fileflow.yaml index 2fcba0ab..b732a14c 100644 --- a/resources/policies/tests/unit_test_fileflow.yaml +++ b/resources/policies/tests/unit_test_fileflow.yaml @@ -4,6 +4,5 @@ and sf.type=FF and sf.is_open_write=true and sf.proc.exe contains python - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_ge.yaml b/resources/policies/tests/unit_test_ge.yaml index 47b9c08f..989c7e0a 100644 --- a/resources/policies/tests/unit_test_ge.yaml +++ b/resources/policies/tests/unit_test_ge.yaml @@ -1,6 +1,5 @@ - rule: Ge rule desc: unit test Ge rule condition: sf.container.name contains node and sf.proc.exe=/usr/bin/python and sf.ts>=1548965940603299403 - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_gt.yaml b/resources/policies/tests/unit_test_gt.yaml index a4503015..715ca80e 100644 --- a/resources/policies/tests/unit_test_gt.yaml +++ b/resources/policies/tests/unit_test_gt.yaml @@ -1,6 +1,5 @@ - rule: Gt rule desc: unit test Gt rule condition: sf.container.name contains node and sf.proc.exe=/usr/bin/python and sf.ts>1548965940603299403 - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_in.yaml b/resources/policies/tests/unit_test_in.yaml index d3adced5..dafcc655 100644 --- a/resources/policies/tests/unit_test_in.yaml +++ b/resources/policies/tests/unit_test_in.yaml @@ -7,6 +7,5 @@ - rule: In rule desc: Unit test In rule condition: sf.container.name contains node and in_macro and sf.proc.args startswith cos-write.py - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_le.yaml b/resources/policies/tests/unit_test_le.yaml index e012482a..2a7c973a 100644 --- a/resources/policies/tests/unit_test_le.yaml +++ b/resources/policies/tests/unit_test_le.yaml @@ -1,6 +1,5 @@ - rule: Le rule desc: unit test Le rule condition: sf.container.name contains node and sf.type=NF and sf.proc.exe=/usr/bin/python and sf.ts<=1548965940603299403 - action: [alert] priority: low - tags: [test] \ No newline at end of file + 
tags: [test] diff --git a/resources/policies/tests/unit_test_logic.yaml b/resources/policies/tests/unit_test_logic.yaml index 5c2e5d94..ea1ccdf2 100644 --- a/resources/policies/tests/unit_test_logic.yaml +++ b/resources/policies/tests/unit_test_logic.yaml @@ -12,7 +12,6 @@ in_macro and sf.proc.args startswith cos-write.py and (in_macro and (sf.proc.exe=/usr/bin/python or sf.proc.args startswith cos-write.py)) - action: [alert] priority: low tags: [test] @@ -24,6 +23,5 @@ ((((((in_macro)))))) and (sf.proc.args startswith cos-write.py and (in_macro and (sf.proc.exe=/usr/bin/python or sf.proc.args startswith cos-write.py))) - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_lt.yaml b/resources/policies/tests/unit_test_lt.yaml index 08afe618..50d38b43 100644 --- a/resources/policies/tests/unit_test_lt.yaml +++ b/resources/policies/tests/unit_test_lt.yaml @@ -1,6 +1,5 @@ - rule: Lt rule desc: unit test Lt rule condition: sf.container.name contains node and sf.type=FF and sf.proc.exe=/usr/bin/python and sf.ts<1548965940603299403 - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_macro.yaml b/resources/policies/tests/unit_test_macro.yaml index 9c99c282..a9400ed1 100644 --- a/resources/policies/tests/unit_test_macro.yaml +++ b/resources/policies/tests/unit_test_macro.yaml @@ -4,6 +4,5 @@ - rule: Simple rule to test if Python process desc: unit test macro rule condition: sf.container.name contains node and sf.state=CREATE and sf.type=PE and is_python - action: [alert] priority: low tags: [test] diff --git a/resources/policies/tests/unit_test_networkflow.yaml b/resources/policies/tests/unit_test_networkflow.yaml index 0eadbf1b..d4258a22 100644 --- a/resources/policies/tests/unit_test_networkflow.yaml +++ b/resources/policies/tests/unit_test_networkflow.yaml @@ -3,6 +3,5 @@ condition: sf.container.name contains node and sf.type=NF and sf.proc.exe contains python - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_pmatch.yaml b/resources/policies/tests/unit_test_pmatch.yaml index 7ab4b425..8169ae0c 100644 --- a/resources/policies/tests/unit_test_pmatch.yaml +++ b/resources/policies/tests/unit_test_pmatch.yaml @@ -7,6 +7,5 @@ - rule: Pmatch rule desc: Unit test Pmatch rule condition: sf.container.name contains node and sf.type=PE and pmatch_macro and sf.proc.args startswith cos-write.py - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_sfmap.yaml b/resources/policies/tests/unit_test_sfmap.yaml index aa2009de..e77ef89f 100644 --- a/resources/policies/tests/unit_test_sfmap.yaml +++ b/resources/policies/tests/unit_test_sfmap.yaml @@ -1,6 +1,5 @@ - rule: Sf map rule desc: unit test Sf map rule condition: sf.proc.name=python - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_simple.yaml b/resources/policies/tests/unit_test_simple.yaml index 1afa2f13..777b7dd3 100644 --- a/resources/policies/tests/unit_test_simple.yaml +++ b/resources/policies/tests/unit_test_simple.yaml @@ -1,41 +1,35 @@ - rule: Simple rule 1 desc: unit test rule condition: sf.container.name contains node and sf.type=PE and sf.proc.exe contains python - action: [alert] priority: low tags: [test] - rule: Simple 
rule 2 desc: unit test rule condition: (sf.container.name contains node and sf.type=PE and sf.proc.exe contains python) - action: [alert] priority: low tags: [test] - rule: Simple rule 3 desc: unit test rule condition: sf.container.name contains node and (sf.type=PE and sf.proc.exe contains python) - action: [alert] priority: low tags: [test] - rule: Simple rule 4 desc: unit test rule condition: sf.container.name contains node and sf.type=PE and sf.proc.exe = /usr/bin/python - action: [alert] priority: low tags: [test] - rule: Simple rule 5 desc: unit test rule condition: (sf.container.name contains node and sf.type=PE and sf.proc.exe = /usr/bin/python) - action: [alert] priority: low tags: [test] - rule: Simple rule 6 desc: unit test rule condition: sf.container.name contains node and (sf.type=PE and proc.exe = /usr/bin/python) - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_startswith.yaml b/resources/policies/tests/unit_test_startswith.yaml index 44726c50..8d0a8ed3 100644 --- a/resources/policies/tests/unit_test_startswith.yaml +++ b/resources/policies/tests/unit_test_startswith.yaml @@ -1,6 +1,5 @@ - rule: Startswith rule desc: unit test Startswith rule condition: sf.container.name contains node and sf.proc.args startswith cos-write.py - action: [alert] priority: low - tags: [test] \ No newline at end of file + tags: [test] diff --git a/resources/policies/tests/unit_test_unary.yaml b/resources/policies/tests/unit_test_unary.yaml index 58e662ef..773c5e07 100644 --- a/resources/policies/tests/unit_test_unary.yaml +++ b/resources/policies/tests/unit_test_unary.yaml @@ -1,6 +1,5 @@ - rule: Exists rule desc: unit test unary operator condition: sf.container.name contains node and sf.pproc.name exists - action: [alert] priority: low tags: [test] diff --git a/resources/traces/1621959914 b/resources/traces/1621959914 new file mode 100644 index 00000000..151aa5f9 Binary files /dev/null and b/resources/traces/1621959914 differ diff --git a/resources/traces/k8s_large.sf b/resources/traces/k8s_large.sf new file mode 100644 index 00000000..bcdacb08 Binary files /dev/null and b/resources/traces/k8s_large.sf differ diff --git a/resources/traces/k8s_small.sf b/resources/traces/k8s_small.sf new file mode 100644 index 00000000..a95a6f41 Binary files /dev/null and b/resources/traces/k8s_small.sf differ diff --git a/resources/traces/shellshock.sf b/resources/traces/shellshock.sf new file mode 100644 index 00000000..eb66c369 Binary files /dev/null and b/resources/traces/shellshock.sf differ diff --git a/scripts/build/update-sfapis.sh b/scripts/build/update-sfapis.sh new file mode 100755 index 00000000..54d0132a --- /dev/null +++ b/scripts/build/update-sfapis.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Usage: update-sfapis.sh [tag|branch] +DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +for gomod in $(find $DIR/../.. 
-name go.mod); do + match=$(grep -w github.com/sysflow-telemetry/sf-apis/go $gomod) + if [ -n "$match" ]; then + echo "Updating $gomod" + CDIR=$(pwd) + cd $(dirname $gomod) + go get -u github.com/sysflow-telemetry/sf-apis/go@$1 + go mod tidy + cd $CDIR + fi +done \ No newline at end of file diff --git a/scripts/cpack/CPackConfig.cmake b/scripts/cpack/CPackConfig.cmake new file mode 100644 index 00000000..f7e3b955 --- /dev/null +++ b/scripts/cpack/CPackConfig.cmake @@ -0,0 +1,66 @@ +set(CPACK_PACKAGE_NAME "sfprocessor") +set(CPACK_PACKAGE_CONTACT "sysflow.io") +set(CPACK_PACKAGE_VENDOR "SysFlow") +set(CPACK_PACKAGE_DESCRIPTION "The SysFlow Processor implements a pluggable stream-processing pipeline and contains a built-in policy engine that evaluates rules on the ingested SysFlow stream") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "SysFlow stream processing agent") +set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_LIST_DIR}/build/LICENSE.md") +set(CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_LIST_DIR}/build/README.md") +set(CPACK_STRIP_FILES "ON") +set(CPACK_PACKAGE_RELOCATABLE "OFF") + +set(CPACK_PACKAGE_VERSION "$ENV{SYSFLOW_VERSION}") +if(NOT CPACK_PACKAGE_VERSION) + set(CPACK_PACKAGE_VERSION "0.0.0") +else() + # Remove the starting "v" in case there is one + string(REGEX REPLACE "^v(.*)" "\\1" CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}") + + # Remove any release suffixes in case there is one + # string(REGEX REPLACE "-.*" "" CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}") +endif() +# Parse version into its major, minor, patch components +string(REGEX MATCH "^(0|[1-9][0-9]*)" CPACK_PACKAGE_VERSION_MAJOR "${CPACK_PACKAGE_VERSION}") +string(REGEX REPLACE "^(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\..*" "\\2" CPACK_PACKAGE_VERSION_MINOR "${CPACK_PACKAGE_VERSION}") +string(REGEX REPLACE "^(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*).*" "\\3" CPACK_PACKAGE_VERSION_PATCH "${CPACK_PACKAGE_VERSION}") + +if(NOT CPACK_GENERATOR) + set(CPACK_GENERATOR DEB RPM TGZ) +endif() + +message(STATUS "Packaging SysFlow ${CPACK_PACKAGE_VERSION}") +message(STATUS "Using package generators: ${CPACK_GENERATOR}") +message(STATUS "Package architecture: ${CMAKE_SYSTEM_PROCESSOR}") + +# DEB +set(CPACK_DEBIAN_PACKAGE_SECTION "utils") +if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64") + set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "amd64") +endif() +if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64") + set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "arm64") +endif() +set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/sysflow-telemetry/sf-processor") + +# RPM +set(CPACK_RPM_PACKAGE_LICENSE "Apache v2.0") +set(CPACK_RPM_PACKAGE_ARCHITECTURE, "${CMAKE_SYSTEM_PROCESSOR}") +set(CPACK_RPM_PACKAGE_URL "https://github.com/sysflow-telemetry/sf-processor") +set(CPACK_RPM_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}") +set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION + /usr/src + /usr/share/man + /usr/share/man/man8 + /etc + /etc/sysflow + /usr + /usr/bin + /usr/share + /usr/lib + /usr/lib/systemd + /usr/lib/systemd/system) +set(CPACK_RPM_PACKAGE_RELOCATABLE "OFF") + +# Contents +set(CPACK_PACKAGE_FILE_NAME ${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CMAKE_SYSTEM_PROCESSOR}) +set(CPACK_INSTALLED_DIRECTORIES "${CMAKE_CURRENT_LIST_DIR}/build/bin" "/usr/bin" "${CMAKE_CURRENT_LIST_DIR}/build/resources" "/etc/sysflow" "${CMAKE_CURRENT_LIST_DIR}/build/service" "/usr/lib/systemd/system") + diff --git a/scripts/cpack/clean.sh b/scripts/cpack/clean.sh new file mode 100755 index 00000000..1d35bd03 --- /dev/null +++ b/scripts/cpack/clean.sh @@ -0,0 
+1,4 @@ +#!/bin/sh +set -e +rm -rf build _CPack_Packages sfprocessor* + diff --git a/scripts/cpack/prepackage.sh b/scripts/cpack/prepackage.sh new file mode 100755 index 00000000..1621f6c5 --- /dev/null +++ b/scripts/cpack/prepackage.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cd $DIR && rm -rf build && mkdir -p build +cp -a $DIR/../../bin $DIR/build/bin +cp -a $DIR/../service/systemd $DIR/build/service +mkdir -p $DIR/build/resources +cp -a $DIR/../../resources/pipelines $DIR/build/resources/. +cp -a $DIR/../../resources/policies $DIR/build/resources/. +cp $DIR/../../LICENSE.md $DIR/build/. +cp $DIR/../../README.md $DIR/build/. +cd $DIR + diff --git a/scripts/service/systemd/sysflow-processor.service b/scripts/service/systemd/sysflow-processor.service new file mode 100644 index 00000000..635403ff --- /dev/null +++ b/scripts/service/systemd/sysflow-processor.service @@ -0,0 +1,26 @@ +[Unit] +Description=SysFlow cloud-native system telemetry (processor) +Documentation=https://sysflow.readthedocs.io +PartOf=sysflow.service +After=sysflow.service + +[Service] +Type=simple +User=root +EnvironmentFile=/etc/sysflow/conf/sysflow.env +PassEnvironment=CONFIG_PATH PLUGIN_DIR SOCKET +ExecStart=/usr/bin/sfprocessor -log=error -driver=socket -config=${CONFIG_PATH} -plugdir=${PLUGIN_DIR} ${SOCKET} +UMask=0077 +TimeoutSec=30 +RestartSec=15s +Restart=on-failure +PrivateTmp=true +NoNewPrivileges=yes +ProtectHome=read-only +ProtectSystem=full +ProtectKernelTunables=true +RestrictAddressFamilies=~AF_PACKET +KillSignal=SIGKILL + +[Install] +WantedBy=sysflow.service diff --git a/scripts/tests/benchmark-rules.sh b/scripts/tests/benchmark-rules.sh new file mode 100755 index 00000000..3f8fbc86 --- /dev/null +++ b/scripts/tests/benchmark-rules.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +tm=timeout +if ! command -v $tm &> /dev/null +then + tm=gtimeout +fi + +DURATION=$1 +CONFIG=$2 +RULES=$3 +TRACES=$4 +OUTDIR=$5 + +mkdir -p $OUTDIR + +n=0 +while [ "$n" -lt "$RULES" ]; do + export POLICYENGINE_BENCH_RULEINDEX=$n + echo "Benchmarking rule at index $POLICYENGINE_BENCH_RULEINDEX" + ( $tm $DURATION ../../driver/sfprocessor -perflog -log=quiet -config=$CONFIG -driver=file $TRACES > $OUTDIR/rate_rule_$POLICYENGINE_BENCH_RULEINDEX.out ) & + sleep 90 + n=$(($n + 1)) +done \ No newline at end of file diff --git a/scripts/tests/benchmark.sh b/scripts/tests/benchmark.sh new file mode 100755 index 00000000..8f4204c0 --- /dev/null +++ b/scripts/tests/benchmark.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +tm=timeout +if !
command -v $tm &> /dev/null +then + tm=gtimeout +fi + +DURATION=$1 +CONFIG=$2 +TRACES=$3 +OUTDIR=$4 + +mkdir -p $OUTDIR + +for n in {1..10} +do + export POLICYENGINE_BENCH_RULESETSIZE=$(( $n*5 )) + echo "Benchmarking with $POLICYENGINE_BENCH_RULESETSIZE rules" + ( $tm $DURATION ../../driver/sfprocessor -perflog -log=quiet -config=$CONFIG -driver=file $TRACES > $OUTDIR/rate_$POLICYENGINE_BENCH_RULESETSIZE.out ) & + sleep 90 +done \ No newline at end of file diff --git a/scripts/tests/pipeline.falco.bench.json b/scripts/tests/pipeline.falco.bench.json new file mode 100644 index 00000000..f96db7b9 --- /dev/null +++ b/scripts/tests/pipeline.falco.bench.json @@ -0,0 +1,24 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "policies": "../../resources/policies/runtimeintegrity/ttps.yaml", + "mode": "alert", + "concurrency": 10 + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "null", + "format": "json" + } + ] +} diff --git a/scripts/tests/pipeline.sigma.bench.json b/scripts/tests/pipeline.sigma.bench.json new file mode 100644 index 00000000..e90e3086 --- /dev/null +++ b/scripts/tests/pipeline.sigma.bench.json @@ -0,0 +1,26 @@ +{ + "pipeline":[ + { + "processor": "sysflowreader", + "handler": "flattener", + "in": "sysflow sysflowchan", + "out": "flat flattenerchan" + }, + { + "processor": "policyengine", + "in": "flat flattenerchan", + "out": "evt eventchan", + "language": "sigma", + "policies": "../../resources/policies/sigma/rules/linux", + "config": "../../resources/policies/sigma/config/sysflow.yml", + "mode": "alert", + "concurrency": 10 + }, + { + "processor": "exporter", + "in": "evt eventchan", + "export": "null", + "format": "json" + } + ] +} diff --git a/scripts/tests/runbench.sh b/scripts/tests/runbench.sh new file mode 100755 index 00000000..e6e1ca05 --- /dev/null +++ b/scripts/tests/runbench.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +./benchmark-rules.sh 90 ./pipeline.falco.bench.json 52 ../../../datasets/k8s/wcm_drill_3_5 falco_rule_index +./benchmark-rules.sh 90 ./pipeline.sigma.bench.json 136 ../../../datasets/k8s/wcm_drill_3_5 sigma_rule_index +./benchmark.sh 120 ./pipeline.falco.bench.json ../../../datasets/k8s/wcm_drill_3_5 falco_ruleset +./benchmark.sh 120 ./pipeline.sigma.bench.json ../../../datasets/k8s/wcm_drill_3_5 sigma_ruleset \ No newline at end of file diff --git a/scripts/tests/summarize.sh b/scripts/tests/summarize.sh new file mode 100755 index 00000000..60dbda51 --- /dev/null +++ b/scripts/tests/summarize.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +tm=timeout +if ! command -v $tm &> /dev/null +then + tm=gtimeout +fi + +DIR=${1:-"."} +VALUES="" +LABELS="" + +for f in `ls -v $DIR` +do + n=$(v=${f%.*} && printf "%s\n" "${v##*_}") + AVG=$(cat $DIR/$f | grep "Policy engine rate" | awk 'NR>2 {print $NF}' | awk '{ total += $1; count++ } END { print total/(1000*count) }') + VALUES="$VALUES $AVG" + LABELS="$LABELS $(( $n ))" +done + +echo $VALUES +echo $LABELS
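+ +# Usage sketch (assumption: run from scripts/tests after runbench.sh above has populated an output directory such as falco_ruleset or sigma_ruleset): +#   ./summarize.sh falco_ruleset +# The first echoed line lists, per rate_*.out file, the average of the "Policy engine rate" values divided by 1000 (per the awk expression above); the second line lists the numeric suffix of each file name (the rule-set size or rule index for that run).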