diff --git a/commands/package.go b/commands/package.go index f97d2b7f..d59bf0de 100644 --- a/commands/package.go +++ b/commands/package.go @@ -2,40 +2,39 @@ package commands import ( "bufio" + "context" "encoding/json" "fmt" "html" "io" "path/filepath" - "github.com/docker/model-cli/commands/completion" - "github.com/docker/model-cli/desktop" "github.com/docker/model-distribution/builder" "github.com/docker/model-distribution/registry" + "github.com/docker/model-distribution/tarball" + "github.com/docker/model-distribution/types" + "github.com/google/go-containerregistry/pkg/name" "github.com/spf13/cobra" + + "github.com/docker/model-cli/commands/completion" + "github.com/docker/model-cli/desktop" ) func newPackagedCmd() *cobra.Command { var opts packageOptions c := &cobra.Command{ - Use: "package --gguf [--license ...] [--context-size ] --push TARGET", - Short: "Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry", + Use: "package --gguf [--license ...] [--context-size ] [--push] MODEL", + Short: "Package a GGUF file into a Docker model OCI artifact, with optional licenses. 
The package is sent to the model-runner, unless --push is specified", Args: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf( "'docker model package' requires 1 argument.\n\n"+ - "Usage: %s\n\n"+ + "Usage: docker model %s\n\n"+ "See 'docker model package --help' for more information", cmd.Use, ) } - if opts.push != true { - return fmt.Errorf( - "This version of 'docker model package' requires --push and will write the resulting package directly to the registry.\n\n" + - "See 'docker model package --help' for more information", - ) - } if opts.ggufPath == "" { return fmt.Errorf( "GGUF path is required.\n\n" + @@ -62,7 +61,8 @@ func newPackagedCmd() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - if err := packageModel(cmd, args[0], opts); err != nil { + opts.tag = args[0] + if err := packageModel(cmd, opts); err != nil { cmd.PrintErrln("Failed to package model") return fmt.Errorf("package model: %w", err) } @@ -73,7 +73,7 @@ func newPackagedCmd() *cobra.Command { c.Flags().StringVar(&opts.ggufPath, "gguf", "", "absolute path to gguf file (required)") c.Flags().StringArrayVarP(&opts.licensePaths, "license", "l", nil, "absolute path to a license file") - c.Flags().BoolVar(&opts.push, "push", false, "push to registry (required)") + c.Flags().BoolVar(&opts.push, "push", false, "push to registry (if not set, the model is loaded into the Model Runner content store.") c.Flags().Uint64Var(&opts.contextSize, "context-size", 0, "context size in tokens") return c } @@ -83,13 +83,21 @@ type packageOptions struct { licensePaths []string push bool contextSize uint64 + tag string } -func packageModel(cmd *cobra.Command, tag string, opts packageOptions) error { - cmd.PrintErrf("Packaging model %q\n", tag) - target, err := registry.NewClient( - registry.WithUserAgent("docker-model-cli/" + desktop.Version), - ).NewTarget(tag) +func packageModel(cmd *cobra.Command, opts packageOptions) error { + var ( + target 
builder.Target + err error + ) + if opts.push { + target, err = registry.NewClient( + registry.WithUserAgent("docker-model-cli/" + desktop.Version), + ).NewTarget(opts.tag) + } else { + target, err = newModelRunnerTarget(desktopClient, opts.tag) + } if err != nil { return err } @@ -116,8 +124,11 @@ func packageModel(cmd *cobra.Command, tag string, opts packageOptions) error { } } - // Write the artifact to the registry - cmd.PrintErrln("Pushing to registry...") + if opts.push { + cmd.PrintErrln("Pushing model to registry...") + } else { + cmd.PrintErrln("Loading model to Model Runner...") + } pr, pw := io.Pipe() done := make(chan error, 1) go func() { @@ -147,8 +158,70 @@ func packageModel(cmd *cobra.Command, tag string, opts packageOptions) error { cmd.PrintErrln("Error streaming progress:", err) } if err := <-done; err != nil { - return fmt.Errorf("push: %w", err) + if opts.push { + return fmt.Errorf("failed to save packaged model: %w", err) + } + return fmt.Errorf("failed to load packaged model: %w", err) + } + + if opts.push { + cmd.PrintErrln("Model pushed successfully") + } else { + cmd.PrintErrln("Model loaded successfully") + } + return nil +} + +// modelRunnerTarget loads model to Docker Model Runner via models/load endpoint +type modelRunnerTarget struct { + client *desktop.Client + tag name.Tag +} + +func newModelRunnerTarget(client *desktop.Client, tag string) (*modelRunnerTarget, error) { + target := &modelRunnerTarget{ + client: client, + } + if tag != "" { + var err error + target.tag, err = name.NewTag(tag) + if err != nil { + return nil, fmt.Errorf("invalid tag: %w", err) + } + } + return target, nil +} + +func (t *modelRunnerTarget) Write(ctx context.Context, mdl types.ModelArtifact, progressWriter io.Writer) error { + pr, pw := io.Pipe() + errCh := make(chan error, 1) + go func() { + defer pw.Close() + target, err := tarball.NewTarget(pw) + if err != nil { + errCh <- err + return + } + errCh <- target.Write(ctx, mdl, progressWriter) + }() + + 
loadErr := t.client.LoadModel(ctx, pr) + writeErr := <-errCh + + if loadErr != nil { + return fmt.Errorf("loading model archive: %w", loadErr) + } + if writeErr != nil { + return fmt.Errorf("writing model archive: %w", writeErr) + } + id, err := mdl.ID() + if err != nil { + return fmt.Errorf("get model ID: %w", err) + } + if t.tag.String() != "" { + if err := desktopClient.Tag(id, parseRepo(t.tag), t.tag.TagStr()); err != nil { + return fmt.Errorf("tag model: %w", err) + } } - cmd.PrintErrln("Model pushed successfully") return nil } diff --git a/desktop/desktop.go b/desktop/desktop.go index f3f1dc83..54699838 100644 --- a/desktop/desktop.go +++ b/desktop/desktop.go @@ -3,6 +3,7 @@ package desktop import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "html" @@ -704,3 +705,25 @@ func (c *Client) Tag(source, targetRepo, targetTag string) error { return nil } + +func (c *Client) LoadModel(ctx context.Context, r io.Reader) error { + loadPath := fmt.Sprintf("%s/load", inference.ModelsPrefix) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.modelRunner.URL(loadPath), r) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Content-Type", "application/x-tar") + req.Header.Set("User-Agent", "docker-model-cli/"+Version) + + resp, err := c.modelRunner.Client().Do(req) + if err != nil { + return c.handleQueryError(err, loadPath) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + return fmt.Errorf("load failed with status %s: %s", resp.Status, string(body)) + } + return nil +} diff --git a/docs/reference/docker_model_package.yaml b/docs/reference/docker_model_package.yaml index 532909a6..e00078e1 100644 --- a/docs/reference/docker_model_package.yaml +++ b/docs/reference/docker_model_package.yaml @@ -1,9 +1,9 @@ command: docker model package short: | - Package a GGUF file into a Docker model OCI artifact, with optional 
licenses, and pushes it to the specified registry + Package a GGUF file into a Docker model OCI artifact, with optional licenses. The package is sent to the model-runner, unless --push is specified long: | - Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry -usage: docker model package --gguf [--license ...] [--context-size ] --push TARGET + Package a GGUF file into a Docker model OCI artifact, with optional licenses. The package is sent to the model-runner, unless --push is specified +usage: docker model package --gguf [--license ...] [--context-size ] [--push] MODEL pname: docker model plink: docker_model.yaml options: @@ -40,7 +40,8 @@ options: - option: push value_type: bool default_value: "false" - description: push to registry (required) + description: | + push to registry (if not set, the model is loaded into the Model Runner content store. deprecated: false hidden: false experimental: false diff --git a/docs/reference/model.md b/docs/reference/model.md index f79e2530..144cf7a0 100644 --- a/docs/reference/model.md +++ b/docs/reference/model.md @@ -5,24 +5,24 @@ Docker Model Runner (EXPERIMENTAL) ### Subcommands -| Name | Description | -|:------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------| -| [`df`](model_df.md) | Show Docker Model Runner disk usage | -| [`inspect`](model_inspect.md) | Display detailed information on one model | -| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) | -| [`list`](model_list.md) | List the models pulled to your local environment | -| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs | -| [`package`](model_package.md) | Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry | -| [`ps`](model_ps.md) | List running models | -| 
[`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment | -| [`push`](model_push.md) | Push a model to Docker Hub | -| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub | -| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode | -| [`status`](model_status.md) | Check if the Docker Model Runner is running | -| [`tag`](model_tag.md) | Tag a model | -| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner | -| [`unload`](model_unload.md) | Unload running models | -| [`version`](model_version.md) | Show the Docker Model Runner version | +| Name | Description | +|:------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------| +| [`df`](model_df.md) | Show Docker Model Runner disk usage | +| [`inspect`](model_inspect.md) | Display detailed information on one model | +| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) | +| [`list`](model_list.md) | List the models pulled to your local environment | +| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs | +| [`package`](model_package.md) | Package a GGUF file into a Docker model OCI artifact, with optional licenses. 
The package is sent to the model-runner, unless --push is specified | +| [`ps`](model_ps.md) | List running models | +| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment | +| [`push`](model_push.md) | Push a model to Docker Hub | +| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub | +| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode | +| [`status`](model_status.md) | Check if the Docker Model Runner is running | +| [`tag`](model_tag.md) | Tag a model | +| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner | +| [`unload`](model_unload.md) | Unload running models | +| [`version`](model_version.md) | Show the Docker Model Runner version | diff --git a/docs/reference/model_package.md b/docs/reference/model_package.md index 615535fd..e08c7762 100644 --- a/docs/reference/model_package.md +++ b/docs/reference/model_package.md @@ -1,16 +1,16 @@ # docker model package -Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry +Package a GGUF file into a Docker model OCI artifact, with optional licenses. 
The package is sent to the model-runner, unless --push is specified ### Options -| Name | Type | Default | Description | -|:------------------|:--------------|:--------|:--------------------------------------| -| `--context-size` | `uint64` | `0` | context size in tokens | -| `--gguf` | `string` | | absolute path to gguf file (required) | -| `-l`, `--license` | `stringArray` | | absolute path to a license file | -| `--push` | `bool` | | push to registry (required) | +| Name | Type | Default | Description | +|:------------------|:--------------|:--------|:---------------------------------------------------------------------------------------| +| `--context-size` | `uint64` | `0` | context size in tokens | +| `--gguf` | `string` | | absolute path to gguf file (required) | +| `-l`, `--license` | `stringArray` | | absolute path to a license file | +| `--push` | `bool` | | push to registry (if not set, the model is loaded into the Model Runner content store. | diff --git a/go.mod b/go.mod index 7fba2fa7..3aad6127 100644 --- a/go.mod +++ b/go.mod @@ -11,8 +11,8 @@ require ( github.com/docker/docker v28.2.2+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 - github.com/docker/model-distribution v0.0.0-20250710123110-a633223e127e - github.com/docker/model-runner v0.0.0-20250711130825-8907b3ddf82e + github.com/docker/model-distribution v0.0.0-20250724114133-a11d745e582c + github.com/docker/model-runner v0.0.0-20250724122432-ecfa5e7e6807 github.com/google/go-containerregistry v0.20.6 github.com/mattn/go-isatty v0.0.17 github.com/nxadm/tail v1.4.8 @@ -43,6 +43,8 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/elastic/go-sysinfo v1.15.3 // indirect + github.com/elastic/go-windows v1.0.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // 
indirect github.com/fvbommel/sortorder v1.1.0 // indirect @@ -77,6 +79,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect diff --git a/go.sum b/go.sum index 0d5c0dc2..7f52ec92 100644 --- a/go.sum +++ b/go.sum @@ -78,11 +78,15 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/model-distribution v0.0.0-20250710123110-a633223e127e h1:qBkjP4A20f3RXvtstitIPiStQ4p+bK8xcjosrXLBQZ0= -github.com/docker/model-distribution v0.0.0-20250710123110-a633223e127e/go.mod h1:dThpO9JoG5Px3i+rTluAeZcqLGw8C0qepuEL4gL2o/c= -github.com/docker/model-runner v0.0.0-20250711130825-8907b3ddf82e h1:oafd84kAFBgv/DAYgtXGLkC1KmRpDN+7G3be5+2+hA0= -github.com/docker/model-runner v0.0.0-20250711130825-8907b3ddf82e/go.mod h1:QmSoUNAbqolMY1Aq9DaC+sR/M/OPga0oCT/DBA1z9ow= +github.com/docker/model-distribution v0.0.0-20250724114133-a11d745e582c h1:w9MekYamXmWLe9ZWXWgNXJ7BLDDemXwB8WcF7wzHF5Q= +github.com/docker/model-distribution v0.0.0-20250724114133-a11d745e582c/go.mod h1:dThpO9JoG5Px3i+rTluAeZcqLGw8C0qepuEL4gL2o/c= +github.com/docker/model-runner v0.0.0-20250724122432-ecfa5e7e6807 h1:02vImD8wqUDv6VJ2cBLbqzbjn17IMYEi4ileCEjXMQ8= +github.com/docker/model-runner v0.0.0-20250724122432-ecfa5e7e6807/go.mod h1:rCzRjRXJ42E8JVIA69E9hErJVV5mnUpWdJ2POsktfRs= github.com/dvsekhvalnov/jose2go 
v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/elastic/go-sysinfo v1.15.3 h1:W+RnmhKFkqPTCRoFq2VCTmsT4p/fwpo+3gKNQsn1XU0= +github.com/elastic/go-sysinfo v1.15.3/go.mod h1:K/cNrqYTDrSoMh2oDkYEMS2+a72GRxMvNP+GC+vRIlo= +github.com/elastic/go-windows v1.0.2 h1:yoLLsAsV5cfg9FLhZ9EXZ2n2sQFKeDYrHenkcivY4vI= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= diff --git a/vendor/github.com/docker/model-distribution/distribution/client.go b/vendor/github.com/docker/model-distribution/distribution/client.go index a601888e..7fba7fa1 100644 --- a/vendor/github.com/docker/model-distribution/distribution/client.go +++ b/vendor/github.com/docker/model-distribution/distribution/client.go @@ -2,6 +2,7 @@ package distribution import ( "context" + "errors" "fmt" "io" "net/http" @@ -12,6 +13,7 @@ import ( "github.com/docker/model-distribution/internal/progress" "github.com/docker/model-distribution/internal/store" "github.com/docker/model-distribution/registry" + "github.com/docker/model-distribution/tarball" "github.com/docker/model-distribution/types" ) @@ -179,7 +181,7 @@ func (c *Client) PullModel(ctx context.Context, reference string, progressWriter // Ensure model has the correct tag if err := c.store.AddTags(remoteDigest.String(), []string{reference}); err != nil { - return fmt.Errorf("tagging modle: %w", err) + return fmt.Errorf("tagging model: %w", err) } return nil } else { @@ -206,6 +208,49 @@ func (c *Client) PullModel(ctx context.Context, reference string, progressWriter return nil } +// LoadModel loads the model from the reader to the store +func (c *Client) LoadModel(r io.Reader, 
progressWriter io.Writer) (string, error) { + c.log.Infoln("Starting model load") + + tr := tarball.NewReader(r) + for { + diffID, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + c.log.Infof("Model load interrupted (likely cancelled): %v", err) + return "", fmt.Errorf("model load interrupted: %w", err) + } + return "", fmt.Errorf("reading blob from stream: %w", err) + } + c.log.Infoln("Loading blob:", diffID) + if err := c.store.WriteBlob(diffID, tr); err != nil { + return "", fmt.Errorf("writing blob: %w", err) + } + c.log.Infoln("Loaded blob:", diffID) + } + + manifest, digest, err := tr.Manifest() + if err != nil { + return "", fmt.Errorf("read manifest: %w", err) + } + c.log.Infoln("Loading manifest:", digest.String()) + if err := c.store.WriteManifest(digest, manifest); err != nil { + return "", fmt.Errorf("write manifest: %w", err) + } + c.log.Infoln("Loaded model with ID:", digest.String()) + + if err := progress.WriteSuccess(progressWriter, "Model loaded successfully"); err != nil { + c.log.Warnf("Failed to write success message: %v", err) + // If we fail to write success message, don't try again + progressWriter = nil + } + + return digest.String(), nil +} + // ListModels returns all available models func (c *Client) ListModels() ([]types.Model, error) { c.log.Infoln("Listing available models") diff --git a/vendor/github.com/docker/model-distribution/internal/progress/reader.go b/vendor/github.com/docker/model-distribution/internal/progress/reader.go new file mode 100644 index 00000000..476dcde6 --- /dev/null +++ b/vendor/github.com/docker/model-distribution/internal/progress/reader.go @@ -0,0 +1,39 @@ +package progress + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1" +) + +// Reader wraps an io.Reader to track reading progress +type Reader struct { + Reader io.Reader + ProgressChan chan<- v1.Update + Total int64 +} + +// NewReader returns a reader that reports progress 
to the given channel while reading. +func NewReader(r io.Reader, updates chan<- v1.Update) io.Reader { + if updates == nil { + return r + } + return &Reader{ + Reader: r, + ProgressChan: updates, + } +} + +func (pr *Reader) Read(p []byte) (int, error) { + n, err := pr.Reader.Read(p) + pr.Total += int64(n) + if err == io.EOF { + pr.ProgressChan <- v1.Update{Complete: pr.Total} + } else if n > 0 { + select { + case pr.ProgressChan <- v1.Update{Complete: pr.Total}: + default: // if the progress channel is full, it skips sending rather than blocking the Read() call. + } + } + return n, err +} diff --git a/vendor/github.com/docker/model-distribution/internal/store/blobs.go b/vendor/github.com/docker/model-distribution/internal/store/blobs.go index 3a75d4b9..81cb0348 100644 --- a/vendor/github.com/docker/model-distribution/internal/store/blobs.go +++ b/vendor/github.com/docker/model-distribution/internal/store/blobs.go @@ -6,6 +6,8 @@ import ( "os" "path/filepath" + "github.com/docker/model-distribution/internal/progress" + v1 "github.com/google/go-containerregistry/pkg/v1" ) @@ -28,9 +30,8 @@ type blob interface { Uncompressed() (io.ReadCloser, error) } -// writeBlob write the blob to the store, reporting progress to the given channel. -// If the blob is already in the store, it is a no-op. 
-func (s *LocalStore) writeBlob(layer blob, progress chan<- v1.Update) error { +// writeLayer write the layer blob to the store +func (s *LocalStore) writeLayer(layer blob, updates chan<- v1.Update) error { hash, err := layer.DiffID() if err != nil { return fmt.Errorf("get file hash: %w", err) @@ -40,14 +41,24 @@ func (s *LocalStore) writeBlob(layer blob, progress chan<- v1.Update) error { return nil } - path := s.blobPath(hash) lr, err := layer.Uncompressed() if err != nil { return fmt.Errorf("get blob contents: %w", err) } defer lr.Close() - r := withProgress(lr, progress) + r := progress.NewReader(lr, updates) + + return s.WriteBlob(hash, r) +} + +// WriteBlob writes the blob to the store, reporting progress to the given channel. +// If the blob is already in the store, it is a no-op and the blob is not consumed from the reader. +func (s *LocalStore) WriteBlob(diffID v1.Hash, r io.Reader) error { + if s.hasBlob(diffID) { + return nil + } + path := s.blobPath(diffID) f, err := createFile(incompletePath(path)) if err != nil { return fmt.Errorf("create blob file: %w", err) @@ -56,7 +67,7 @@ func (s *LocalStore) writeBlob(layer blob, progress chan<- v1.Update) error { defer f.Close() if _, err := io.Copy(f, r); err != nil { - return fmt.Errorf("copy blob %q to store: %w", hash.String(), err) + return fmt.Errorf("copy blob %q to store: %w", diffID.String(), err) } f.Close() // Rename will fail on Windows if the file is still open. @@ -86,17 +97,6 @@ func createFile(path string) (*os.File, error) { return os.Create(path) } -// withProgress returns a reader that reports progress to the given channel. -func withProgress(r io.Reader, progress chan<- v1.Update) io.Reader { - if progress == nil { - return r - } - return &ProgressReader{ - Reader: r, - ProgressChan: progress, - } -} - // incompletePath returns the path to the incomplete file for the given path. 
func incompletePath(path string) string { return path + ".incomplete" diff --git a/vendor/github.com/docker/model-distribution/internal/store/index.go b/vendor/github.com/docker/model-distribution/internal/store/index.go index 6cff7c98..000d7d71 100644 --- a/vendor/github.com/docker/model-distribution/internal/store/index.go +++ b/vendor/github.com/docker/model-distribution/internal/store/index.go @@ -8,7 +8,6 @@ import ( "path/filepath" "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" ) // Index represents the index of all models in the store @@ -137,40 +136,10 @@ type IndexEntry struct { ID string `json:"id"` // Tags are the list of tags associated with the model. Tags []string `json:"tags"` - // Files are the GGUF files associated with the model. + // Files are the files associated with the model. Files []string `json:"files"` } -func newEntry(image v1.Image) (IndexEntry, error) { - digest, err := image.Digest() - if err != nil { - return IndexEntry{}, fmt.Errorf("getting digest: %w", err) - } - - layers, err := image.Layers() - if err != nil { - return IndexEntry{}, fmt.Errorf("getting layers: %w", err) - } - files := make([]string, len(layers)+1) - for i, layer := range layers { - diffID, err := layer.DiffID() - if err != nil { - return IndexEntry{}, fmt.Errorf("getting diffID: %w", err) - } - files[i] = diffID.String() - } - cfgName, err := image.ConfigName() - if err != nil { - return IndexEntry{}, fmt.Errorf("getting config name: %w", err) - } - files[len(layers)] = cfgName.String() - - return IndexEntry{ - ID: digest.String(), - Files: files, - }, nil -} - func (e IndexEntry) HasTag(tag string) bool { ref, err := name.NewTag(tag) if err != nil { diff --git a/vendor/github.com/docker/model-distribution/internal/store/manifests.go b/vendor/github.com/docker/model-distribution/internal/store/manifests.go index b8b458fa..86f59ea9 100644 --- 
a/vendor/github.com/docker/model-distribution/internal/store/manifests.go +++ b/vendor/github.com/docker/model-distribution/internal/store/manifests.go @@ -1,6 +1,7 @@ package store import ( + "bytes" "fmt" "os" "path/filepath" @@ -17,17 +18,41 @@ func (s *LocalStore) manifestPath(hash v1.Hash) string { return filepath.Join(s.rootPath, manifestsDir, hash.Algorithm, hash.Hex) } -// writeManifest writes the model's manifest to the store -func (s *LocalStore) writeManifest(mdl v1.Image) error { - digest, err := mdl.Digest() +// WriteManifest writes the model's manifest to the store +func (s *LocalStore) WriteManifest(hash v1.Hash, raw []byte) error { + manifest, err := v1.ParseManifest(bytes.NewReader(raw)) if err != nil { - return fmt.Errorf("get digest: %w", err) + return fmt.Errorf("parse manifest: %w", err) } - rm, err := mdl.RawManifest() + for _, layer := range manifest.Layers { + if !s.hasBlob(layer.Digest) { + return fmt.Errorf("missing blob %q for manifest - refusing to write unless all blobs exist", layer.Digest) + } + } + if err := writeFile(s.manifestPath(hash), raw); err != nil { + return fmt.Errorf("write manifest: %w", err) + } + + // Add the manifest to the index + idx, err := s.readIndex() if err != nil { - return fmt.Errorf("get raw manifest: %w", err) + return fmt.Errorf("reading models: %w", err) + } + + return s.writeIndex(idx.Add(newEntryForManifest(hash, manifest))) +} + +func newEntryForManifest(digest v1.Hash, manifest *v1.Manifest) IndexEntry { + files := make([]string, len(manifest.Layers)+1) + for i := range manifest.Layers { + files[i] = manifest.Layers[i].Digest.String() + } + files[len(manifest.Layers)] = manifest.Config.Digest.String() + + return IndexEntry{ + ID: digest.String(), + Files: files, } - return writeFile(s.manifestPath(digest), rm) } // removeManifest removes the manifest file from the store diff --git a/vendor/github.com/docker/model-distribution/internal/store/store.go
b/vendor/github.com/docker/model-distribution/internal/store/store.go index 233608b4..ac7bae64 100644 --- a/vendor/github.com/docker/model-distribution/internal/store/store.go +++ b/vendor/github.com/docker/model-distribution/internal/store/store.go @@ -6,8 +6,9 @@ import ( "os" "path/filepath" - "github.com/docker/model-distribution/internal/progress" v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/docker/model-distribution/internal/progress" ) const ( @@ -219,7 +220,7 @@ func (s *LocalStore) Write(mdl v1.Image, tags []string, w io.Writer) error { progressChan = pr.Updates() } - err := s.writeBlob(layer, progressChan) + err := s.writeLayer(layer, progressChan) if progressChan != nil { close(progressChan) @@ -234,32 +235,21 @@ func (s *LocalStore) Write(mdl v1.Image, tags []string, w io.Writer) error { } // Write the manifest - if err := s.writeManifest(mdl); err != nil { - return fmt.Errorf("writing manifest: %w", err) - } - - // Add the model to the index - idx, err := s.readIndex() + digest, err := mdl.Digest() if err != nil { - return fmt.Errorf("reading models: %w", err) + return fmt.Errorf("get digest: %w", err) } - entry, err := newEntry(mdl) + rm, err := mdl.RawManifest() if err != nil { - return fmt.Errorf("creating index entry: %w", err) + return fmt.Errorf("get raw manifest: %w", err) } - - // Add the model tags - idx = idx.Add(entry) - for _, tag := range tags { - updatedIdx, err := idx.Tag(entry.ID, tag) - if err != nil { - fmt.Printf("Warning: failed to tag model %q with tag %q: %v\n", entry.ID, tag, err) - continue - } - idx = updatedIdx + if err := s.WriteManifest(digest, rm); err != nil { + return fmt.Errorf("write manifest: %w", err) } - - return s.writeIndex(idx) + if err := s.AddTags(digest.String(), tags); err != nil { + return fmt.Errorf("adding tags: %w", err) + } + return err } // Read reads a model from the store by reference (either tag or ID) @@ -282,24 +272,3 @@ func (s *LocalStore) Read(reference string) (*Model, 
error) { return nil, ErrModelNotFound } - -// ProgressReader wraps an io.Reader to track reading progress -type ProgressReader struct { - Reader io.Reader - ProgressChan chan<- v1.Update - Total int64 -} - -func (pr *ProgressReader) Read(p []byte) (int, error) { - n, err := pr.Reader.Read(p) - pr.Total += int64(n) - if err == io.EOF { - pr.ProgressChan <- v1.Update{Complete: pr.Total} - } else if n > 0 { - select { - case pr.ProgressChan <- v1.Update{Complete: pr.Total}: - default: // if the progress channel is full, it skips sending rather than blocking the Read() call. - } - } - return n, err -} diff --git a/vendor/github.com/docker/model-distribution/tarball/file.go b/vendor/github.com/docker/model-distribution/tarball/file.go new file mode 100644 index 00000000..05848f80 --- /dev/null +++ b/vendor/github.com/docker/model-distribution/tarball/file.go @@ -0,0 +1,36 @@ +package tarball + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/docker/model-distribution/types" +) + +// FileTarget writes an artifact tarball to a local file. +type FileTarget struct { + path string +} + +// NewFileTarget returns a *FileTarget for the given path. +func NewFileTarget(path string) *FileTarget { + return &FileTarget{ + path: path, + } +} + +// Write writes the given artifact to the target. 
+func (t *FileTarget) Write(ctx context.Context, mdl types.ModelArtifact, pw io.Writer) error { + f, err := os.Create(t.path) + if err != nil { + return fmt.Errorf("create file for archive: %w", err) + } + defer f.Close() + target, err := NewTarget(f) + if err != nil { + return fmt.Errorf("create target: %w", err) + } + return target.Write(ctx, mdl, pw) +} diff --git a/vendor/github.com/docker/model-distribution/tarball/reader.go b/vendor/github.com/docker/model-distribution/tarball/reader.go new file mode 100644 index 00000000..d06ecb47 --- /dev/null +++ b/vendor/github.com/docker/model-distribution/tarball/reader.go @@ -0,0 +1,93 @@ +package tarball + +import ( + "archive/tar" + "encoding/hex" + "errors" + "io" + "path/filepath" + "strings" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type Reader struct { + tr *tar.Reader + rawManifest []byte + digest v1.Hash + done bool +} + +type Blob struct { + diffID v1.Hash + rc io.ReadCloser +} + +func (b Blob) DiffID() (v1.Hash, error) { + return b.diffID, nil +} + +func (b Blob) Uncompressed() (io.ReadCloser, error) { + return b.rc, nil +} + +func (r *Reader) Next() (v1.Hash, error) { + for { + hdr, err := r.tr.Next() + if err != nil { + if err == io.EOF { + r.done = true + } + return v1.Hash{}, err + } + //fi := hdr.FileInfo() + if !(hdr.Typeflag == tar.TypeReg) { + continue + } + if hdr.Name == "manifest.json" { + // save the manifest + hasher, err := v1.Hasher("sha256") + if err != nil { + return v1.Hash{}, err + } + rm, err := io.ReadAll(io.TeeReader(r.tr, hasher)) + if err != nil { + return v1.Hash{}, err + } + r.rawManifest = rm + r.digest = v1.Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + } + continue + } + parts := strings.Split(filepath.Clean(hdr.Name), "/") + if len(parts) != 3 || parts[0] != "blobs" && parts[0] != "manifests" { + continue + } + return v1.Hash{ + Algorithm: parts[1], + Hex: parts[2], + }, nil + } +} + +func (r *Reader) 
Read(p []byte) (n int, err error) { + return r.tr.Read(p) +} + +func (r *Reader) Manifest() ([]byte, v1.Hash, error) { + if !r.done { + return nil, v1.Hash{}, errors.New("must read all blobs first before getting manifest") + } + if r.done && r.rawManifest == nil { + return nil, v1.Hash{}, errors.New("manifest not found") + } + return r.rawManifest, r.digest, nil +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + tr: tar.NewReader(r), + } +} diff --git a/vendor/github.com/docker/model-distribution/tarball/target.go b/vendor/github.com/docker/model-distribution/tarball/target.go new file mode 100644 index 00000000..c8eb48a2 --- /dev/null +++ b/vendor/github.com/docker/model-distribution/tarball/target.go @@ -0,0 +1,155 @@ +package tarball + +import ( + "archive/tar" + "context" + "fmt" + "io" + "path/filepath" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/docker/model-distribution/internal/progress" + "github.com/docker/model-distribution/types" +) + +// Target stores an artifact as a TAR archive +type Target struct { + reference name.Tag + writer io.Writer + dirs map[string]struct{} +} + +// NewTarget returns a *Target for the given writer +func NewTarget(w io.Writer) (*Target, error) { + return &Target{ + writer: w, + dirs: make(map[string]struct{}), + }, nil +} + +// Write writes the artifact in archive format to the configured io.Writer +func (t *Target) Write(ctx context.Context, mdl types.ModelArtifact, progressWriter io.Writer) error { + tw := tar.NewWriter(t.writer) + defer tw.Close() + + rm, err := mdl.RawManifest() + if err != nil { + return err + } + + if err := t.ensureDir("blobs", tw); err != nil { + return err + } + + ls, err := mdl.Layers() + if err != nil { + return fmt.Errorf("get layers: %w", err) + } + + layersSize := int64(0) + for _, layer := range ls { + size, err := layer.Size() + if err != nil { + return fmt.Errorf("get layer size: %w", err) + } + 
layersSize += size + } + + for _, layer := range ls { + if err := t.addLayer(layer, tw, progressWriter, layersSize); err != nil { + return fmt.Errorf("add layer entry: %w", err) + } + } + rcf, err := mdl.RawConfigFile() + if err != nil { + return err + } + cn, err := mdl.ConfigName() + if err != nil { + return err + } + if err = tw.WriteHeader(&tar.Header{ + Name: filepath.Join("blobs", cn.Algorithm, cn.Hex), + Mode: 0666, + Size: int64(len(rcf)), + }); err != nil { + return err + } + if _, err = tw.Write(rcf); err != nil { + return fmt.Errorf("write config blob contents: %w", err) + } + + if err := tw.WriteHeader(&tar.Header{ + Name: "manifest.json", + Size: int64(len(rm)), + Mode: 0666, + }); err != nil { + return fmt.Errorf("write manifest.json header: %w", err) + } + if _, err = tw.Write(rm); err != nil { + return fmt.Errorf("write manifest.json contents: %w", err) + } + + return nil +} + +func (t *Target) addLayer(layer v1.Layer, tw *tar.Writer, progressWriter io.Writer, imageSize int64) error { + diffID, err := layer.DiffID() + if err != nil { + return fmt.Errorf("get layer diffID: %w", err) + } + if err := t.ensureDir(filepath.Join("blobs", diffID.Algorithm), tw); err != nil { + return err + } + sz, err := layer.Size() + if err != nil { + return fmt.Errorf("get layer size: %w", err) + } + if err = tw.WriteHeader(&tar.Header{ + Name: filepath.Join("blobs", diffID.Algorithm, diffID.Hex), + Mode: 0666, + Size: sz, + }); err != nil { + return fmt.Errorf("write blob file header: %w", err) + } + + var pr *progress.Reporter + var progressChan chan<- v1.Update + if progressWriter != nil { + pr = progress.NewProgressReporter(progressWriter, func(update v1.Update) string { + return fmt.Sprintf("Transferred: %.2f MB", float64(update.Complete)/1024/1024) + }, imageSize, layer) + progressChan = pr.Updates() + defer func() { + close(progressChan) + if err := pr.Wait(); err != nil { + fmt.Printf("reporter finished with non-fatal error: %v\n", err) + } + }() + } + + rc, err 
:= layer.Uncompressed() + if err != nil { + return fmt.Errorf("open layer %q: %w", diffID, err) + } + defer rc.Close() + if _, err = io.Copy(tw, progress.NewReader(rc, progressChan)); err != nil { + return fmt.Errorf("copy layer %q: %w", diffID, err) + } + return nil +} + +func (t *Target) ensureDir(path string, tw *tar.Writer) error { + if _, ok := t.dirs[path]; !ok { + if err := tw.WriteHeader(&tar.Header{ + Name: path, + Typeflag: tar.TypeDir, + }); err != nil { + return fmt.Errorf("add dir entry %q: %w", path, err) + } + } + t.dirs[path] = struct{}{} + return nil +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/gpuinfo.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/gpuinfo.go new file mode 100644 index 00000000..3bc8f66e --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/gpuinfo.go @@ -0,0 +1,17 @@ +package gpuinfo + +type GPUInfo struct { + // modelRuntimeInstallPath is the location where DMR installed it's llama-server + // and accompanying tools + modelRuntimeInstallPath string +} + +func New(modelRuntimeInstallPath string) *GPUInfo { + return &GPUInfo{ + modelRuntimeInstallPath: modelRuntimeInstallPath, + } +} + +func (g *GPUInfo) GetVRAMSize() (uint64, error) { + return getVRAMSize(g.modelRuntimeInstallPath) +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_cgo.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_cgo.go new file mode 100644 index 00000000..95a20e3d --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_cgo.go @@ -0,0 +1,19 @@ +//go:build darwin && cgo + +package gpuinfo + +/* +#cgo LDFLAGS: -framework Metal +#include "metal.h" +*/ +import "C" +import "errors" + +// getVRAMSize returns total system GPU memory in bytes +func getVRAMSize(_ string) (uint64, error) { + vramSize := C.getVRAMSize() + if vramSize == 0 { + return 0, errors.New("could not get metal VRAM size") + } + return uint64(vramSize), nil +} diff --git 
a/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_nocgo.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_nocgo.go new file mode 100644 index 00000000..915af448 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_darwin_nocgo.go @@ -0,0 +1,10 @@ +//go:build darwin && !cgo + +package gpuinfo + +import "errors" + +// getVRAMSize returns total system GPU memory in bytes +func getVRAMSize(_ string) (uint64, error) { + return 0, errors.New("unimplemented without cgo") +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_cgo.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_cgo.go new file mode 100644 index 00000000..041219ed --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_cgo.go @@ -0,0 +1,19 @@ +//go:build linux && cgo + +package gpuinfo + +/* +#cgo LDFLAGS: -ldl +#include "nvidia.h" +*/ +import "C" +import "errors" + +// getVRAMSize returns total system GPU memory in bytes +func getVRAMSize(_ string) (uint64, error) { + vramSize := C.getVRAMSize() + if vramSize == 0 { + return 0, errors.New("could not get nvidia VRAM size") + } + return uint64(vramSize), nil +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_nocgo.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_nocgo.go new file mode 100644 index 00000000..abe74c18 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_linux_nocgo.go @@ -0,0 +1,10 @@ +//go:build linux && !cgo + +package gpuinfo + +import "errors" + +// getVRAMSize returns total system GPU memory in bytes +func getVRAMSize(_ string) (uint64, error) { + return 0, errors.New("unimplemented without cgo") +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_windows.go b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_windows.go new file mode 100644 index 00000000..7ca9a0e4 --- /dev/null +++ 
b/vendor/github.com/docker/model-runner/pkg/gpuinfo/memory_windows.go @@ -0,0 +1,41 @@ +package gpuinfo + +import ( + "bufio" + "context" + "errors" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" +) + +// getVRAMSize returns total system GPU memory in bytes +func getVRAMSize(modelRuntimeInstallPath string) (uint64, error) { + if runtime.GOARCH == "arm64" { + // TODO(p1-0tr): For now, on windows/arm64, stick to the old behaviour. This will + // require backend.GetRequiredMemoryForModel to return 1 as well. + return 1, nil + } + + nvGPUInfoBin := filepath.Join(modelRuntimeInstallPath, "bin", "com.docker.nv-gpu-info.exe") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, nvGPUInfoBin) + out, err := cmd.CombinedOutput() + if err != nil { + return 0, err + } + sc := bufio.NewScanner(strings.NewReader(string(out))) + for sc.Scan() { + vram, found := strings.CutPrefix(sc.Text(), "GPU[0]: dedicated memory:") + if found { + vram = strings.TrimSpace(vram) + return strconv.ParseUint(vram, 10, 64) + } + } + return 0, errors.New("unexpected nv-gpu-info output format") +} diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.h b/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.h new file mode 100644 index 00000000..d7e96a5e --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.h @@ -0,0 +1,5 @@ +//go:build darwin + +#include + +size_t getVRAMSize(); \ No newline at end of file diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.m b/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.m new file mode 100644 index 00000000..edcfce1e --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/metal.m @@ -0,0 +1,15 @@ +//go:build darwin + +#include + +#include "metal.h" + +size_t getVRAMSize() { + id device = MTLCreateSystemDefaultDevice(); + if (device) { + size_t vramsz = [device recommendedMaxWorkingSetSize]; + [device 
release]; + return vramsz; + } + return 0; +} \ No newline at end of file diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.c b/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.c new file mode 100644 index 00000000..e00aeb18 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.c @@ -0,0 +1,71 @@ +//go:build linux + +#include "nvidia.h" + +typedef enum { + NVML_SUCCESS = 0 +} nvmlReturn_t; + +typedef struct { + unsigned long long total; + unsigned long long free; + unsigned long long used; +} nvmlMemory_t; + +typedef void* nvmlDevice_t; + +size_t getVRAMSize() { + void* handle; + nvmlReturn_t (*nvmlInit)(void); + nvmlReturn_t (*nvmlShutdown)(void); + nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(unsigned int index, nvmlDevice_t* device); + nvmlReturn_t (*nvmlDeviceGetMemoryInfo)(nvmlDevice_t device, nvmlMemory_t* memory); + + nvmlReturn_t result; + nvmlDevice_t device; + nvmlMemory_t memory; + + // Try to load libnvidia-ml.so.1 first, then fallback to libnvidia-ml.so + handle = dlopen("libnvidia-ml.so.1", RTLD_LAZY); + if (!handle) { + handle = dlopen("libnvidia-ml.so", RTLD_LAZY); + if (!handle) { + return 0; + } + } + + // Load required functions + nvmlInit = dlsym(handle, "nvmlInit"); + nvmlShutdown = dlsym(handle, "nvmlShutdown"); + nvmlDeviceGetHandleByIndex = dlsym(handle, "nvmlDeviceGetHandleByIndex"); + nvmlDeviceGetMemoryInfo = dlsym(handle, "nvmlDeviceGetMemoryInfo"); + + if (!nvmlInit || !nvmlShutdown || !nvmlDeviceGetHandleByIndex || !nvmlDeviceGetMemoryInfo) { + dlclose(handle); + return 0; + } + + result = nvmlInit(); + if (result != NVML_SUCCESS) { + dlclose(handle); + return 0; + } + + result = nvmlDeviceGetHandleByIndex(0, &device); + if (result != NVML_SUCCESS) { + nvmlShutdown(); + dlclose(handle); + return 0; + } + + result = nvmlDeviceGetMemoryInfo(device, &memory); + if (result != NVML_SUCCESS) { + nvmlShutdown(); + dlclose(handle); + return 0; + } + + nvmlShutdown(); + dlclose(handle); + return 
memory.total; +} \ No newline at end of file diff --git a/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.h b/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.h new file mode 100644 index 00000000..302673b5 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/gpuinfo/nvidia.h @@ -0,0 +1,6 @@ +//go:build linux + +#include +#include + +size_t getVRAMSize(); \ No newline at end of file diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backend.go b/vendor/github.com/docker/model-runner/pkg/inference/backend.go index 0eae5d4c..944ec126 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/backend.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/backend.go @@ -34,6 +34,11 @@ type BackendConfiguration struct { RuntimeFlags []string `json:"runtime-flags,omitempty"` } +type RequiredMemory struct { + RAM uint64 + VRAM uint64 // TODO(p1-0tr): for now assume we are working with single GPU set-ups +} + // Backend is the interface implemented by inference engine backends. Backend // implementations need not be safe for concurrent invocation of the following // methods, though their underlying server implementations do need to support @@ -76,4 +81,7 @@ type Backend interface { Status() string // GetDiskUsage returns the disk usage of the backend. GetDiskUsage() (int64, error) + // GetRequiredMemoryForModel returns the required working memory for a given + // model. 
+ GetRequiredMemoryForModel(model string, config *BackendConfiguration) (*RequiredMemory, error) } diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go index f745320d..09de11f5 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go @@ -1,6 +1,7 @@ package llamacpp import ( + "bufio" "context" "errors" "fmt" @@ -10,9 +11,12 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strings" + parser "github.com/gpustack/gguf-parser-go" + "github.com/docker/model-runner/pkg/diskusage" "github.com/docker/model-runner/pkg/inference" "github.com/docker/model-runner/pkg/inference/config" @@ -44,6 +48,8 @@ type llamaCpp struct { status string // config is the configuration for the llama.cpp backend. config config.BackendConfig + // gpuSupported indicates whether the underlying llama-server is built with GPU support. + gpuSupported bool } // New creates a new llama.cpp-based backend. 
@@ -116,6 +122,9 @@ func (l *llamaCpp) Install(ctx context.Context, httpClient *http.Client) error { l.updatedLlamaCpp = true } + l.gpuSupported = l.checkGPUSupport(ctx) + l.log.Infof("installed llama-server with gpuSupport=%t", l.gpuSupported) + return nil } @@ -213,3 +222,86 @@ func (l *llamaCpp) GetDiskUsage() (int64, error) { } return size, nil } + +func (l *llamaCpp) GetRequiredMemoryForModel(model string, config *inference.BackendConfiguration) (*inference.RequiredMemory, error) { + mdl, err := l.modelManager.GetModel(model) + if err != nil { + return nil, fmt.Errorf("getting model(%s): %w", model, err) + } + mdlPath, err := mdl.GGUFPath() + if err != nil { + return nil, fmt.Errorf("getting gguf path for model(%s): %w", model, err) + } + mdlGguf, err := parser.ParseGGUFFile(mdlPath) + if err != nil { + return nil, fmt.Errorf("parsing gguf(%s): %w", mdlPath, err) + } + mdlConfig, err := mdl.Config() + if err != nil { + return nil, fmt.Errorf("accessing model(%s) config: %w", model, err) + } + + contextSize := GetContextSize(&mdlConfig, config) + + ngl := uint64(0) + if l.gpuSupported { + ngl = 100 + if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" && mdlConfig.Quantization != "Q4_0" { + ngl = 0 // only Q4_0 models can be accelerated on Adreno + } + } + + // TODO(p1-0tr): for now assume we are running on GPU (single one) - Devices[1]; + // sum up weights + kv cache + context for an estimate of total GPU memory needed + // while running inference with the given model + estimate := mdlGguf.EstimateLLaMACppRun(parser.WithLLaMACppContextSize(int32(contextSize)), + // TODO(p1-0tr): add logic for resolving other param values, instead of hardcoding them + parser.WithLLaMACppLogicalBatchSize(2048), + parser.WithLLaMACppOffloadLayers(ngl)) + ram := uint64(estimate.Devices[0].Weight.Sum() + estimate.Devices[0].KVCache.Sum() + estimate.Devices[0].Computation.Sum()) + var vram uint64 + if len(estimate.Devices) > 1 { + vram = uint64(estimate.Devices[1].Weight.Sum() 
+ estimate.Devices[1].KVCache.Sum() + estimate.Devices[1].Computation.Sum()) + } + + if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" { + // TODO(p1-0tr): For now on windows/arm64 stick to the old behaviour, of allowing + // one model at a time. This WA requires gpuinfo.GetVRAMSize to return 1. + vram = 1 + } + + return &inference.RequiredMemory{ + RAM: ram, + VRAM: vram, + }, nil +} + +func (l *llamaCpp) checkGPUSupport(ctx context.Context) bool { + binPath := l.vendoredServerStoragePath + if l.updatedLlamaCpp { + binPath = l.updatedServerStoragePath + } + out, err := exec.CommandContext( + ctx, + filepath.Join(binPath, "com.docker.llama-server"), + "--list-devices", + ).CombinedOutput() + if err != nil { + l.log.Warnf("Failed to determine if llama-server is built with GPU support: %s", err) + return false + } + sc := bufio.NewScanner(strings.NewReader(string(out))) + expectDev := false + devRe := regexp.MustCompile(`\s{2}.*:\s`) + ndevs := 0 + for sc.Scan() { + if expectDev { + if devRe.MatchString(sc.Text()) { + ndevs += 1 + } + } else { + expectDev = strings.HasPrefix(sc.Text(), "Available devices:") + } + } + return ndevs > 0 +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go index 5c8822d3..becc3a1b 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go @@ -57,16 +57,10 @@ func (c *Config) GetArgs(model types.Model, socket string, mode inference.Backen args = append(args, "--embeddings") } - // Add arguments from model config - if modelCfg.ContextSize != nil { - args = append(args, "--ctx-size", strconv.FormatUint(*modelCfg.ContextSize, 10)) - } + args = append(args, "--ctx-size", strconv.FormatUint(GetContextSize(&modelCfg, config), 10)) // Add arguments from backend 
config if config != nil { - if config.ContextSize > 0 && !containsArg(args, "--ctx-size") { - args = append(args, "--ctx-size", strconv.FormatInt(config.ContextSize, 10)) - } args = append(args, config.RuntimeFlags...) } @@ -79,6 +73,19 @@ func (c *Config) GetArgs(model types.Model, socket string, mode inference.Backen return args, nil } +func GetContextSize(modelCfg *types.Config, backendCfg *inference.BackendConfiguration) uint64 { + // Model config takes precedence + if modelCfg != nil && modelCfg.ContextSize != nil { + return *modelCfg.ContextSize + } + // else use backend config + if backendCfg != nil && backendCfg.ContextSize > 0 { + return uint64(backendCfg.ContextSize) + } + // finally return default + return 4096 // llama.cpp default +} + // containsArg checks if the given argument is already in the args slice. func containsArg(args []string, arg string) bool { for _, a := range args { diff --git a/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go b/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go index 1c083abc..b0cf68bf 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go @@ -41,7 +41,7 @@ type Manager struct { // registryClient is the client for model registry. registryClient *registry.Client // lock is used to synchronize access to the models manager's router. 
- lock sync.Mutex + lock sync.RWMutex } type ClientConfig struct { @@ -120,6 +120,7 @@ func (m *Manager) RebuildRoutes(allowedOrigins []string) { func (m *Manager) routeHandlers(allowedOrigins []string) map[string]http.HandlerFunc { handlers := map[string]http.HandlerFunc{ "POST " + inference.ModelsPrefix + "/create": m.handleCreateModel, + "POST " + inference.ModelsPrefix + "/load": m.handleLoadModel, "GET " + inference.ModelsPrefix: m.handleGetModels, "GET " + inference.ModelsPrefix + "/{name...}": m.handleGetModel, "DELETE " + inference.ModelsPrefix + "/{name...}": m.handleDeleteModel, @@ -163,6 +164,10 @@ func (m *Manager) handleCreateModel(w http.ResponseWriter, r *http.Request) { // Pull the model. In the future, we may support additional operations here // besides pulling (such as model building). if err := m.PullModel(request.From, r, w); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + m.log.Infof("Request canceled/timed out while pulling model %q", request.From) + return + } if errors.Is(err, registry.ErrInvalidReference) { m.log.Warnf("Invalid model reference %q: %v", request.From, err) http.Error(w, "Invalid model reference", http.StatusBadRequest) @@ -183,6 +188,20 @@ func (m *Manager) handleCreateModel(w http.ResponseWriter, r *http.Request) { } } +// handleLoadModel handles POST /models/load requests. +func (m *Manager) handleLoadModel(w http.ResponseWriter, r *http.Request) { + if m.distributionClient == nil { + http.Error(w, "model distribution service unavailable", http.StatusServiceUnavailable) + return + } + + if _, err := m.distributionClient.LoadModel(r.Body, w); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return +} + // handleGetModels handles GET /models requests. 
func (m *Manager) handleGetModels(w http.ResponseWriter, r *http.Request) { if m.distributionClient == nil { @@ -256,6 +275,27 @@ func (m *Manager) handleGetModel(w http.ResponseWriter, r *http.Request) { } } +// ResolveModelID resolves a model reference to a model ID. If resolution fails, it returns the original ref. +func (m *Manager) ResolveModelID(modelRef string) string { + // Sanitize modelRef to prevent log forgery + sanitizedModelRef := strings.ReplaceAll(modelRef, "\n", "") + sanitizedModelRef = strings.ReplaceAll(sanitizedModelRef, "\r", "") + + model, err := m.GetModel(sanitizedModelRef) + if err != nil { + m.log.Warnf("Failed to resolve model ref %s to ID: %v", sanitizedModelRef, err) + return sanitizedModelRef + } + + modelID, err := model.ID() + if err != nil { + m.log.Warnf("Failed to get model ID for ref %s: %v", sanitizedModelRef, err) + return sanitizedModelRef + } + + return modelID +} + func getLocalModel(m *Manager, name string) (*Model, error) { if m.distributionClient == nil { return nil, errors.New("model distribution service unavailable") @@ -517,8 +557,8 @@ func (m *Manager) GetDiskUsage() (int64, error, int) { // ServeHTTP implement net/http.Handler.ServeHTTP. 
func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { - m.lock.Lock() - defer m.lock.Unlock() + m.lock.RLock() + defer m.lock.RUnlock() m.router.ServeHTTP(w, r) } diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go index 769eef27..ec7e1f5c 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go @@ -10,10 +10,12 @@ import ( "time" "github.com/docker/model-runner/pkg/environment" + "github.com/docker/model-runner/pkg/gpuinfo" "github.com/docker/model-runner/pkg/inference" "github.com/docker/model-runner/pkg/inference/models" "github.com/docker/model-runner/pkg/logging" "github.com/docker/model-runner/pkg/metrics" + "github.com/elastic/go-sysinfo" ) const ( @@ -42,12 +44,20 @@ var ( type runnerKey struct { // backend is the backend associated with the runner. backend string - // model is the model associated with the runner. - model string + // modelID is the ID (digest) of the model associated with the runner. + modelID string // mode is the operation mode associated with the runner. mode inference.BackendMode } +// runnerInfo holds information about a runner including its slot and the original model reference used to load it. +type runnerInfo struct { + // slot is the slot index where the runner is stored. + slot int + // modelRef is the original model reference (tag) used to load the runner. + modelRef string +} + // loader manages the loading and unloading of backend runners. It regulates // active backends in a manner that avoids exhausting system resources. Loaders // assume that all of their backends have been installed, so no load requests @@ -63,7 +73,7 @@ type loader struct { // runnerIdleTimeout is the loader-specific default runner idle timeout. 
runnerIdleTimeout time.Duration // totalMemory is the total system memory allocated to the loader. - totalMemory uint64 + totalMemory inference.RequiredMemory // idleCheck is used to signal the run loop when timestamps have updated. idleCheck chan struct{} // guard is a sempahore controlling access to all subsequent fields. It is @@ -74,20 +84,20 @@ type loader struct { // loadsEnabled signals that loads are currently enabled. loadsEnabled bool // availableMemory is the available portion of the loader's total memory. - availableMemory uint64 + availableMemory inference.RequiredMemory // waiters is the set of signal channels associated with waiting loaders. We // use a set of signaling channels (instead of a sync.Cond) to enable // polling. Each signaling channel should be buffered (with size 1). waiters map[chan<- struct{}]bool // runners maps runner keys to their slot index. - runners map[runnerKey]int + runners map[runnerKey]runnerInfo // slots maps slot indices to associated runners. A slot is considered free // if the runner value in it is nil. slots []*runner // references maps slot indices to reference counts. references []uint // allocations maps slot indices to memory allocation sizes. - allocations []uint64 + allocations []inference.RequiredMemory // timestamps maps slot indices to last usage times. Values in this slice // are only valid if the corresponding reference count is zero. timestamps []time.Time @@ -103,6 +113,7 @@ func newLoader( backends map[string]inference.Backend, modelManager *models.Manager, openAIRecorder *metrics.OpenAIRecorder, + gpuInfo *gpuinfo.GPUInfo, ) *loader { // Compute the number of runner slots to allocate. Because of RAM and VRAM // limitations, it's unlikely that we'll ever be able to fully populate @@ -124,20 +135,31 @@ func newLoader( } // Compute the amount of available memory. 
- // - // TODO: For now, we treat the system as having memory size 1 and all models - // as having size 1 (and thus we'll only load a single model at a time). - // However, the loader is designed to use "real" values for each and to - // schedule appropriately. Thus, we should switch to polling the system - // VRAM size here (and potentially even reserving a portion of it) and - // computing model size through estimation (using parameter count and - // quantization data type size). - // - // HACK: On GPU-enabled cloud engines, we'll bump this to 2. We can remove - // this once we have VRAM estimation. - totalMemory := uint64(1) - if isGPUEnabledCloudEnvironment { - totalMemory = 2 + // TODO(p1-0tr): improve error handling + vramSize, err := gpuInfo.GetVRAMSize() + if err != nil { + vramSize = 1 + log.Warnf("Could not read VRAM size: %s", err) + } else { + log.Infof("Running on system with %dMB VRAM", vramSize/1024/1024) + } + ramSize := uint64(1) + hostInfo, err := sysinfo.Host() + if err != nil { + log.Warnf("Could not read host info: %s", err) + } else { + ram, err := hostInfo.Memory() + if err != nil { + log.Warnf("Could not read host RAM size: %s", err) + } else { + ramSize = ram.Total + log.Infof("Running on system with %dMB RAM", ramSize/1024/1024) + } + } + + totalMemory := inference.RequiredMemory{ + RAM: ramSize, + VRAM: vramSize, } // Create the loader. @@ -151,10 +173,10 @@ func newLoader( guard: make(chan struct{}, 1), availableMemory: totalMemory, waiters: make(map[chan<- struct{}]bool), - runners: make(map[runnerKey]int, nSlots), + runners: make(map[runnerKey]runnerInfo, nSlots), slots: make([]*runner, nSlots), references: make([]uint, nSlots), - allocations: make([]uint64, nSlots), + allocations: make([]inference.RequiredMemory, nSlots), timestamps: make([]time.Time, nSlots), runnerConfigs: make(map[runnerKey]inference.BackendConfiguration), openAIRecorder: openAIRecorder, @@ -196,24 +218,25 @@ func (l *loader) broadcast() { // lock. 
It returns the number of remaining runners. func (l *loader) evict(idleOnly bool) int { now := time.Now() - for r, slot := range l.runners { - unused := l.references[slot] == 0 - idle := unused && now.Sub(l.timestamps[slot]) > l.runnerIdleTimeout + for r, runnerInfo := range l.runners { + unused := l.references[runnerInfo.slot] == 0 + idle := unused && now.Sub(l.timestamps[runnerInfo.slot]) > l.runnerIdleTimeout defunct := false select { - case <-l.slots[slot].done: + case <-l.slots[runnerInfo.slot].done: defunct = true default: } if unused && (!idleOnly || idle || defunct) { - l.log.Infof("Evicting %s backend runner with model %s in %s mode", - r.backend, r.model, r.mode, + l.log.Infof("Evicting %s backend runner with model %s (%s) in %s mode", + r.backend, r.modelID, runnerInfo.modelRef, r.mode, ) - l.slots[slot].terminate() - l.slots[slot] = nil - l.availableMemory += l.allocations[slot] - l.allocations[slot] = 0 - l.timestamps[slot] = time.Time{} + l.slots[runnerInfo.slot].terminate() + l.slots[runnerInfo.slot] = nil + l.availableMemory.RAM += l.allocations[runnerInfo.slot].RAM + l.availableMemory.VRAM += l.allocations[runnerInfo.slot].VRAM + l.allocations[runnerInfo.slot] = inference.RequiredMemory{RAM: 0, VRAM: 0} + l.timestamps[runnerInfo.slot] = time.Time{} delete(l.runners, r) } } @@ -224,17 +247,18 @@ func (l *loader) evict(idleOnly bool) int { // It returns the number of remaining runners. 
func (l *loader) evictRunner(backend, model string, mode inference.BackendMode) int { allBackends := backend == "" - for r, slot := range l.runners { - unused := l.references[slot] == 0 - if unused && (allBackends || r.backend == backend) && r.model == model && r.mode == mode { - l.log.Infof("Evicting %s backend runner with model %s in %s mode", - r.backend, r.model, r.mode, + for r, runnerInfo := range l.runners { + unused := l.references[runnerInfo.slot] == 0 + if unused && (allBackends || r.backend == backend) && r.modelID == model && r.mode == mode { + l.log.Infof("Evicting %s backend runner with model %s (%s) in %s mode", + r.backend, r.modelID, runnerInfo.modelRef, r.mode, ) - l.slots[slot].terminate() - l.slots[slot] = nil - l.availableMemory += l.allocations[slot] - l.allocations[slot] = 0 - l.timestamps[slot] = time.Time{} + l.slots[runnerInfo.slot].terminate() + l.slots[runnerInfo.slot] = nil + l.availableMemory.RAM += l.allocations[runnerInfo.slot].RAM + l.availableMemory.VRAM += l.allocations[runnerInfo.slot].VRAM + l.allocations[runnerInfo.slot] = inference.RequiredMemory{RAM: 0, VRAM: 0} + l.timestamps[runnerInfo.slot] = time.Time{} delete(l.runners, r) } } @@ -254,11 +278,13 @@ func (l *loader) Unload(ctx context.Context, unload UnloadRequest) int { return l.evict(false) } else { for _, model := range unload.Models { - delete(l.runnerConfigs, runnerKey{unload.Backend, model, inference.BackendModeCompletion}) + modelID := l.modelManager.ResolveModelID(model) + delete(l.runnerConfigs, runnerKey{unload.Backend, modelID, inference.BackendModeCompletion}) + delete(l.runnerConfigs, runnerKey{unload.Backend, modelID, inference.BackendModeEmbedding}) // Evict both, completion and embedding models. We should consider // accepting a mode parameter in unload requests. 
- l.evictRunner(unload.Backend, model, inference.BackendModeCompletion) - l.evictRunner(unload.Backend, model, inference.BackendModeEmbedding) + l.evictRunner(unload.Backend, modelID, inference.BackendModeCompletion) + l.evictRunner(unload.Backend, modelID, inference.BackendModeEmbedding) } return len(l.runners) } @@ -282,15 +308,15 @@ func stopAndDrainTimer(timer *time.Timer) { func (l *loader) idleCheckDuration() time.Duration { // Compute the oldest usage time for any idle runner. var oldest time.Time - for _, slot := range l.runners { + for _, runnerInfo := range l.runners { select { - case <-l.slots[slot].done: + case <-l.slots[runnerInfo.slot].done: // Check immediately if a runner is defunct return 0 default: } - if l.references[slot] == 0 { - timestamp := l.timestamps[slot] + if l.references[runnerInfo.slot] == 0 { + timestamp := l.timestamps[runnerInfo.slot] if oldest.IsZero() || timestamp.Before(oldest) { oldest = timestamp } @@ -378,10 +404,10 @@ func (l *loader) run(ctx context.Context) { } } -// load allocates a runner using the specified backend and model. If allocated, +// load allocates a runner using the specified backend and modelID. If allocated, // it should be released by the caller using the release mechanism (once the // runner is no longer needed). -func (l *loader) load(ctx context.Context, backendName, model string, mode inference.BackendMode) (*runner, error) { +func (l *loader) load(ctx context.Context, backendName, modelID, modelRef string, mode inference.BackendMode) (*runner, error) { // Grab the backend. backend, ok := l.backends[backendName] if !ok { @@ -390,15 +416,24 @@ func (l *loader) load(ctx context.Context, backendName, model string, mode infer // Estimate the amount of memory that will be used by the model and check // that we're even capable of loading it. - // - // TODO: For now, we treat the system as having memory size 1 and all models - // as having size 1 (and thus we'll only load a single model at a time). 
- // However, the loader is designed to use "real" values for each and to - // schedule appropriately. Thus, we should switch to computing model size - // here through estimation (using parameter count and quantization data type - // size). - memory := uint64(1) - if memory > l.totalMemory { + var runnerConfig *inference.BackendConfiguration + if rc, ok := l.runnerConfigs[runnerKey{backendName, modelID, mode}]; ok { + runnerConfig = &rc + } + memory, err := backend.GetRequiredMemoryForModel(modelID, runnerConfig) + if err != nil { + return nil, err + } + l.log.Infof("Loading %s, which will require %dMB RAM and %dMB VRAM", modelID, memory.RAM/1024/1024, memory.VRAM/1024/1024) + if l.totalMemory.RAM == 1 { + l.log.Warnf("RAM size unknown. Assume model will fit, but only one.") + memory.RAM = 1 + } + if l.totalMemory.VRAM == 1 { + l.log.Warnf("VRAM size unknown. Assume model will fit, but only one.") + memory.VRAM = 1 + } + if memory.RAM > l.totalMemory.RAM || memory.VRAM > l.totalMemory.VRAM { return nil, errModelTooBig } @@ -426,31 +461,31 @@ func (l *loader) load(ctx context.Context, backendName, model string, mode infer } // See if we can satisfy the request with an existing runner. - existing, ok := l.runners[runnerKey{backendName, model, mode}] + existing, ok := l.runners[runnerKey{backendName, modelID, mode}] if ok { select { - case <-l.slots[existing].done: - l.log.Warnf("%s runner for %s is defunct. Waiting for it to be evicted.", backendName, model) - if l.references[existing] == 0 { - l.evictRunner(backendName, model, mode) + case <-l.slots[existing.slot].done: + l.log.Warnf("%s runner for %s is defunct. 
Waiting for it to be evicted.", backendName, existing.modelRef) + if l.references[existing.slot] == 0 { + l.evictRunner(backendName, modelID, mode) } else { goto WaitForChange } default: - l.references[existing] += 1 - l.timestamps[existing] = time.Time{} - return l.slots[existing], nil + l.references[existing.slot] += 1 + l.timestamps[existing.slot] = time.Time{} + return l.slots[existing.slot], nil } } // If there's not sufficient memory or all slots are full, then try // evicting unused runners. - if memory > l.availableMemory || len(l.runners) == len(l.slots) { + if memory.RAM > l.availableMemory.RAM || memory.VRAM > l.availableMemory.VRAM || len(l.runners) == len(l.slots) { l.evict(false) } // If there's sufficient memory and a free slot, then find the slot. - if memory <= l.availableMemory && len(l.runners) < len(l.slots) { + if memory.RAM <= l.availableMemory.RAM && memory.VRAM <= l.availableMemory.VRAM && len(l.runners) < len(l.slots) { for s, runner := range l.slots { if runner == nil { slot = s @@ -462,15 +497,15 @@ func (l *loader) load(ctx context.Context, backendName, model string, mode infer // If we've identified a slot, then we're ready to start a runner. if slot >= 0 { var runnerConfig *inference.BackendConfiguration - if rc, ok := l.runnerConfigs[runnerKey{backendName, model, mode}]; ok { + if rc, ok := l.runnerConfigs[runnerKey{backendName, modelID, mode}]; ok { runnerConfig = &rc } // Create the runner. 
- l.log.Infof("Loading %s backend runner with model %s in %s mode", backendName, model, mode) - runner, err := run(l.log, backend, model, mode, slot, runnerConfig, l.openAIRecorder) + l.log.Infof("Loading %s backend runner with model %s in %s mode", backendName, modelID, mode) + runner, err := run(l.log, backend, modelID, mode, slot, runnerConfig, l.openAIRecorder) if err != nil { l.log.Warnf("Unable to start %s backend runner with model %s in %s mode: %v", - backendName, model, mode, err, + backendName, modelID, mode, err, ) return nil, fmt.Errorf("unable to start runner: %w", err) } @@ -484,17 +519,19 @@ func (l *loader) load(ctx context.Context, backendName, model string, mode infer if err := runner.wait(ctx); err != nil { runner.terminate() l.log.Warnf("Initialization for %s backend runner with model %s in %s mode failed: %v", - backendName, model, mode, err, + backendName, modelID, mode, err, ) return nil, fmt.Errorf("error waiting for runner to be ready: %w", err) } // Perform registration and return the runner. - l.availableMemory -= memory - l.runners[runnerKey{backendName, model, mode}] = slot + l.availableMemory.RAM -= memory.RAM + l.availableMemory.VRAM -= memory.VRAM + l.runners[runnerKey{backendName, modelID, mode}] = runnerInfo{slot, modelRef} l.slots[slot] = runner l.references[slot] = 1 - l.allocations[slot] = memory + l.allocations[slot].RAM = memory.RAM + l.allocations[slot].VRAM = memory.VRAM return runner, nil } @@ -523,17 +560,17 @@ func (l *loader) release(runner *runner) { slot := l.runners[runnerKey{runner.backend.Name(), runner.model, runner.mode}] // Decrement the runner's reference count. - l.references[slot] -= 1 + l.references[slot.slot] -= 1 // If the runner's reference count is now zero, then check if it is still // active, and record now as its idle start time and signal the idle // checker. 
- if l.references[slot] == 0 { + if l.references[slot.slot] == 0 { select { case <-runner.done: l.evictRunner(runner.backend.Name(), runner.model, runner.mode) default: - l.timestamps[slot] = time.Now() + l.timestamps[slot.slot] = time.Now() select { case l.idleCheck <- struct{}{}: default: @@ -545,22 +582,22 @@ func (l *loader) release(runner *runner) { l.broadcast() } -func (l *loader) setRunnerConfig(ctx context.Context, backendName, model string, mode inference.BackendMode, runnerConfig inference.BackendConfiguration) error { +func (l *loader) setRunnerConfig(ctx context.Context, backendName, modelID string, mode inference.BackendMode, runnerConfig inference.BackendConfiguration) error { l.lock(ctx) defer l.unlock() - runnerId := runnerKey{backendName, model, mode} + runnerId := runnerKey{backendName, modelID, mode} // If the configuration hasn't changed, then just return. if existingConfig, ok := l.runnerConfigs[runnerId]; ok && reflect.DeepEqual(runnerConfig, existingConfig) { - l.log.Infof("Configuration for %s runner for model %s unchanged", backendName, model) + l.log.Infof("Configuration for %s runner for modelID %s unchanged", backendName, modelID) return nil } // If there's an active runner whose configuration we want to override, then // try evicting it (because it may not be in use). 
if _, ok := l.runners[runnerId]; ok { - l.evictRunner(backendName, model, mode) + l.evictRunner(backendName, modelID, mode) } // If there's still then active runner, then we can't (or at least @@ -569,7 +606,7 @@ func (l *loader) setRunnerConfig(ctx context.Context, backendName, model string, return errRunnerAlreadyActive } - l.log.Infof("Configuring %s runner for %s", backendName, model) + l.log.Infof("Configuring %s runner for %s", backendName, modelID) l.runnerConfigs[runnerId] = runnerConfig return nil } diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go index 374d15b6..985fea48 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go @@ -118,6 +118,12 @@ func run( r.URL.Path = trimRequestPathToOpenAIRoot(r.URL.Path) r.URL.RawPath = trimRequestPathToOpenAIRoot(r.URL.RawPath) } + proxy.ModifyResponse = func(resp *http.Response) error { + // CORS headers are set by the CorsMiddleware from pkg/inference/cors.go, + // so we remove them here to avoid duplication and potential misconfiguration. 
+ resp.Header.Del("Access-Control-Allow-Origin") + return nil + } proxy.Transport = transport proxyLog := log.Writer() proxy.ErrorLog = logpkg.New(proxyLog, "", 0) diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go index 10b7e6e6..3f716e9a 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go @@ -13,6 +13,7 @@ import ( "time" "github.com/docker/model-distribution/distribution" + "github.com/docker/model-runner/pkg/gpuinfo" "github.com/docker/model-runner/pkg/inference" "github.com/docker/model-runner/pkg/inference/models" "github.com/docker/model-runner/pkg/logging" @@ -43,7 +44,7 @@ type Scheduler struct { // openAIRecorder is used to record OpenAI API inference requests and responses. openAIRecorder *metrics.OpenAIRecorder // lock is used to synchronize access to the scheduler's router. - lock sync.Mutex + lock sync.RWMutex } // NewScheduler creates a new inference scheduler. @@ -55,8 +56,9 @@ func NewScheduler( httpClient *http.Client, allowedOrigins []string, tracker *metrics.Tracker, + gpuInfo *gpuinfo.GPUInfo, ) *Scheduler { - openAIRecorder := metrics.NewOpenAIRecorder(log.WithField("component", "openai-recorder")) + openAIRecorder := metrics.NewOpenAIRecorder(log.WithField("component", "openai-recorder"), modelManager) // Create the scheduler. 
s := &Scheduler{ @@ -65,7 +67,7 @@ func NewScheduler( defaultBackend: defaultBackend, modelManager: modelManager, installer: newInstaller(log, backends, httpClient), - loader: newLoader(log, backends, modelManager, openAIRecorder), + loader: newLoader(log, backends, modelManager, openAIRecorder, gpuInfo), router: http.NewServeMux(), tracker: tracker, openAIRecorder: openAIRecorder, @@ -238,8 +240,10 @@ func (s *Scheduler) handleOpenAIInference(w http.ResponseWriter, r *http.Request s.tracker.TrackModel(model, r.UserAgent()) } + modelID := s.modelManager.ResolveModelID(request.Model) + // Request a runner to execute the request and defer its release. - runner, err := s.loader.load(r.Context(), backend.Name(), request.Model, backendMode) + runner, err := s.loader.load(r.Context(), backend.Name(), modelID, request.Model, backendMode) if err != nil { http.Error(w, fmt.Errorf("unable to load runner: %w", err).Error(), http.StatusInternalServerError) return @@ -295,17 +299,17 @@ func (s *Scheduler) getLoaderStatus(ctx context.Context) []BackendStatus { result := make([]BackendStatus, 0, len(s.loader.runners)) - for key, slot := range s.loader.runners { - if s.loader.slots[slot] != nil { + for key, runnerInfo := range s.loader.runners { + if s.loader.slots[runnerInfo.slot] != nil { status := BackendStatus{ BackendName: key.backend, - ModelName: key.model, + ModelName: runnerInfo.modelRef, Mode: key.mode.String(), LastUsed: time.Time{}, } - if s.loader.references[slot] == 0 { - status.LastUsed = s.loader.timestamps[slot] + if s.loader.references[runnerInfo.slot] == 0 { + status.LastUsed = s.loader.timestamps[runnerInfo.slot] } result = append(result, status) @@ -414,9 +418,9 @@ func (s *Scheduler) Configure(w http.ResponseWriter, r *http.Request) { // Configure is called by compose for each model. 
s.tracker.TrackModel(model, r.UserAgent()) } - - if err := s.loader.setRunnerConfig(r.Context(), backend.Name(), configureRequest.Model, inference.BackendModeCompletion, runnerConfig); err != nil { - s.log.Warnf("Failed to configure %s runner for %s: %s", backend.Name(), configureRequest.Model, err) + modelID := s.modelManager.ResolveModelID(configureRequest.Model) + if err := s.loader.setRunnerConfig(r.Context(), backend.Name(), modelID, inference.BackendModeCompletion, runnerConfig); err != nil { + s.log.Warnf("Failed to configure %s runner for %s (%s): %s", backend.Name(), configureRequest.Model, modelID, err) if errors.Is(err, errRunnerAlreadyActive) { http.Error(w, err.Error(), http.StatusConflict) } else { @@ -442,14 +446,14 @@ func (s *Scheduler) GetAllActiveRunners() []metrics.ActiveRunner { // Find the runner slot for this backend/model combination key := runnerKey{ backend: backend.BackendName, - model: backend.ModelName, + modelID: backend.ModelName, mode: parseBackendMode(backend.Mode), } - if slot, exists := s.loader.runners[key]; exists { - socket, err := RunnerSocketPath(slot) + if runnerInfo, exists := s.loader.runners[key]; exists { + socket, err := RunnerSocketPath(runnerInfo.slot) if err != nil { - s.log.Warnf("Failed to get socket path for runner %s/%s: %v", backend.BackendName, backend.ModelName, err) + s.log.Warnf("Failed to get socket path for runner %s/%s (%s): %v", backend.BackendName, backend.ModelName, key.modelID, err) continue } @@ -480,13 +484,13 @@ func (s *Scheduler) GetLlamaCppSocket() (string, error) { // Find the runner slot for this backend/model combination key := runnerKey{ backend: backend.BackendName, - model: backend.ModelName, + modelID: backend.ModelName, mode: parseBackendMode(backend.Mode), } - if slot, exists := s.loader.runners[key]; exists { + if runnerInfo, exists := s.loader.runners[key]; exists { // Use the RunnerSocketPath function to get the socket path - return RunnerSocketPath(slot) + return 
RunnerSocketPath(runnerInfo.slot) } } } @@ -508,7 +512,7 @@ func parseBackendMode(mode string) inference.BackendMode { // ServeHTTP implements net/http.Handler.ServeHTTP. func (s *Scheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.lock.Lock() - defer s.lock.Unlock() + s.lock.RLock() + defer s.lock.RUnlock() s.router.ServeHTTP(w, r) } diff --git a/vendor/github.com/docker/model-runner/pkg/metrics/openai_recorder.go b/vendor/github.com/docker/model-runner/pkg/metrics/openai_recorder.go index e1d5fc49..93a73349 100644 --- a/vendor/github.com/docker/model-runner/pkg/metrics/openai_recorder.go +++ b/vendor/github.com/docker/model-runner/pkg/metrics/openai_recorder.go @@ -10,9 +10,14 @@ import ( "time" "github.com/docker/model-runner/pkg/inference" + "github.com/docker/model-runner/pkg/inference/models" "github.com/docker/model-runner/pkg/logging" ) +// maximumRecordsPerModel is the maximum number of records that will be stored +// per model. +const maximumRecordsPerModel = 10 + type responseRecorder struct { http.ResponseWriter body *bytes.Buffer @@ -53,15 +58,17 @@ type ModelData struct { } type OpenAIRecorder struct { - log logging.Logger - records map[string]*ModelData - m sync.RWMutex + log logging.Logger + records map[string]*ModelData // key is model ID + modelManager *models.Manager // for resolving model tags to IDs + m sync.RWMutex } -func NewOpenAIRecorder(log logging.Logger) *OpenAIRecorder { +func NewOpenAIRecorder(log logging.Logger, modelManager *models.Manager) *OpenAIRecorder { return &OpenAIRecorder{ - log: log, - records: make(map[string]*ModelData), + log: log, + modelManager: modelManager, + records: make(map[string]*ModelData), } } @@ -71,24 +78,28 @@ func (r *OpenAIRecorder) SetConfigForModel(model string, config *inference.Backe return } + modelID := r.modelManager.ResolveModelID(model) + r.m.Lock() defer r.m.Unlock() - if r.records[model] == nil { - r.records[model] = &ModelData{ + if r.records[modelID] == nil { + 
r.records[modelID] = &ModelData{
 			Records: make([]*RequestResponsePair, 0, 10),
 			Config:  inference.BackendConfiguration{},
 		}
 	}
-	r.records[model].Config = *config
+	r.records[modelID].Config = *config
 }
 
 func (r *OpenAIRecorder) RecordRequest(model string, req *http.Request, body []byte) string {
+	modelID := r.modelManager.ResolveModelID(model)
+
 	r.m.Lock()
 	defer r.m.Unlock()
 
-	recordID := fmt.Sprintf("%s_%d", model, time.Now().UnixNano())
+	recordID := fmt.Sprintf("%s_%d", modelID, time.Now().UnixNano())
 
 	record := &RequestResponsePair{
 		ID:        recordID,
@@ -100,17 +111,28 @@
 		UserAgent: req.UserAgent(),
 	}
 
-	if r.records[model] == nil {
-		r.records[model] = &ModelData{
-			Records: make([]*RequestResponsePair, 0, 10),
+	modelData := r.records[modelID]
+	if modelData == nil {
+		modelData = &ModelData{
+			Records: make([]*RequestResponsePair, 0, maximumRecordsPerModel),
 			Config: inference.BackendConfiguration{},
 		}
+		r.records[modelID] = modelData
 	}
 
-	r.records[model].Records = append(r.records[model].Records, record)
-
-	if len(r.records[model].Records) > 10 {
-		r.records[model].Records = r.records[model].Records[1:]
+	// Ideally we would use a ring buffer or a linked list for storing records,
+	// but we want this data returnable as JSON, so we have to live with this
+	// slightly inefficient memory shuffle. Note that truncating the front of
+	// the slice and continually appending would cause the slice's capacity to
+	// grow unbounded.
+ if len(modelData.Records) == maximumRecordsPerModel { + copy( + modelData.Records[:maximumRecordsPerModel-1], + modelData.Records[1:], + ) + modelData.Records[maximumRecordsPerModel-1] = record + } else { + modelData.Records = append(modelData.Records, record) } return recordID @@ -138,10 +160,12 @@ func (r *OpenAIRecorder) RecordResponse(id, model string, rw http.ResponseWriter response = responseBody } + modelID := r.modelManager.ResolveModelID(model) + r.m.Lock() defer r.m.Unlock() - if modelData, exists := r.records[model]; exists { + if modelData, exists := r.records[modelID]; exists { for _, record := range modelData.Records { if record.ID == id { record.Response = response @@ -149,9 +173,9 @@ func (r *OpenAIRecorder) RecordResponse(id, model string, rw http.ResponseWriter return } } - r.log.Errorf("Matching request (id=%s) not found for model %s - %d\n%s", id, model, statusCode, response) + r.log.Errorf("Matching request (id=%s) not found for model %s - %d\n%s", id, modelID, statusCode, response) } else { - r.log.Errorf("Model %s not found in records - %d\n%s", model, statusCode, response) + r.log.Errorf("Model %s not found in records - %d\n%s", modelID, statusCode, response) } } @@ -237,11 +261,12 @@ func (r *OpenAIRecorder) GetRecordsByModelHandler() http.HandlerFunc { return } + modelID := r.modelManager.ResolveModelID(model) if err := json.NewEncoder(w).Encode(map[string]interface{}{ "model": model, "records": records, "count": len(records), - "config": r.records[model].Config, + "config": r.records[modelID].Config, }); err != nil { http.Error(w, fmt.Sprintf("Failed to encode records for model '%s': %v", model, err), http.StatusInternalServerError) @@ -252,10 +277,12 @@ func (r *OpenAIRecorder) GetRecordsByModelHandler() http.HandlerFunc { } func (r *OpenAIRecorder) GetRecordsByModel(model string) []*RequestResponsePair { + modelID := r.modelManager.ResolveModelID(model) + r.m.RLock() defer r.m.RUnlock() - if modelData, exists := r.records[model]; 
exists { + if modelData, exists := r.records[modelID]; exists { result := make([]*RequestResponsePair, len(modelData.Records)) copy(result, modelData.Records) return result @@ -265,13 +292,15 @@ func (r *OpenAIRecorder) GetRecordsByModel(model string) []*RequestResponsePair } func (r *OpenAIRecorder) RemoveModel(model string) { + modelID := r.modelManager.ResolveModelID(model) + r.m.Lock() defer r.m.Unlock() - if _, exists := r.records[model]; exists { - delete(r.records, model) - r.log.Infof("Removed records for model: %s", model) + if _, exists := r.records[modelID]; exists { + delete(r.records, modelID) + r.log.Infof("Removed records for model: %s", modelID) } else { - r.log.Warnf("No records found for model: %s", model) + r.log.Warnf("No records found for model: %s", modelID) } } diff --git a/vendor/github.com/elastic/go-sysinfo/.editorconfig b/vendor/github.com/elastic/go-sysinfo/.editorconfig new file mode 100644 index 00000000..8cc16d1c --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.editorconfig @@ -0,0 +1,27 @@ +# See: http://editorconfig.org +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*.json] +indent_size = 2 +indent_style = space + +[*.py] +indent_style = space +indent_size = 4 + +[*.yml] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab + +[Vagrantfile] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/elastic/go-sysinfo/.gitattributes b/vendor/github.com/elastic/go-sysinfo/.gitattributes new file mode 100644 index 00000000..875f4996 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.gitattributes @@ -0,0 +1,5 @@ +# Treat all files in the Go repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. 
+ +* -text diff --git a/vendor/github.com/elastic/go-sysinfo/.gitignore b/vendor/github.com/elastic/go-sysinfo/.gitignore new file mode 100644 index 00000000..52a75b73 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.gitignore @@ -0,0 +1,11 @@ +*.iml +*.swp +*.o +.idea +.vagrant +_obj + +*TEST.out + +build/ +**/testdata/fuzz diff --git a/vendor/github.com/elastic/go-sysinfo/.golangci.yml b/vendor/github.com/elastic/go-sysinfo/.golangci.yml new file mode 100644 index 00000000..5c0e8616 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.golangci.yml @@ -0,0 +1,16 @@ +--- + +run: + issues-exit-code: 1 + modules-download-mode: readonly + +linters: + disable-all: true + fast: false + enable: + - goimports + - revive + +linters-settings: + goimports: + local-prefixes: github.com/elastic/go-sysinfo diff --git a/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md new file mode 100644 index 00000000..c206aa31 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# Contributing + +Pull requests are welcomed. You must + +- Sign the Elastic [Contributor License Agreement](https://www.elastic.co/contributor-agreement). +- Include a [changelog][changelog_docs] entry at `.changelog/{pr-number}.txt` with your pull request. +- Include tests that demonstrate the change is working. + +[changelog_docs]: https://github.com/GoogleCloudPlatform/magic-modules/blob/2834761fec3acbf35cacbffe100530f82eada650/.ci/RELEASE_NOTES_GUIDE.md#expected-format + +## Releasing + +To create a new release use the release workflow in GitHub actions. This will create a new draft +release in GitHub releases with a changelog. After the job completes, review the draft and if +everything is correct, publish the release. When the release is published GitHub will create the +git tag. 
diff --git a/vendor/github.com/elastic/go-sysinfo/LICENSE.txt b/vendor/github.com/elastic/go-sysinfo/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/go-sysinfo/Makefile b/vendor/github.com/elastic/go-sysinfo/Makefile new file mode 100644 index 00000000..9d4e6b17 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/Makefile @@ -0,0 +1,14 @@ +.phony: update +update: fmt lic imports + +.PHONY: lic +lic: + go run github.com/elastic/go-licenser@latest + +.PHONY: fmt +fmt: + go run mvdan.cc/gofumpt@latest -w -l ./ + +.PHONY: imports +imports: + go run golang.org/x/tools/cmd/goimports@latest -l -local github.com/elastic/go-sysinfo ./ diff --git a/vendor/github.com/elastic/go-sysinfo/NOTICE.txt b/vendor/github.com/elastic/go-sysinfo/NOTICE.txt new file mode 100644 index 00000000..cb8e89d5 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-sysinfo +Copyright 2017-2024 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/go-sysinfo/README.md new file mode 100644 index 00000000..b8b0002d --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/README.md @@ -0,0 +1,87 @@ +# go-sysinfo + +[![go](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml/badge.svg)](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml) +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: http://godoc.org/github.com/elastic/go-sysinfo + +go-sysinfo is a library for collecting system information. This includes +information about the host machine and processes running on the host. + +The available features vary based on what has been implemented by the "provider" +for the operating system. At runtime you check to see if additional interfaces +are implemented by the returned `Host` or `Process`. 
For example: + +```go +process, err := sysinfo.Self() +if err != nil { + return err +} + +if handleCounter, ok := process.(types.OpenHandleCounter); ok { + count, err := handleCounter.OpenHandleCount() + if err != nil { + return err + } + log.Printf("%d open handles", count) +} +``` + +These tables show what methods are implemented as well as the extra interfaces +that are implemented. + +| `Host` Features | Darwin | Linux | Windows | AIX | +|------------------|--------|-------|---------|-----| +| `Info()` | x | x | x | x | +| `Memory()` | x | x | x | x | +| `CPUTimer` | x | x | x | x | +| `LoadAverage` | x | x | | | +| `VMStat` | | x | | | +| `NetworkCounters`| | x | | | + +| `Process` Features | Darwin | Linux | Windows | AIX | +|------------------------|--------|-------|---------|-----| +| `Info()` | x | x | x | x | +| `Memory()` | x | x | x | x | +| `User()` | x | x | x | x | +| `Parent()` | x | x | x | x | +| `CPUTimer` | x | x | x | x | +| `Environment` | x | x | | x | +| `OpenHandleEnumerator` | | x | | | +| `OpenHandleCounter` | | x | | | +| `Seccomp` | | x | | | +| `Capabilities` | | x | | | +| `NetworkCounters` | | x | | | + +### GOOS / GOARCH Pairs + +This table lists the OS and architectures for which a "provider" is implemented. + +| GOOS / GOARCH | Requires CGO | Tested | +|----------------|--------------|--------| +| aix/ppc64 | x | | +| darwin/amd64 | optional * | x | +| darwin/arm64 | optional * | x | +| linux/386 | | | +| linux/amd64 | | x | +| linux/arm | | | +| linux/arm64 | | | +| linux/mips | | | +| linux/mips64 | | | +| linux/mips64le | | | +| linux/mipsle | | | +| linux/ppc64 | | | +| linux/ppc64le | | | +| linux/riscv64 | | | +| linux/s390x | | | +| windows/amd64 | | x | +| windows/arm64 | | | +| windows/arm | | | + +* On darwin (macOS) host information like machineid and process information like memory, cpu, user and starttime require cgo. 
+ +### Supported Go versions + +go-sysinfo supports the [two most recent Go releases][ci_go_versions]. + +[ci_go_versions]: https://github.com/elastic/go-sysinfo/blob/main/.github/workflows/go.yml#L40-L41 diff --git a/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go new file mode 100644 index 00000000..00a9d2c7 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package registry + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +type ( + HostOptsCreator = func(ProviderOptions) HostProvider + ProcessOptsCreator = func(ProviderOptions) ProcessProvider +) + +// HostProvider defines interfaces that provide host-specific metrics +type HostProvider interface { + Host() (types.Host, error) +} + +// ProcessProvider defines interfaces that provide process-specific metrics +type ProcessProvider interface { + Processes() ([]types.Process, error) + Process(pid int) (types.Process, error) + Self() (types.Process, error) +} + +type ProviderOptions struct { + Hostfs string +} + +var ( + hostProvider HostProvider + processProvider ProcessProvider + processProviderWithOpts ProcessOptsCreator + hostProviderWithOpts HostOptsCreator +) + +// Register a metrics provider. `provider` should implement one or more of `ProcessProvider` or `HostProvider` +func Register(provider interface{}) { + if h, ok := provider.(ProcessOptsCreator); ok { + if processProviderWithOpts != nil { + panic(fmt.Sprintf("ProcessOptsCreator already registered: %T", processProviderWithOpts)) + } + processProviderWithOpts = h + } + + if h, ok := provider.(HostOptsCreator); ok { + if hostProviderWithOpts != nil { + panic(fmt.Sprintf("HostOptsCreator already registered: %T", hostProviderWithOpts)) + } + hostProviderWithOpts = h + } + + if h, ok := provider.(HostProvider); ok { + if hostProvider != nil { + panic(fmt.Sprintf("HostProvider already registered: %v", hostProvider)) + } + hostProvider = h + } + + if p, ok := provider.(ProcessProvider); ok { + if processProvider != nil { + panic(fmt.Sprintf("ProcessProvider already registered: %v", processProvider)) + } + processProvider = p + } +} + +// GetHostProvider returns the HostProvider registered for the system. May return nil. 
+func GetHostProvider(opts ProviderOptions) HostProvider { + if hostProviderWithOpts != nil { + return hostProviderWithOpts(opts) + } + return hostProvider +} + +// GetProcessProvider returns the ProcessProvider registered on the system. May return nil. +func GetProcessProvider(opts ProviderOptions) ProcessProvider { + if processProviderWithOpts != nil { + return processProviderWithOpts(opts) + } + return processProvider +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go new file mode 100644 index 00000000..e158f467 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 + +package aix + +import ( + "encoding/binary" + "fmt" + "os" + "time" +) + +// utmp can't be used by "encoding/binary" if generated by cgo, +// some pads will be missing. 
+type utmp struct { + User [256]uint8 + Id [14]uint8 + Line [64]uint8 + XPad1 int16 + Pid int32 + Type int16 + XPad2 int16 + Time int64 + Termination int16 + Exit int16 + Host [256]uint8 + Xdblwordpad int32 + XreservedA [2]int32 + XreservedV [6]int32 +} + +const ( + typeBootTime = 2 +) + +// BootTime returns the time at which the machine was started, truncated to the nearest second +func BootTime() (time.Time, error) { + return bootTime("/etc/utmp") +} + +func bootTime(filename string) (time.Time, error) { + // Get boot time from /etc/utmp + file, err := os.Open(filename) + if err != nil { + return time.Time{}, fmt.Errorf("failed to get host uptime: cannot open /etc/utmp: %w", err) + } + + defer file.Close() + + for { + var utmp utmp + if err := binary.Read(file, binary.BigEndian, &utmp); err != nil { + break + } + + if utmp.Type == typeBootTime { + return time.Unix(utmp.Time, 0), nil + } + } + + return time.Time{}, fmt.Errorf("failed to get host uptime: no utmp record: %w", err) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/doc.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/doc.go new file mode 100644 index 00000000..aadec23a --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package aix implements the HostProvider and ProcessProvider interfaces +// for providing information about IBM AIX on ppc64. +package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go new file mode 100644 index 00000000..9af09e51 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#cgo LDFLAGS: -L/usr/lib -lperfstat + +#include +#include +#include + +*/ +import "C" + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +//go:generate sh -c "go tool cgo -godefs defs_aix.go | sed 's/*byte/uint64/g' > ztypes_aix_ppc64.go" +// As cgo will return some psinfo's fields with *byte, binary.Read will refuse this type. 
+ +func init() { + registry.Register(aixSystem{}) +} + +type aixSystem struct{} + +// Host returns a new AIX host. +func (aixSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +// Architecture returns the architecture of the host +func Architecture() (string, error) { + return "ppc", nil +} + +// Info returns the host details. +func (h *host) Info() types.HostInfo { + return h.info +} + +// Info returns the current CPU usage of the host. +func (*host) CPUTime() (types.CPUTimes, error) { + clock := uint64(C.sysconf(C._SC_CLK_TCK)) + tick2nsec := func(val uint64) uint64 { + return val * 1e9 / clock + } + + cpudata := C.perfstat_cpu_total_t{} + + if _, err := C.perfstat_cpu_total(nil, &cpudata, C.sizeof_perfstat_cpu_total_t, 1); err != nil { + return types.CPUTimes{}, fmt.Errorf("error while callin perfstat_cpu_total: %w", err) + } + + return types.CPUTimes{ + User: time.Duration(tick2nsec(uint64(cpudata.user))), + System: time.Duration(tick2nsec(uint64(cpudata.sys))), + Idle: time.Duration(tick2nsec(uint64(cpudata.idle))), + IOWait: time.Duration(tick2nsec(uint64(cpudata.wait))), + }, nil +} + +// Memory returns the current memory usage of the host. +func (*host) Memory() (*types.HostMemoryInfo, error) { + var mem types.HostMemoryInfo + + pagesize := uint64(os.Getpagesize()) + + meminfo := C.perfstat_memory_total_t{} + _, err := C.perfstat_memory_total(nil, &meminfo, C.sizeof_perfstat_memory_total_t, 1) + if err != nil { + return nil, fmt.Errorf("perfstat_memory_total failed: %w", err) + } + + mem.Total = uint64(meminfo.real_total) * pagesize + mem.Free = uint64(meminfo.real_free) * pagesize + mem.Used = uint64(meminfo.real_inuse) * pagesize + + // There is no real equivalent to memory available in AIX. 
+ mem.Available = mem.Free + + mem.VirtualTotal = uint64(meminfo.virt_total) * pagesize + mem.VirtualFree = mem.Free + uint64(meminfo.pgsp_free)*pagesize + mem.VirtualUsed = mem.VirtualTotal - mem.VirtualFree + + mem.Metrics = map[string]uint64{ + "bytes_coalesced": uint64(meminfo.bytes_coalesced), + "bytes_coalesced_mempool": uint64(meminfo.bytes_coalesced_mempool), + "real_pinned": uint64(meminfo.real_pinned) * pagesize, + "pgins": uint64(meminfo.pgins), + "pgouts": uint64(meminfo.pgouts), + "pgsp_free": uint64(meminfo.pgsp_free) * pagesize, + "pgsp_rsvd": uint64(meminfo.pgsp_rsvd) * pagesize, + } + + return &mem, nil +} + +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + +func (h *host) FQDN() (string, error) { + return h.FQDNWithContext(context.Background()) +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return errors.Join(r.errs...) 
+ } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (*reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go new file mode 100644 index 00000000..dc3af830 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#include +*/ +import "C" + +import ( + "fmt" + "strconv" +) + +var oslevel string + +func getKernelVersion() (int, int, error) { + name := C.struct_utsname{} + if _, err := C.uname(&name); err != nil { + return 0, 0, fmt.Errorf("kernel version: uname: %w", err) + } + + version, err := strconv.Atoi(C.GoString(&name.version[0])) + if err != nil { + return 0, 0, fmt.Errorf("parsing kernel version: %w", err) + } + + release, err := strconv.Atoi(C.GoString(&name.release[0])) + if err != nil { + return 0, 0, fmt.Errorf("parsing kernel release: %w", err) + } + return version, release, nil +} + +// KernelVersion returns the version of AIX kernel +func KernelVersion() (string, error) { + major, minor, err := getKernelVersion() + if err != nil { + return "", err + } + return strconv.Itoa(major) + "." + strconv.Itoa(minor), nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go new file mode 100644 index 00000000..945ce348 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#include +*/ +import "C" + +import "fmt" + +// MachineID returns the id of the machine +func MachineID() (string, error) { + name := C.struct_utsname{} + if _, err := C.uname(&name); err != nil { + return "", fmt.Errorf("machine id: %w", err) + } + return C.GoString(&name.machine[0]), nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go new file mode 100644 index 00000000..3a603f68 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/elastic/go-sysinfo/types" +) + +// OperatingSystem returns information of the host operating system +func OperatingSystem() (*types.OSInfo, error) { + return getOSInfo() +} + +func getOSInfo() (*types.OSInfo, error) { + major, minor, err := getKernelVersion() + if err != nil { + return nil, err + } + + // Retrieve build version from "/proc/version". + procVersion, err := os.ReadFile("/proc/version") + if err != nil { + return nil, fmt.Errorf("failed to get OS info: cannot open /proc/version: %w", err) + } + build := strings.SplitN(string(procVersion), "\n", 4)[2] + + return &types.OSInfo{ + Type: "unix", + Family: "aix", + Platform: "aix", + Name: "aix", + Version: strconv.Itoa(major) + "." + strconv.Itoa(minor), + Major: major, + Minor: minor, + Patch: 0, // No patch version + Build: build, + }, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go new file mode 100644 index 00000000..6fb669df --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go @@ -0,0 +1,301 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#cgo LDFLAGS: -L/usr/lib -lperfstat + +#include +#include +#include + +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/elastic/go-sysinfo/types" +) + +// Processes returns a list of all actives processes. +func (aixSystem) Processes() ([]types.Process, error) { + // Retrieve processes using /proc instead of calling + // getprocs which will also retrieve kernel threads. + files, err := os.ReadDir("/proc") + if err != nil { + return nil, fmt.Errorf("error while reading /proc: %w", err) + } + + processes := make([]types.Process, 0, len(files)) + for _, f := range files { + // Check that the file is a correct process directory. + // /proc also contains special files (/proc/version) and threads + // directories (/proc/pid directory but without any "as" file) + if _, err := os.Stat("/proc/" + f.Name() + "/as"); err == nil { + pid, _ := strconv.Atoi(f.Name()) + processes = append(processes, &process{pid: pid}) + } + } + + return processes, nil +} + +// Process returns the process designed by PID. +func (aixSystem) Process(pid int) (types.Process, error) { + p := process{pid: pid} + return &p, nil +} + +// Self returns the current process. +func (s aixSystem) Self() (types.Process, error) { + return s.Process(os.Getpid()) +} + +type process struct { + pid int + info *types.ProcessInfo + env map[string]string +} + +// PID returns the PID of a process. 
+func (p *process) PID() int { + return p.pid +} + +// Parent returns the parent of a process. +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + return &process{pid: info.PPID}, nil +} + +// Info returns all information about the process. +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + p.info = &types.ProcessInfo{ + PID: p.pid, + } + + // Retrieve PPID and StartTime + info := C.struct_procsinfo64{} + cpid := C.pid_t(p.pid) + + num, err := C.getprocs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, nil, 0, &cpid, 1) + if num != 1 { + err = syscall.ESRCH + } + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while calling getprocs: %w", err) + } + + p.info.PPID = int(info.pi_ppid) + // pi_start is the time in second since the process have started. + p.info.StartTime = time.Unix(0, int64(uint64(info.pi_start)*1000*uint64(time.Millisecond))) + + // Retrieve arguments and executable name + // If buffer is not large enough, args are truncated + buf := make([]byte, 8192) + var args []string + if _, err := C.getargs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while calling getargs: %w", err) + } + + bbuf := bytes.NewBuffer(buf) + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF || arg[0] == 0 { + break + } + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while reading arguments: %w", err) + } + + args = append(args, string(chop(arg))) + } + + // For some special programs, getargs might return an empty buffer. + if len(args) == 0 { + args = append(args, "") + } + + // The first element of the arguments list is the executable path. + // There are some exceptions which don't have an executable path + // but rather a special name directly in args[0]. 
+ if strings.Contains(args[0], "sshd: ") { + // ssh connections can be named "sshd: root@pts/11". + // If we are using filepath.Base, the result will only + // be 11 because of the last "/". + p.info.Name = args[0] + } else { + p.info.Name = filepath.Base(args[0]) + } + + // The process was launched using its absolute path, so we can retrieve + // the executable path from its "name". + if filepath.IsAbs(args[0]) { + p.info.Exe = filepath.Clean(args[0]) + } else { + // TODO: improve this case. The executable full path can still + // be retrieve in some cases. Look at os/executable_path.go + // in the stdlib. + // For the moment, let's "exe" be the same as "name" + p.info.Exe = p.info.Name + } + p.info.Args = args + + // Get CWD + cwd, err := os.Readlink("/proc/" + strconv.Itoa(p.pid) + "/cwd") + if err != nil { + if !os.IsNotExist(err) { + return types.ProcessInfo{}, fmt.Errorf("error while reading /proc/%s/cwd: %w", strconv.Itoa(p.pid), err) + } + } + + p.info.CWD = strings.TrimSuffix(cwd, "/") + + return *p.info, nil +} + +// Environment returns the environment of a process. 
+func (p *process) Environment() (map[string]string, error) { + if p.env != nil { + return p.env, nil + } + p.env = map[string]string{} + + /* If buffer is not large enough, args are truncated */ + buf := make([]byte, 8192) + info := C.struct_procsinfo64{} + info.pi_pid = C.pid_t(p.pid) + + if _, err := C.getevars(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { + return nil, fmt.Errorf("error while calling getevars: %w", err) + } + + bbuf := bytes.NewBuffer(buf) + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + if err != nil { + return nil, fmt.Errorf("error while calling getevars: %w", err) + } + + pair := bytes.SplitN(chop(line), delim, 2) + if len(pair) != 2 { + return nil, errors.New("error reading process environment") + } + p.env[string(pair[0])] = string(pair[1]) + } + + return p.env, nil +} + +// User returns the user IDs of a process. +func (p *process) User() (types.UserInfo, error) { + var prcred prcred + if err := p.decodeProcfsFile("cred", &prcred); err != nil { + return types.UserInfo{}, err + } + return types.UserInfo{ + UID: strconv.Itoa(int(prcred.Ruid)), + EUID: strconv.Itoa(int(prcred.Euid)), + SUID: strconv.Itoa(int(prcred.Suid)), + GID: strconv.Itoa(int(prcred.Rgid)), + EGID: strconv.Itoa(int(prcred.Egid)), + SGID: strconv.Itoa(int(prcred.Sgid)), + }, nil +} + +// Memory returns the current memory usage of a process. 
+func (p *process) Memory() (types.MemoryInfo, error) { + var mem types.MemoryInfo + pagesize := uint64(os.Getpagesize()) + + info := C.struct_procsinfo64{} + cpid := C.pid_t(p.pid) + + num, err := C.getprocs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, nil, 0, &cpid, 1) + if num != 1 { + err = syscall.ESRCH + } + if err != nil { + return types.MemoryInfo{}, fmt.Errorf("error while calling getprocs: %w", err) + } + + mem.Resident = uint64(info.pi_drss+info.pi_trss) * pagesize + mem.Virtual = uint64(info.pi_dvm) * pagesize + + return mem, nil +} + +// CPUTime returns the current CPU usage of a process. +func (p *process) CPUTime() (types.CPUTimes, error) { + var pstatus pstatus + if err := p.decodeProcfsFile("status", &pstatus); err != nil { + return types.CPUTimes{}, err + } + return types.CPUTimes{ + User: time.Duration(pstatus.Utime.Sec*1e9 + int64(pstatus.Utime.Nsec)), + System: time.Duration(pstatus.Stime.Sec*1e9 + int64(pstatus.Stime.Nsec)), + }, nil +} + +func (p *process) decodeProcfsFile(name string, data interface{}) error { + fileName := "/proc/" + strconv.Itoa(p.pid) + "/" + name + + file, err := os.Open(fileName) + if err != nil { + return fmt.Errorf("error while opening %s: %w", fileName, err) + } + defer file.Close() + + if err := binary.Read(file, binary.BigEndian, data); err != nil { + return fmt.Errorf("error while decoding %s: %w", fileName, err) + } + + return nil +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go new file mode 100644 index 00000000..0e369bb6 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +//go:build aix && ppc64 + +package aix + +type prcred struct { + Euid uint64 + Ruid uint64 + Suid uint64 + Egid uint64 + Rgid uint64 + Sgid uint64 + X_pad [8]uint64 + X_pad1 uint32 + Ngroups uint32 + Groups [1]uint64 +} + +type pstatus struct { + Flag uint32 + Flag2 uint32 + Flags uint32 + Nlwp uint32 + Stat uint8 + Dmodel uint8 + X_pad1 [6]uint8 + Sigpend prSigset + Brkbase uint64 + Brksize uint64 + Stkbase uint64 + Stksize uint64 + Pid uint64 + Ppid uint64 + Pgid uint64 + Sid uint64 + Utime prTimestruc64 + Stime prTimestruc64 + Cutime prTimestruc64 + Cstime prTimestruc64 + Sigtrace prSigset + Flttrace fltset + Sysentry_offset uint32 + Sysexit_offset uint32 + X_pad [8]uint64 + Lwp lwpstatus +} + +type prTimestruc64 struct { + Sec int64 + Nsec int32 + X__pad uint32 +} + +type prSigset struct { + Set [4]uint64 +} + +type fltset struct { + Set [4]uint64 +} + +type lwpstatus struct { + Lwpid uint64 + Flags uint32 + X_pad1 [1]uint8 + State uint8 + Cursig uint16 + Why uint16 + What uint16 + Policy uint32 + Clname [8]uint8 + Lwppend prSigset + Lwphold prSigset + Info prSiginfo64 + Altstack prStack64 + Action prSigaction64 + X_pad2 uint32 + Syscall uint16 + Nsysarg uint16 + Sysarg [8]uint64 + Errno int32 + Ptid uint32 + X_pad [9]uint64 + Reg prgregset + Fpreg prfpregset + Family pfamily +} + +type 
prSiginfo64 struct { + Signo int32 + Errno int32 + Code int32 + Imm int32 + Status int32 + X__pad1 uint32 + Uid uint64 + Pid uint64 + Addr uint64 + Band int64 + Value [8]byte + X__pad [4]uint32 +} + +type prStack64 struct { + Sp uint64 + Size uint64 + Flags int32 + X__pad [5]int32 +} + +type prSigaction64 struct { + Union [8]byte + Mask prSigset + Flags int32 + X__pad [5]int32 +} + +type prgregset struct { + X__iar uint64 + X__msr uint64 + X__cr uint64 + X__lr uint64 + X__ctr uint64 + X__xer uint64 + X__fpscr uint64 + X__fpscrx uint64 + X__gpr [32]uint64 + X__pad1 [8]uint64 +} + +type prfpregset struct { + X__fpr [32]float64 +} + +type pfamily struct { + Extoff uint64 + Extsize uint64 + Pad [14]uint64 +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go new file mode 100644 index 00000000..92251c35 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + hardwareMIB = "hw.machine" + procTranslated = "sysctl.proc_translated" + archIntel = "x86_64" + archApple = "arm64" +) + +func Architecture() (string, error) { + arch, err := unix.Sysctl(hardwareMIB) + if err != nil { + return "", fmt.Errorf("failed to get architecture: %w", err) + } + + return arch, nil +} + +func NativeArchitecture() (string, error) { + processArch, err := Architecture() + if err != nil { + return "", err + } + + // https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment + + translated, err := unix.SysctlUint32(procTranslated) + if err != nil { + // macos without Rosetta installed doesn't have sysctl.proc_translated + if os.IsNotExist(err) { + return processArch, nil + } + return "", fmt.Errorf("failed to read sysctl.proc_translated: %w", err) + } + + var nativeArch string + + switch translated { + case 0: + nativeArch = processArch + case 1: + // Rosetta 2 is supported only on Apple silicon + nativeArch = archApple + } + + return nativeArch, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go new file mode 100644 index 00000000..1954e2a2 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + "time" + + "golang.org/x/sys/unix" +) + +const kernBoottimeMIB = "kern.boottime" + +func BootTime() (time.Time, error) { + tv, err := unix.SysctlTimeval(kernBoottimeMIB) + if err != nil { + return time.Time{}, fmt.Errorf("failed to get host uptime: %w", err) + } + + bootTime := time.Unix(int64(tv.Sec), int64(tv.Usec)*int64(time.Microsecond)) + return bootTime, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go new file mode 100644 index 00000000..20e80f06 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Package darwin implements the HostProvider and ProcessProvider interfaces +// for providing information about MacOS. +package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go new file mode 100644 index 00000000..8b53eee3 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go @@ -0,0 +1,266 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build amd64 || arm64 + +package darwin + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + registry.Register(darwinSystem{}) +} + +type darwinSystem struct{} + +func (s darwinSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (types.CPUTimes, error) { + cpu, err := getHostCPULoadInfo() + if err != nil { + return types.CPUTimes{}, fmt.Errorf("failed to get host CPU usage: %w", err) + } + + ticksPerSecond := time.Duration(getClockTicks()) + + return types.CPUTimes{ + User: time.Duration(cpu.User) * time.Second / ticksPerSecond, + System: time.Duration(cpu.System) * time.Second / ticksPerSecond, + Idle: time.Duration(cpu.Idle) * time.Second / ticksPerSecond, + Nice: time.Duration(cpu.Nice) * time.Second / ticksPerSecond, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + var mem types.HostMemoryInfo + + // Total physical memory. + total, err := MemTotal() + if err != nil { + return nil, fmt.Errorf("failed to get total physical memory: %w", err) + } + + mem.Total = total + + // Page size for computing byte totals. 
+ pageSizeBytes, err := getPageSize() + if err != nil { + return nil, fmt.Errorf("failed to get page size: %w", err) + } + + // Swap + swap, err := getSwapUsage() + if err != nil { + return nil, fmt.Errorf("failed to get swap usage: %w", err) + } + + mem.VirtualTotal = swap.Total + mem.VirtualUsed = swap.Used + mem.VirtualFree = swap.Available + + // Virtual Memory Statistics + vmStat, err := getHostVMInfo64() + if errors.Is(err, types.ErrNotImplemented) { + return &mem, nil + } + + if err != nil { + return nil, fmt.Errorf("failed to get virtual memory statistics: %w", err) + } + + inactiveBytes := uint64(vmStat.Inactive_count) * pageSizeBytes + purgeableBytes := uint64(vmStat.Purgeable_count) * pageSizeBytes + mem.Metrics = map[string]uint64{ + "active_bytes": uint64(vmStat.Active_count) * pageSizeBytes, + "compressed_bytes": uint64(vmStat.Compressor_page_count) * pageSizeBytes, + "compressions_bytes": uint64(vmStat.Compressions) * pageSizeBytes, // Cumulative compressions. + "copy_on_write_faults": vmStat.Cow_faults, + "decompressions_bytes": uint64(vmStat.Decompressions) * pageSizeBytes, // Cumulative decompressions. 
+ "external_bytes": uint64(vmStat.External_page_count) * pageSizeBytes, // File Cache / File-backed pages + "inactive_bytes": inactiveBytes, + "internal_bytes": uint64(vmStat.Internal_page_count) * pageSizeBytes, // App Memory / Anonymous + "page_ins_bytes": uint64(vmStat.Pageins) * pageSizeBytes, + "page_outs_bytes": uint64(vmStat.Pageouts) * pageSizeBytes, + "purgeable_bytes": purgeableBytes, + "purged_bytes": uint64(vmStat.Purges) * pageSizeBytes, + "reactivated_bytes": uint64(vmStat.Reactivations) * pageSizeBytes, + "speculative_bytes": uint64(vmStat.Speculative_count) * pageSizeBytes, + "swap_ins_bytes": uint64(vmStat.Swapins) * pageSizeBytes, + "swap_outs_bytes": uint64(vmStat.Swapouts) * pageSizeBytes, + "throttled_bytes": uint64(vmStat.Throttled_count) * pageSizeBytes, + "translation_faults": vmStat.Faults, + "uncompressed_bytes": uint64(vmStat.Total_uncompressed_pages_in_compressor) * pageSizeBytes, + "wired_bytes": uint64(vmStat.Wire_count) * pageSizeBytes, + "zero_filled_bytes": uint64(vmStat.Zero_fill_count) * pageSizeBytes, + } + + // From Activity Monitor: Memory Used = App Memory (internal) + Wired + Compressed + // https://support.apple.com/en-us/HT201538 + mem.Used = uint64(vmStat.Internal_page_count+vmStat.Wire_count+vmStat.Compressor_page_count) * pageSizeBytes + mem.Free = uint64(vmStat.Free_count) * pageSizeBytes + mem.Available = mem.Free + inactiveBytes + purgeableBytes + + return &mem, nil +} + +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + +func (h *host) FQDN() (string, error) { + return h.FQDNWithContext(context.Background()) +} + +func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + load, err := getLoadAverage() + if err != nil { + return nil, fmt.Errorf("failed to get loadavg: %w", err) + } + + scale := float64(load.scale) + + return &types.LoadAverageInfo{ + One: float64(load.load[0]) / scale, + Five: float64(load.load[1]) / scale, + Fifteen: 
float64(load.load[2]) / scale, + }, nil +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.nativeArchitecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return errors.Join(r.errs...) + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go new file mode 100644 index 00000000..72462575 --- 
/dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !386 + +package darwin + +import ( + "fmt" + "syscall" +) + +const kernelReleaseMIB = "kern.osrelease" + +func KernelVersion() (string, error) { + version, err := syscall.Sysctl(kernelReleaseMIB) + if err != nil { + return "", fmt.Errorf("failed to get kernel version: %w", err) + } + + return version, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go new file mode 100644 index 00000000..34f3a347 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const loadAverage = "vm.loadavg" + +type loadAvg struct { + load [3]uint32 + scale int +} + +func getLoadAverage() (*loadAvg, error) { + data, err := unix.SysctlRaw(loadAverage) + if err != nil { + return nil, err + } + + load := *(*loadAvg)(unsafe.Pointer((&data[0]))) + + return &load, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go new file mode 100644 index 00000000..4339366a --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" +) + +// MachineID returns the Hardware UUID also accessible via +// About this Mac -> System Report and as the field +// IOPlatformUUID in the output of "ioreg -d2 -c IOPlatformExpertDevice". +func MachineID() (string, error) { + return getHostUUID() +} + +func getHostUUID() (string, error) { + var uuidC C.uuid_t + var id [unsafe.Sizeof(uuidC)]C.uchar + wait := C.struct_timespec{5, 0} // 5 seconds + + ret, err := C.gethostuuid(&id[0], &wait) + if ret != 0 { + if err != nil { + return "", fmt.Errorf("gethostuuid failed with %v: %w", ret, err) + } + + return "", fmt.Errorf("gethostuuid failed with %v", ret) + } + + var uuidStringC C.uuid_string_t + var uuid [unsafe.Sizeof(uuidStringC)]C.char + _, err = C.uuid_unparse_upper(&id[0], &uuid[0]) + if err != nil { + return "", fmt.Errorf("uuid_unparse_upper failed: %w", err) + } + + return C.GoString(&uuid[0]), nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go new file mode 100644 index 00000000..a692fdee --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +func MachineID() (string, error) { + return "", fmt.Errorf("machineid requires cgo: %w", types.ErrNotImplemented) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go new file mode 100644 index 00000000..73dd7cf8 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +const hwMemsizeMIB = "hw.memsize" + +func MemTotal() (uint64, error) { + size, err := unix.SysctlUint64(hwMemsizeMIB) + if err != nil { + return 0, fmt.Errorf("failed to get mem total: %w", err) + } + + return size, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go new file mode 100644 index 00000000..94309446 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package darwin + +import ( + "fmt" + "os" + "strconv" + "strings" + + "howett.net/plist" + + "github.com/elastic/go-sysinfo/types" +) + +const ( + systemVersionPlist = "/System/Library/CoreServices/SystemVersion.plist" + + plistProductName = "ProductName" + plistProductVersion = "ProductVersion" + plistProductBuildVersion = "ProductBuildVersion" +) + +func OperatingSystem() (*types.OSInfo, error) { + data, err := os.ReadFile(systemVersionPlist) + if err != nil { + return nil, fmt.Errorf("failed to read plist file: %w", err) + } + + return getOSInfo(data) +} + +func getOSInfo(data []byte) (*types.OSInfo, error) { + attrs := map[string]string{} + if _, err := plist.Unmarshal(data, &attrs); err != nil { + return nil, fmt.Errorf("failed to unmarshal plist data: %w", err) + } + + productName, found := attrs[plistProductName] + if !found { + return nil, fmt.Errorf("plist key %v not found", plistProductName) + } + + version, found := attrs[plistProductVersion] + if !found { + return nil, fmt.Errorf("plist key %v not found", plistProductVersion) + } + + build, found := attrs[plistProductBuildVersion] + if !found { + return nil, fmt.Errorf("plist key %v not found", plistProductBuildVersion) + } + + var major, minor, patch int + for i, v := range strings.SplitN(version, ".", 3) { + switch i { + case 0: + major, _ = strconv.Atoi(v) + case 1: + minor, _ = strconv.Atoi(v) + case 2: + patch, _ = strconv.Atoi(v) + default: + break + } + } + + return &types.OSInfo{ + Type: "macos", + Family: "darwin", + Platform: "darwin", + Name: productName, + Version: version, + Major: major, + Minor: minor, + Patch: patch, + Build: build, + }, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go new file mode 100644 index 00000000..6a72f9fb --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go @@ -0,0 +1,57 @@ +// Licensed to 
Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +// #include +// #include +import "C" + +import ( + "errors" + "unsafe" +) + +//go:generate sh -c "go tool cgo -godefs defs_darwin.go > ztypes_darwin.go" + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n, err := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if err != nil { + return err + } else if n != size { + return errors.New("failed to read process info with proc_pidinfo") + } + + return nil +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDVNODEPATHINFO, 0, ptr, size) + if n != size { + return errors.New("failed to read vnode info with proc_pidinfo") + } + + return nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go new file mode 100644 index 00000000..7c73b69b --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go @@ -0,0 +1,254 
@@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "strconv" + "strings" + "syscall" + "time" + + "golang.org/x/sys/unix" + + "github.com/elastic/go-sysinfo/types" +) + +var errInvalidProcargs2Data = errors.New("invalid kern.procargs2 data") + +func (s darwinSystem) Processes() ([]types.Process, error) { + ps, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return nil, fmt.Errorf("failed to read process table: %w", err) + } + + processes := make([]types.Process, 0, len(ps)) + for _, kp := range ps { + pid := kp.Proc.P_pid + if pid == 0 { + continue + } + + processes = append(processes, &process{ + pid: int(pid), + }) + } + + return processes, nil +} + +func (s darwinSystem) Process(pid int) (types.Process, error) { + p := process{pid: pid} + + return &p, nil +} + +func (s darwinSystem) Self() (types.Process, error) { + return s.Process(os.Getpid()) +} + +type process struct { + info *types.ProcessInfo + pid int + cwd string + exe string + args []string + env map[string]string +} + +func (p *process) PID() int { + return p.pid +} + +func (p *process) Parent() (types.Process, error) { + 
info, err := p.Info() + if err != nil { + return nil, err + } + + return &process{pid: info.PPID}, nil +} + +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil && err != types.ErrNotImplemented { + return types.ProcessInfo{}, err + } + + var vnode procVnodePathInfo + if err := getProcVnodePathInfo(p.pid, &vnode); err != nil && err != types.ErrNotImplemented { + return types.ProcessInfo{}, err + } + + if err := kern_procargs(p.pid, p); err != nil { + return types.ProcessInfo{}, err + } + + p.info = &types.ProcessInfo{ + Name: int8SliceToString(task.Pbsd.Pbi_name[:]), + PID: p.pid, + PPID: int(task.Pbsd.Pbi_ppid), + CWD: int8SliceToString(vnode.Cdir.Path[:]), + Exe: p.exe, + Args: p.args, + StartTime: time.Unix(int64(task.Pbsd.Pbi_start_tvsec), + int64(task.Pbsd.Pbi_start_tvusec)*int64(time.Microsecond)), + } + + return *p.info, nil +} + +func (p *process) User() (types.UserInfo, error) { + kproc, err := unix.SysctlKinfoProc("kern.proc.pid", p.pid) + if err != nil { + return types.UserInfo{}, err + } + + egid := "" + if len(kproc.Eproc.Ucred.Groups) > 0 { + egid = strconv.Itoa(int(kproc.Eproc.Ucred.Groups[0])) + } + + return types.UserInfo{ + UID: strconv.Itoa(int(kproc.Eproc.Pcred.P_ruid)), + EUID: strconv.Itoa(int(kproc.Eproc.Ucred.Uid)), + SUID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svuid)), + GID: strconv.Itoa(int(kproc.Eproc.Pcred.P_rgid)), + SGID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svgid)), + EGID: egid, + }, nil +} + +func (p *process) Environment() (map[string]string, error) { + return p.env, nil +} + +func (p *process) CPUTime() (types.CPUTimes, error) { + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil { + return types.CPUTimes{}, err + } + return types.CPUTimes{ + User: time.Duration(task.Ptinfo.Total_user), + System: time.Duration(task.Ptinfo.Total_system), + }, nil +} + +func (p 
*process) Memory() (types.MemoryInfo, error) { + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil { + return types.MemoryInfo{}, err + } + return types.MemoryInfo{ + Virtual: task.Ptinfo.Virtual_size, + Resident: task.Ptinfo.Resident_size, + Metrics: map[string]uint64{ + "page_ins": uint64(task.Ptinfo.Pageins), + "page_faults": uint64(task.Ptinfo.Faults), + }, + }, nil +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, p *process) error { + data, err := unix.SysctlRaw("kern.procargs2", pid) + if err != nil { + if errors.Is(err, syscall.EINVAL) { + // sysctl returns "invalid argument" for both "no such process" + // and "operation not permitted" errors. + return fmt.Errorf("no such process or operation not permitted: %w", err) + } + return err + } + + return parseKernProcargs2(data, p) +} + +func parseKernProcargs2(data []byte, p *process) error { + // argc + if len(data) < 4 { + return errInvalidProcargs2Data + } + argc := binary.LittleEndian.Uint32(data) + data = data[4:] + + // exe + lines := strings.Split(string(data), "\x00") + p.exe = lines[0] + lines = lines[1:] + + // Skip nulls that may be appended after the exe. 
+ for len(lines) > 0 { + if lines[0] != "" { + break + } + lines = lines[1:] + } + + // argv + if c := min(argc, uint32(len(lines))); c > 0 { + p.args = lines[:c] + lines = lines[c:] + } + + // env vars + env := make(map[string]string, len(lines)) + for _, l := range lines { + if len(l) == 0 { + break + } + + key, val, _ := strings.Cut(l, "=") + env[key] = val + } + p.env = env + + return nil +} + +func int8SliceToString(s []int8) string { + buf := bytes.NewBuffer(make([]byte, len(s))) + buf.Reset() + + for _, b := range s { + if b == 0 { + break + } + buf.WriteByte(byte(b)) + } + return buf.String() +} + +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go new file mode 100644 index 00000000..0ca7a869 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import "github.com/elastic/go-sysinfo/types" + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + return types.ErrNotImplemented +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + return types.ErrNotImplemented +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go new file mode 100644 index 00000000..ce4ee108 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +/* +#cgo LDFLAGS:-lproc +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpu cpuUsage + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpu)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics returned status %d", status) + } + + return &cpu, nil +} + +// getClockTicks returns the number of click ticks in one jiffie. +func getClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO64_COUNT + + var vmStat vmStatistics64Data + status := C.host_statistics64( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO64, + C.host_info_t(unsafe.Pointer(&vmStat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics64 returned status %d", status) + } + + return &vmStat, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go new file mode 100644 index 00000000..fe14050a --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "bytes" + "encoding/binary" + "fmt" + + "golang.org/x/sys/unix" +) + +type cpuUsage struct { + User uint32 + System uint32 + Idle uint32 + Nice uint32 +} + +func getPageSize() (uint64, error) { + i, err := unix.SysctlUint32("vm.pagesize") + if err != nil { + return 0, fmt.Errorf("vm.pagesize returned %w", err) + } + + return uint64(i), nil +} + +// From sysctl.h - xsw_usage. +type swapUsage struct { + Total uint64 + Available uint64 + Used uint64 + PageSize uint64 +} + +const vmSwapUsageMIB = "vm.swapusage" + +func getSwapUsage() (*swapUsage, error) { + var swap swapUsage + data, err := unix.SysctlRaw(vmSwapUsageMIB) + if err != nil { + return nil, err + } + + if err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &swap); err != nil { + return nil, err + } + + return &swap, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go new file mode 100644 index 00000000..6a74d8d8 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + return nil, fmt.Errorf("host cpu load requires cgo: %w", types.ErrNotImplemented) +} + +// getClockTicks returns the number of click ticks in one jiffie. +func getClockTicks() int { + return 0 +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + return nil, fmt.Errorf("host vm info requires cgo: %w", types.ErrNotImplemented) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin.go new file mode 100644 index 00000000..4ad67792 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package darwin + +type processState uint32 + +const ( + stateSIDL processState = iota + 1 + stateRun + stateSleep + stateStop + stateZombie +) + +const argMax = 0x40000 + +type bsdInfo struct { + Pbi_flags uint32 + Pbi_status uint32 + Pbi_xstatus uint32 + Pbi_pid uint32 + Pbi_ppid uint32 + Pbi_uid uint32 + Pbi_gid uint32 + Pbi_ruid uint32 + Pbi_rgid uint32 + Pbi_svuid uint32 + Pbi_svgid uint32 + Rfu_1 uint32 + Pbi_comm [16]int8 + Pbi_name [32]int8 + Pbi_nfiles uint32 + Pbi_pgid uint32 + Pbi_pjobc uint32 + E_tdev uint32 + E_tpgid uint32 + Pbi_nice int32 + Pbi_start_tvsec uint64 + Pbi_start_tvusec uint64 +} + +type procTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + +type procTaskAllInfo struct { + Pbsd bsdInfo + Ptinfo procTaskInfo +} + +type vinfoStat struct { + Dev uint32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Atime int64 + Atimensec int64 + Mtime int64 + Mtimensec int64 + Ctime int64 + Ctimensec int64 + Birthtime int64 + Birthtimensec int64 + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Rdev uint32 + Qspare [2]int64 +} + +type fsid struct { + Val [2]int32 +} + +type vnodeInfo struct { + Stat vinfoStat + Type int32 + Pad int32 + Fsid fsid +} + +type vnodeInfoPath struct { + Vi vnodeInfo + Path [1024]int8 +} + +type procVnodePathInfo struct { + Cdir vnodeInfoPath + Rdir vnodeInfoPath +} + +type vmStatisticsData struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count 
uint32 + Zero_fill_count uint32 + Reactivations uint32 + Pageins uint32 + Pageouts uint32 + Faults uint32 + Cow_faults uint32 + Lookups uint32 + Hits uint32 + Purgeable_count uint32 + Purges uint32 + Speculative_count uint32 +} + +type vmStatistics64Data struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count uint32 + Zero_fill_count uint64 + Reactivations uint64 + Pageins uint64 + Pageouts uint64 + Faults uint64 + Cow_faults uint64 + Lookups uint64 + Hits uint64 + Purges uint64 + Purgeable_count uint32 + Speculative_count uint32 + Decompressions uint64 + Compressions uint64 + Swapins uint64 + Swapouts uint64 + Compressor_page_count uint32 + Throttled_count uint32 + External_page_count uint32 + Internal_page_count uint32 + Total_uncompressed_pages_in_compressor uint64 +} + +type vmSize uint64 + +const ( + cpuStateUser = 0x0 + cpuStateSystem = 0x1 + cpuStateIdle = 0x2 + cpuStateNice = 0x3 +) + +type hostCPULoadInfo struct { + Ticks [4]uint32 +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go new file mode 100644 index 00000000..34615385 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "fmt" + "os" + "strings" + "syscall" +) + +const ( + procSysKernelArch = "/proc/sys/kernel/arch" + procVersion = "/proc/version" + arch8664 = "x86_64" + archAmd64 = "amd64" + archArm64 = "arm64" + archAarch64 = "aarch64" +) + +func Architecture() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", fmt.Errorf("architecture: %w", err) + } + + data := make([]byte, 0, len(uname.Machine)) + for _, v := range uname.Machine { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} + +func NativeArchitecture() (string, error) { + // /proc/sys/kernel/arch was introduced in Kernel 6.1 + // https://www.kernel.org/doc/html/v6.1/admin-guide/sysctl/kernel.html#arch + // It's the same as uname -m, except that for a process running in emulation + // machine returned from syscall reflects the emulated machine, whilst /proc + // filesystem is read as file so its value is not emulated + data, err := os.ReadFile(procSysKernelArch) + if err != nil { + if os.IsNotExist(err) { + // fallback to checking version string for older kernels + version, err := os.ReadFile(procVersion) + if err != nil && !os.IsNotExist(err) { + return "", fmt.Errorf("failed to read kernel version: %w", err) + } + + versionStr := string(version) + if strings.Contains(versionStr, archAmd64) || strings.Contains(versionStr, arch8664) { + return archAmd64, nil + } else if strings.Contains(versionStr, archArm64) || strings.Contains(versionStr, archAarch64) { + // for parity with Architecture() and /proc/sys/kernel/arch + // as aarch64 and arm64 are used interchangeably + return archAarch64, nil + } + return "", nil + } + + return "", fmt.Errorf("failed to read kernel arch: %w", err) + } + + nativeArch := string(data) + nativeArch = strings.TrimRight(nativeArch, "\n") + + return nativeArch, nil 
+} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go new file mode 100644 index 00000000..58665a7c --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "sync" + "time" + + "github.com/prometheus/procfs" +) + +var ( + bootTimeValue time.Time // Cached boot time. + bootTimeLock sync.Mutex // Lock that guards access to bootTime. 
+) + +func bootTime(fs procfs.FS) (time.Time, error) { + bootTimeLock.Lock() + defer bootTimeLock.Unlock() + + if !bootTimeValue.IsZero() { + return bootTimeValue, nil + } + + stat, err := fs.Stat() + if err != nil { + return time.Time{}, err + } + + bootTimeValue = time.Unix(int64(stat.BootTime), 0) + return bootTimeValue, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go new file mode 100644 index 00000000..40bf454d --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "strconv" + + "github.com/elastic/go-sysinfo/types" +) + +// capabilityNames is mapping of capability constant values to names. 
+// +// Generated with: +// +// curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ +// grep -P '^#define CAP_\w+\s+\d+' | \ +// perl -pe 's/#define CAP_(\w+)\s+(\d+)/\2: "\L\1",/g' +var capabilityNames = map[int]string{ + 0: "chown", + 1: "dac_override", + 2: "dac_read_search", + 3: "fowner", + 4: "fsetid", + 5: "kill", + 6: "setgid", + 7: "setuid", + 8: "setpcap", + 9: "linux_immutable", + 10: "net_bind_service", + 11: "net_broadcast", + 12: "net_admin", + 13: "net_raw", + 14: "ipc_lock", + 15: "ipc_owner", + 16: "sys_module", + 17: "sys_rawio", + 18: "sys_chroot", + 19: "sys_ptrace", + 20: "sys_pacct", + 21: "sys_admin", + 22: "sys_boot", + 23: "sys_nice", + 24: "sys_resource", + 25: "sys_time", + 26: "sys_tty_config", + 27: "mknod", + 28: "lease", + 29: "audit_write", + 30: "audit_control", + 31: "setfcap", + 32: "mac_override", + 33: "mac_admin", + 34: "syslog", + 35: "wake_alarm", + 36: "block_suspend", + 37: "audit_read", + 38: "perfmon", + 39: "bpf", + 40: "checkpoint_restore", +} + +func capabilityName(num int) string { + name, found := capabilityNames[num] + if found { + return name + } + + return strconv.Itoa(num) +} + +func readCapabilities(content []byte) (*types.CapabilityInfo, error) { + var cap types.CapabilityInfo + + err := parseKeyValue(content, ':', func(key, value []byte) error { + var err error + switch string(key) { + case "CapInh": + cap.Inheritable, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapPrm": + cap.Permitted, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapEff": + cap.Effective, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapBnd": + cap.Bounding, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapAmb": + cap.Ambient, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err 
+ } + } + return nil + }) + + return &cap, err +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go new file mode 100644 index 00000000..c66dd323 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "fmt" + "os" +) + +const procOneCgroup = "/proc/1/cgroup" + +// IsContainerized returns true if this process is containerized. 
+func IsContainerized() (bool, error) { + data, err := os.ReadFile(procOneCgroup) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + + return false, fmt.Errorf("failed to read process cgroups: %w", err) + } + + return isContainerizedCgroup(data) +} + +func isContainerizedCgroup(data []byte) (bool, error) { + s := bufio.NewScanner(bytes.NewReader(data)) + for n := 0; s.Scan(); n++ { + line := s.Bytes() + + // Following a suggestion on Stack Overflow on how to detect + // being inside a container: https://stackoverflow.com/a/20012536/235203 + if bytes.Contains(line, []byte("docker")) || bytes.Contains(line, []byte(".slice")) || bytes.Contains(line, []byte("lxc")) || bytes.Contains(line, []byte("kubepods")) { + return true, nil + } + } + + return false, s.Err() +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go new file mode 100644 index 00000000..53d3c36c --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package linux implements the HostProvider and ProcessProvider interfaces +// for providing information about Linux. 
+package linux diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go new file mode 100644 index 00000000..24e72d0c --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/prometheus/procfs" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + // register wrappers that implement the HostFS versions of the ProcessProvider and HostProvider + registry.Register(func(opts registry.ProviderOptions) registry.HostProvider { return newLinuxSystem(opts.Hostfs) }) + registry.Register(func(opts registry.ProviderOptions) registry.ProcessProvider { return newLinuxSystem(opts.Hostfs) }) +} + +type linuxSystem struct { + procFS procFS +} + +func newLinuxSystem(hostFS string) linuxSystem { + mountPoint := filepath.Join(hostFS, procfs.DefaultMountPoint) + fs, _ := procfs.NewFS(mountPoint) + return linuxSystem{ + procFS: procFS{FS: fs, mountPoint: mountPoint, baseMount: hostFS}, + } +} + +func (s linuxSystem) Host() (types.Host, error) { + return newHost(s.procFS) +} + +type host struct { + procFS procFS + stat procfs.Stat + info types.HostInfo +} + +// Info returns host info +func (h *host) Info() types.HostInfo { + return h.info +} + +// Memory returns memory info +func (h *host) Memory() (*types.HostMemoryInfo, error) { + path := h.procFS.path("meminfo") + content, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading meminfo file %s: %w", path, err) + } + + return parseMemInfo(content) +} + +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + +func (h *host) FQDN() (string, error) { + return h.FQDNWithContext(context.Background()) +} + +// VMStat reports data from /proc/vmstat on linux. 
+func (h *host) VMStat() (*types.VMStatInfo, error) { + path := h.procFS.path("vmstat") + content, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading vmstat file %s: %w", path, err) + } + + return parseVMStat(content) +} + +// LoadAverage reports data from /proc/loadavg on linux. +func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + loadAvg, err := h.procFS.LoadAvg() + if err != nil { + return nil, fmt.Errorf("error fetching load averages: %w", err) + } + + return &types.LoadAverageInfo{ + One: loadAvg.Load1, + Five: loadAvg.Load5, + Fifteen: loadAvg.Load15, + }, nil +} + +// NetworkCounters reports data from /proc/net on linux +func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { + snmpFile := h.procFS.path("net/snmp") + snmpRaw, err := os.ReadFile(snmpFile) + if err != nil { + return nil, fmt.Errorf("error fetching net/snmp file %s: %w", snmpFile, err) + } + snmp, err := getNetSnmpStats(snmpRaw) + if err != nil { + return nil, fmt.Errorf("error parsing SNMP stats: %w", err) + } + + netstatFile := h.procFS.path("net/netstat") + netstatRaw, err := os.ReadFile(netstatFile) + if err != nil { + return nil, fmt.Errorf("error fetching net/netstat file %s: %w", netstatFile, err) + } + netstat, err := getNetstatStats(netstatRaw) + if err != nil { + return nil, fmt.Errorf("error parsing netstat file: %w", err) + } + + return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil +} + +// CPUTime returns host CPU usage metrics +func (h *host) CPUTime() (types.CPUTimes, error) { + stat, err := h.procFS.Stat() + if err != nil { + return types.CPUTimes{}, fmt.Errorf("error fetching CPU stats: %w", err) + } + + return types.CPUTimes{ + User: time.Duration(stat.CPUTotal.User * float64(time.Second)), + System: time.Duration(stat.CPUTotal.System * float64(time.Second)), + Idle: time.Duration(stat.CPUTotal.Idle * float64(time.Second)), + IOWait: time.Duration(stat.CPUTotal.Iowait * float64(time.Second)), + IRQ: 
time.Duration(stat.CPUTotal.IRQ * float64(time.Second)), + Nice: time.Duration(stat.CPUTotal.Nice * float64(time.Second)), + SoftIRQ: time.Duration(stat.CPUTotal.SoftIRQ * float64(time.Second)), + Steal: time.Duration(stat.CPUTotal.Steal * float64(time.Second)), + }, nil +} + +func newHost(fs procFS) (*host, error) { + stat, err := fs.Stat() + if err != nil { + return nil, fmt.Errorf("failed to read proc stat: %w", err) + } + + h := &host{stat: stat, procFS: fs} + r := &reader{} + r.architecture(h) + r.nativeArchitecture(h) + r.bootTime(h) + r.containerized(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return errors.Join(r.errs...) + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := bootTime(h.procFS.FS) + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) containerized(h *host) { + v, err := IsContainerized() + if r.addErr(err) { + return + } + h.info.Containerized = &v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r 
*reader) os(h *host) { + v, err := getOSInfo(h.procFS.baseMount) + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineIDHostfs(h.procFS.baseMount) + if r.addErr(err) { + return + } + h.info.UniqueID = v +} + +type procFS struct { + procfs.FS + mountPoint string + baseMount string +} + +func (fs *procFS) path(p ...string) string { + elem := append([]string{fs.mountPoint}, p...) + return filepath.Join(elem...) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go new file mode 100644 index 00000000..1695fb81 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "fmt" + "syscall" +) + +func KernelVersion() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", fmt.Errorf("kernel version: %w", err) + } + + data := make([]byte, 0, len(uname.Release)) + for _, v := range uname.Release { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go new file mode 100644 index 00000000..a5e8afaa --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "github.com/elastic/go-sysinfo/types" +) + +// Possible (current and historic) locations of the machine-id file. +// These will be searched in order. 
+var machineIDFiles = []string{"/etc/machine-id", "/var/lib/dbus/machine-id", "/var/db/dbus/machine-id"} + +func machineID(hostfs string) (string, error) { + var contents []byte + var err error + + for _, file := range machineIDFiles { + contents, err = os.ReadFile(filepath.Join(hostfs, file)) + if err != nil { + if os.IsNotExist(err) { + // Try next location + continue + } + + // Return with error on any other error + return "", fmt.Errorf("failed to read %v: %w", file, err) + } + + // Found it + break + } + + if os.IsNotExist(err) { + // None of the locations existed + return "", types.ErrNotImplemented + } + + contents = bytes.TrimSpace(contents) + return string(contents), nil +} + +func MachineIDHostfs(hostfs string) (string, error) { + return machineID(hostfs) +} + +func MachineID() (string, error) { + return machineID("") +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go new file mode 100644 index 00000000..c0c5ab85 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) { + memInfo := &types.HostMemoryInfo{ + Metrics: map[string]uint64{}, + } + + hasAvailable := false + err := parseKeyValue(content, ':', func(key, value []byte) error { + num, err := parseBytesOrNumber(value) + if err != nil { + return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) + } + + k := string(key) + switch k { + case "MemTotal": + memInfo.Total = num + case "MemAvailable": + hasAvailable = true + memInfo.Available = num + case "MemFree": + memInfo.Free = num + case "SwapTotal": + memInfo.VirtualTotal = num + case "SwapFree": + memInfo.VirtualFree = num + default: + memInfo.Metrics[k] = num + } + + return nil + }) + if err != nil { + return nil, err + } + + memInfo.Used = memInfo.Total - memInfo.Free + memInfo.VirtualUsed = memInfo.VirtualTotal - memInfo.VirtualFree + + // MemAvailable was added in kernel 3.14. + if !hasAvailable { + // Linux uses this for the calculation (but we are using a simpler calculation). + // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + memInfo.Available = memInfo.Free + memInfo.Metrics["Buffers"] + memInfo.Metrics["Cached"] + } + + return memInfo, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go new file mode 100644 index 00000000..f2e366ab --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go @@ -0,0 +1,329 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/elastic/go-sysinfo/types" +) + +const ( + osRelease = "/etc/os-release" + lsbRelease = "/etc/lsb-release" + distribRelease = "/etc/*-release" + versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P[-\w ]+)\))?` + versionGrokSuse = `(?P(?P[0-9]+)(?:[.-]?(?:SP)?(?P[0-9]+))?(?:[.-](?P[0-9]+|\w+))?)(?: \((?P[-\w ]+)\))?` +) + +var ( + // distribReleaseRegexp parses the /etc/-release file. See man lsb-release. + distribReleaseRegexp = regexp.MustCompile(`(?P[\w]+).* ` + versionGrok) + + // versionRegexp parses version numbers (e.g. 6 or 6.1 or 6.1.0 or 6.1.0_20150102). + versionRegexp = regexp.MustCompile(versionGrok) + + // versionRegexpSuse parses version numbers for SUSE (e.g. 15-SP1). + versionRegexpSuse = regexp.MustCompile(versionGrokSuse) +) + +// familyMap contains a mapping of family -> []platforms. 
+var familyMap = map[string][]string{ + "alpine": {"alpine"}, + "arch": {"arch", "antergos", "manjaro"}, + "redhat": { + "redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", + "amzn", "rhel", "almalinux", "openeuler", "rocky", + }, + "debian": {"debian", "ubuntu", "raspbian", "linuxmint"}, + "suse": {"suse", "sles", "opensuse"}, +} + +var platformToFamilyMap map[string]string + +func init() { + platformToFamilyMap = map[string]string{} + for family, platformList := range familyMap { + for _, platform := range platformList { + platformToFamilyMap[platform] = family + } + } +} + +// OperatingSystem returns OS info. This does not take an alternate hostfs. +// to get OS info from an alternate root path, use reader.os() +func OperatingSystem() (*types.OSInfo, error) { + return getOSInfo("") +} + +func getOSInfo(baseDir string) (*types.OSInfo, error) { + osInfo, err := getOSRelease(baseDir) + if err != nil { + // Fallback + return findDistribRelease(baseDir) + } + + // For the redhat family, enrich version info with data from + // /etc/[distrib]-release because the minor and patch info isn't always + // present in os-release. 
+ if osInfo.Family != "redhat" { + return osInfo, nil + } + + distInfo, err := findDistribRelease(baseDir) + if err != nil { + return osInfo, err + } + osInfo.Major = distInfo.Major + osInfo.Minor = distInfo.Minor + osInfo.Patch = distInfo.Patch + osInfo.Codename = distInfo.Codename + return osInfo, nil +} + +func getOSRelease(baseDir string) (*types.OSInfo, error) { + lsbRel, _ := os.ReadFile(filepath.Join(baseDir, lsbRelease)) + + osRel, err := os.ReadFile(filepath.Join(baseDir, osRelease)) + if err != nil { + return nil, err + } + if len(osRel) == 0 { + return nil, fmt.Errorf("%v is empty: %w", osRelease, err) + } + + return parseOSRelease(append(lsbRel, osRel...)) +} + +func parseOSRelease(content []byte) (*types.OSInfo, error) { + fields := map[string]string{} + + s := bufio.NewScanner(bytes.NewReader(content)) + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + + // Skip blank lines and comments. + if len(line) == 0 || bytes.HasPrefix(line, []byte("#")) { + continue + } + + parts := bytes.SplitN(s.Bytes(), []byte("="), 2) + if len(parts) != 2 { + continue + } + + key := string(bytes.TrimSpace(parts[0])) + val := string(bytes.TrimSpace(parts[1])) + fields[key] = val + + // Trim quotes. + val, err := strconv.Unquote(val) + if err == nil { + fields[key] = strings.TrimSpace(val) + } + } + + if s.Err() != nil { + return nil, s.Err() + } + + return makeOSInfo(fields) +} + +func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { + os := &types.OSInfo{ + Type: "linux", + Platform: firstOf(osRelease, "ID", "DISTRIB_ID"), + Name: firstOf(osRelease, "NAME", "PRETTY_NAME"), + Version: firstOf(osRelease, "VERSION", "VERSION_ID", "DISTRIB_RELEASE"), + Build: osRelease["BUILD_ID"], + Codename: firstOf(osRelease, "VERSION_CODENAME", "DISTRIB_CODENAME"), + } + + if os.Codename == "" { + // Some OSes use their own CODENAME keys (e.g UBUNTU_CODENAME). 
+ for k, v := range osRelease { + if strings.Contains(k, "CODENAME") { + os.Codename = v + break + } + } + } + + if os.Platform == "" { + // Fallback to the first word of the Name field. + os.Platform, _, _ = strings.Cut(os.Name, " ") + } + + os.Family = linuxFamily(os.Platform) + if os.Family == "" { + // ID_LIKE is a space-separated list of OS identifiers that this + // OS is similar to. Use this to figure out the Linux family. + for _, id := range strings.Fields(osRelease["ID_LIKE"]) { + os.Family = linuxFamily(id) + if os.Family != "" { + break + } + } + } + + if osRelease["ID_LIKE"] == "suse" { + extractVersionDetails(os, os.Version, versionRegexpSuse) + } else if os.Version != "" { + extractVersionDetails(os, os.Version, versionRegexp) + } + + return os, nil +} + +func extractVersionDetails(os *types.OSInfo, version string, re *regexp.Regexp) { + keys := re.SubexpNames() + for i, match := range re.FindStringSubmatch(version) { + switch keys[i] { + case "major": + os.Major, _ = strconv.Atoi(match) + case "minor": + os.Minor, _ = strconv.Atoi(match) + case "patch": + os.Patch, _ = strconv.Atoi(match) + case "codename": + if os.Codename == "" { + os.Codename = match + } + } + } +} + +func findDistribRelease(baseDir string) (*types.OSInfo, error) { + matches, err := filepath.Glob(filepath.Join(baseDir, distribRelease)) + if err != nil { + return nil, err + } + var errs []error + for _, path := range matches { + if strings.HasSuffix(path, osRelease) || strings.HasSuffix(path, lsbRelease) { + continue + } + + info, err := os.Stat(path) + if err != nil || info.IsDir() || info.Size() == 0 { + continue + } + + osInfo, err := getDistribRelease(path) + if err != nil { + errs = append(errs, fmt.Errorf("in %s: %w", path, err)) + continue + } + return osInfo, nil + } + return nil, fmt.Errorf("no valid /etc/-release file found: %w", errors.Join(errs...)) +} + +func getDistribRelease(file string) (*types.OSInfo, error) { + data, err := os.ReadFile(file) + if err != nil { + 
return nil, err + } + parts := bytes.SplitN(data, []byte("\n"), 2) + if len(parts) != 2 { + return nil, fmt.Errorf("failed to parse %v", file) + } + + // Use distrib as platform name. + var platform string + if parts := strings.SplitN(filepath.Base(file), "-", 2); len(parts) > 0 { + platform = strings.ToLower(parts[0]) + } + + return parseDistribRelease(platform, parts[0]) +} + +func parseDistribRelease(platform string, content []byte) (*types.OSInfo, error) { + var ( + line = string(bytes.TrimSpace(content)) + keys = distribReleaseRegexp.SubexpNames() + os = &types.OSInfo{ + Type: "linux", + Platform: platform, + } + ) + + for i, m := range distribReleaseRegexp.FindStringSubmatch(line) { + switch keys[i] { + case "name": + os.Name = m + case "version": + os.Version = m + case "major": + os.Major, _ = strconv.Atoi(m) + case "minor": + os.Minor, _ = strconv.Atoi(m) + case "patch": + os.Patch, _ = strconv.Atoi(m) + case "codename": + os.Version += " (" + m + ")" + os.Codename = m + } + } + + os.Family = linuxFamily(os.Platform) + return os, nil +} + +// firstOf returns the first non-empty value found in the map while +// iterating over keys. +func firstOf(kv map[string]string, keys ...string) string { + for _, key := range keys { + if v := kv[key]; v != "" { + return v + } + } + return "" +} + +// linuxFamily returns the linux distribution family associated to the OS platform. +// If there is no family associated then it returns an empty string. +func linuxFamily(platform string) string { + if platform == "" { + return "" + } + + platform = strings.ToLower(platform) + + // First try a direct lookup. + if family, found := platformToFamilyMap[platform]; found { + return family + } + + // Try prefix matching (e.g. opensuse matches opensuse-tumpleweed). 
+ for platformPrefix, family := range platformToFamilyMap { + if strings.HasPrefix(platform, platformPrefix) { + return family + } + } + return "" +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go new file mode 100644 index 00000000..fc3c25be --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go @@ -0,0 +1,297 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/prometheus/procfs" + + "github.com/elastic/go-sysinfo/types" +) + +const userHz = 100 + +// Processes returns a list of processes on the system +func (s linuxSystem) Processes() ([]types.Process, error) { + procs, err := s.procFS.AllProcs() + if err != nil { + return nil, fmt.Errorf("error fetching all processes: %w", err) + } + + processes := make([]types.Process, 0, len(procs)) + for _, proc := range procs { + processes = append(processes, &process{Proc: proc, fs: s.procFS}) + } + return processes, nil +} + +// Process returns the given process +func (s linuxSystem) Process(pid int) (types.Process, error) { + proc, err := s.procFS.Proc(pid) + if err != nil { + return nil, fmt.Errorf("error fetching process: %w", err) + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +// Self returns process info for the caller's own PID +func (s linuxSystem) Self() (types.Process, error) { + proc, err := s.procFS.Self() + if err != nil { + return nil, fmt.Errorf("error fetching self process info: %w", err) + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +type process struct { + procfs.Proc + fs procFS + info *types.ProcessInfo +} + +// PID returns the PID of the process +func (p *process) PID() int { + return p.Proc.PID +} + +// Parent returns the parent process +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, fmt.Errorf("error fetching process info: %w", err) + } + + proc, err := p.fs.Proc(info.PPID) + if err != nil { + return nil, fmt.Errorf("error fetching data for parent process: %w", err) + } + + return &process{Proc: proc, fs: p.fs}, nil +} + +func (p *process) path(pa ...string) string { + return p.fs.path(append([]string{strconv.Itoa(p.PID())}, pa...)...) 
+} + +// CWD returns the current working directory +func (p *process) CWD() (string, error) { + cwd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return cwd, err +} + +// Info returns basic process info +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + stat, err := p.Stat() + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error fetching process stats: %w", err) + } + + exe, err := p.Executable() + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error fetching process executable info: %w", err) + } + + args, err := p.CmdLine() + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error fetching process cmdline: %w", err) + } + + cwd, err := p.CWD() + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error fetching process CWD: %w", err) + } + + bootTime, err := bootTime(p.fs.FS) + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error fetching boot time: %w", err) + } + + p.info = &types.ProcessInfo{ + Name: stat.Comm, + PID: p.PID(), + PPID: stat.PPID, + CWD: cwd, + Exe: exe, + Args: args, + StartTime: bootTime.Add(ticksToDuration(stat.Starttime)), + } + + return *p.info, nil +} + +// Memory returns memory stats for the process +func (p *process) Memory() (types.MemoryInfo, error) { + stat, err := p.Stat() + if err != nil { + return types.MemoryInfo{}, err + } + + return types.MemoryInfo{ + Resident: uint64(stat.ResidentMemory()), + Virtual: uint64(stat.VirtualMemory()), + }, nil +} + +// CPUTime returns CPU usage time for the process +func (p *process) CPUTime() (types.CPUTimes, error) { + stat, err := p.Stat() + if err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + User: ticksToDuration(uint64(stat.UTime)), + System: ticksToDuration(uint64(stat.STime)), + }, nil +} + +// OpenHandles returns the list of open file descriptors of the process. 
+func (p *process) OpenHandles() ([]string, error) { + return p.Proc.FileDescriptorTargets() +} + +// OpenHandles returns the number of open file descriptors of the process. +func (p *process) OpenHandleCount() (int, error) { + return p.Proc.FileDescriptorsLen() +} + +// Environment returns a list of environment variables for the process +func (p *process) Environment() (map[string]string, error) { + // TODO: add Environment to procfs + content, err := os.ReadFile(p.path("environ")) + if err != nil { + return nil, err + } + + env := map[string]string{} + pairs := bytes.Split(content, []byte{0}) + for _, kv := range pairs { + parts := bytes.SplitN(kv, []byte{'='}, 2) + if len(parts) != 2 { + continue + } + + key := string(bytes.TrimSpace(parts[0])) + if key == "" { + continue + } + + env[key] = string(parts[1]) + } + + return env, nil +} + +// Seccomp returns seccomp info for the process +func (p *process) Seccomp() (*types.SeccompInfo, error) { + content, err := os.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readSeccompFields(content) +} + +// Capabilities returns capability info for the process +func (p *process) Capabilities() (*types.CapabilityInfo, error) { + content, err := os.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readCapabilities(content) +} + +// User returns user info for the process +func (p *process) User() (types.UserInfo, error) { + content, err := os.ReadFile(p.path("status")) + if err != nil { + return types.UserInfo{}, err + } + + var user types.UserInfo + err = parseKeyValue(content, ':', func(key, value []byte) error { + // See proc(5) for the format of /proc/[pid]/status + switch string(key) { + case "Uid": + ids := strings.Split(string(value), "\t") + if len(ids) >= 3 { + user.UID = ids[0] + user.EUID = ids[1] + user.SUID = ids[2] + } + case "Gid": + ids := strings.Split(string(value), "\t") + if len(ids) >= 3 { + user.GID = ids[0] + user.EGID = ids[1] + user.SGID = ids[2] 
+ } + } + return nil + }) + if err != nil { + return user, fmt.Errorf("error partsing key-values in user data: %w", err) + } + + return user, nil +} + +// NetworkStats reports network stats for an individual PID. +func (p *process) NetworkCounters() (*types.NetworkCountersInfo, error) { + snmpRaw, err := os.ReadFile(p.path("net/snmp")) + if err != nil { + return nil, fmt.Errorf("error reading net/snmp file: %w", err) + } + snmp, err := getNetSnmpStats(snmpRaw) + if err != nil { + return nil, fmt.Errorf("error parsing SNMP network data: %w", err) + } + + netstatRaw, err := os.ReadFile(p.path("net/netstat")) + if err != nil { + return nil, fmt.Errorf("error reading net/netstat file: %w", err) + } + netstat, err := getNetstatStats(netstatRaw) + if err != nil { + return nil, fmt.Errorf("error parsing netstat file: %w", err) + } + + return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil +} + +func ticksToDuration(ticks uint64) time.Duration { + seconds := float64(ticks) / float64(userHz) * float64(time.Second) + return time.Duration(int64(seconds)) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go new file mode 100644 index 00000000..1356c2a8 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/elastic/go-sysinfo/types" +) + +// fillStruct is some reflection work that can dynamically fill one of our tagged `netstat` structs with netstat data +func fillStruct(str interface{}, data map[string]map[string]uint64) { + val := reflect.ValueOf(str).Elem() + typ := reflect.TypeOf(str).Elem() + + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + if tag := field.Tag.Get("netstat"); tag != "" { + if values, ok := data[tag]; ok { + val.Field(i).Set(reflect.ValueOf(values)) + } + } + } +} + +// parseEntry parses two lines from the net files, the first line being keys, the second being values +func parseEntry(line1, line2 string) (map[string]uint64, error) { + keyArr := strings.Split(strings.TrimSpace(line1), " ") + valueArr := strings.Split(strings.TrimSpace(line2), " ") + + if len(keyArr) != len(valueArr) { + return nil, errors.New("key and value lines are mismatched") + } + + counters := make(map[string]uint64, len(valueArr)) + for iter, value := range valueArr { + + // This if-else block is to deal with the MaxConn value in SNMP, + // which is a signed value according to RFC2012. + // This library emulates the behavior of the kernel: store all values as a uint, then cast to a signed value for printing + // Users of this library need to be aware that this value should be printed as a signed int or hex value to make it useful. 
+ var parsed uint64 + var err error + if strings.Contains(value, "-") { + signedParsed, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) + } + parsed = uint64(signedParsed) + } else { + parsed, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) + } + } + + counters[keyArr[iter]] = parsed + } + return counters, nil +} + +// parseNetFile parses an entire file, and returns a 2D map, representing how files are sorted by protocol +func parseNetFile(body string) (map[string]map[string]uint64, error) { + fileMetrics := make(map[string]map[string]uint64) + bodySplit := strings.Split(strings.TrimSpace(body), "\n") + // There should be an even number of lines. If not, something is wrong. + if len(bodySplit)%2 != 0 { + return nil, fmt.Errorf("badly parsed body: %s", body) + } + // in the network counters, data is divided into two-line sections: a line of keys, and a line of values + // With each line + for index := 0; index < len(bodySplit); index += 2 { + keysSplit := strings.Split(bodySplit[index], ":") + valuesSplit := strings.Split(bodySplit[index+1], ":") + if len(keysSplit) != 2 || len(valuesSplit) != 2 { + return nil, fmt.Errorf("wrong number of keys: %#v", keysSplit) + } + valMap, err := parseEntry(keysSplit[1], valuesSplit[1]) + if err != nil { + return nil, fmt.Errorf("error parsing lines: %w", err) + } + fileMetrics[valuesSplit[0]] = valMap + } + return fileMetrics, nil +} + +// getNetSnmpStats pulls snmp stats from /proc/net +func getNetSnmpStats(raw []byte) (types.SNMP, error) { + snmpData, err := parseNetFile(string(raw)) + if err != nil { + return types.SNMP{}, fmt.Errorf("error parsing SNMP: %w", err) + } + output := types.SNMP{} + fillStruct(&output, snmpData) + + return output, nil +} + +// getNetstatStats pulls netstat stats from /proc/net +func getNetstatStats(raw 
[]byte) (types.Netstat, error) { + netstatData, err := parseNetFile(string(raw)) + if err != nil { + return types.Netstat{}, fmt.Errorf("error parsing netstat: %w", err) + } + output := types.Netstat{} + fillStruct(&output, netstatData) + return output, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go new file mode 100644 index 00000000..fd38ea45 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "strconv" + + "github.com/elastic/go-sysinfo/types" +) + +type SeccompMode uint8 + +const ( + SeccompModeDisabled SeccompMode = iota + SeccompModeStrict + SeccompModeFilter +) + +func (m SeccompMode) String() string { + switch m { + case SeccompModeDisabled: + return "disabled" + case SeccompModeStrict: + return "strict" + case SeccompModeFilter: + return "filter" + default: + return strconv.Itoa(int(m)) + } +} + +func readSeccompFields(content []byte) (*types.SeccompInfo, error) { + var seccomp types.SeccompInfo + + err := parseKeyValue(content, ':', func(key, value []byte) error { + switch string(key) { + case "Seccomp": + mode, err := strconv.ParseUint(string(value), 10, 8) + if err != nil { + return err + } + seccomp.Mode = SeccompMode(mode).String() + case "NoNewPrivs": + noNewPrivs, err := strconv.ParseBool(string(value)) + if err != nil { + return err + } + seccomp.NoNewPrivs = &noNewPrivs + } + return nil + }) + + return &seccomp, err +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go new file mode 100644 index 00000000..1c1d0584 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "os" + "strconv" +) + +// parseKeyValue parses key/val pairs separated by the provided separator from +// each line in content and invokes the callback. White-space is trimmed from +// val. Empty lines are ignored. All non-empty lines must contain the separator +// otherwise an error is returned. +func parseKeyValue(content []byte, separator byte, callback func(key, value []byte) error) error { + var line []byte + + for len(content) > 0 { + line, content, _ = bytes.Cut(content, []byte{'\n'}) + if len(line) == 0 { + continue + } + + key, value, ok := bytes.Cut(line, []byte{separator}) + if !ok { + return fmt.Errorf("separator %q not found", separator) + } + + callback(key, bytes.TrimSpace(value)) + } + + return nil +} + +func findValue(filename, separator, key string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + var line []byte + sc := bufio.NewScanner(bytes.NewReader(content)) + for sc.Scan() { + if bytes.HasPrefix(sc.Bytes(), []byte(key)) { + line = sc.Bytes() + break + } + } + if len(line) == 0 { + return "", fmt.Errorf("%v not found", key) + } + + parts := bytes.SplitN(line, []byte(separator), 2) + if len(parts) != 2 { + return "", fmt.Errorf("unexpected line format for '%v'", string(line)) + } + + return string(bytes.TrimSpace(parts[1])), nil +} + +func decodeBitMap(s string, lookupName func(int) string) ([]string, error) { + mask, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + var names []string + for i := 0; i < 64; i++ { + bit := mask & (1 << uint(i)) + if bit > 0 { + names = append(names, lookupName(i)) + } + } + + return names, nil +} + +// parses a meminfo field, returning either a raw numerical value, or the kB value converted to bytes +func parseBytesOrNumber(data []byte) (uint64, error) { + 
parts := bytes.Fields(data) + + if len(parts) == 0 { + return 0, errors.New("empty value") + } + + num, err := strconv.ParseUint(string(parts[0]), 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse value: %w", err) + } + + var multiplier uint64 = 1 + if len(parts) >= 2 { + switch string(parts[1]) { + case "kB": + multiplier = 1024 + default: + return 0, fmt.Errorf("unhandled unit %v", string(parts[1])) + } + } + + return num * multiplier, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go new file mode 100644 index 00000000..ea918c84 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "fmt" + "reflect" + + "github.com/elastic/go-sysinfo/types" +) + +// vmstatTagToFieldIndex contains a mapping of json struct tags to struct field indices. 
+var vmstatTagToFieldIndex = make(map[string]int) + +func init() { + var vmstat types.VMStatInfo + val := reflect.ValueOf(vmstat) + typ := reflect.TypeOf(vmstat) + + for i := 0; i < val.NumField(); i++ { + field := typ.Field(i) + if tag := field.Tag.Get("json"); tag != "" { + vmstatTagToFieldIndex[tag] = i + } + } +} + +// parseVMStat parses the contents of /proc/vmstat. +func parseVMStat(content []byte) (*types.VMStatInfo, error) { + var vmStat types.VMStatInfo + refValues := reflect.ValueOf(&vmStat).Elem() + + err := parseKeyValue(content, ' ', func(key, value []byte) error { + // turn our []byte value into an int + val, err := parseBytesOrNumber(value) + if err != nil { + return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) + } + + idx, ok := vmstatTagToFieldIndex[string(key)] + if !ok { + return nil + } + + sval := refValues.Field(idx) + + if sval.CanSet() { + sval.SetUint(val) + } + return nil + }) + + return &vmStat, err +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go new file mode 100644 index 00000000..b8bb4552 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux || darwin || aix + +package shared + +import ( + "context" + "fmt" + "net" + "os" + "strings" +) + +// FQDNWithContext attempts to lookup the host's fully-qualified domain name and returns it. +// It does so using the following algorithm: +// +// 1. It gets the hostname from the OS. If this step fails, it returns an error. +// +// 2. It tries to perform a CNAME DNS lookup for the hostname. If this succeeds, it +// returns the CNAME (after trimming any trailing period) as the FQDN. +// +// 3. It tries to perform an IP lookup for the hostname. If this succeeds, it tries +// to perform a reverse DNS lookup on the returned IPs and returns the first +// successful result (after trimming any trailing period) as the FQDN. +// +// 4. If steps 2 and 3 both fail, an empty string is returned as the FQDN along with +// errors from those steps. +func FQDNWithContext(ctx context.Context) (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("could not get hostname to look for FQDN: %w", err) + } + + return fqdn(ctx, hostname) +} + +// FQDN just calls FQDNWithContext with a background context. +// Deprecated. +func FQDN() (string, error) { + return FQDNWithContext(context.Background()) +} + +func fqdn(ctx context.Context, hostname string) (string, error) { + var errs error + cname, err := net.DefaultResolver.LookupCNAME(ctx, hostname) + if err != nil { + errs = fmt.Errorf("could not get FQDN, all methods failed: failed looking up CNAME: %w", + err) + } + + if cname != "" { + cname = strings.TrimSuffix(cname, ".") + + // Go might lowercase the cname "for convenience". Therefore, if cname + // is the same as hostname, return hostname as is. 
+ // See https://github.com/golang/go/blob/go1.22.5/src/net/hosts.go#L38 + if strings.ToLower(cname) == strings.ToLower(hostname) { + return hostname, nil + } + + return cname, nil + } + + ips, err := net.DefaultResolver.LookupIP(ctx, "ip", hostname) + if err != nil { + errs = fmt.Errorf("%s: failed looking up IP: %w", errs, err) + } + + for _, ip := range ips { + names, err := net.DefaultResolver.LookupAddr(ctx, ip.String()) + if err != nil || len(names) == 0 { + continue + } + return strings.TrimSuffix(names[0], "."), nil + } + + return "", errs +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go new file mode 100644 index 00000000..bed19f4e --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package shared + +import ( + "net" +) + +func Network() (ips, macs []string, err error) { + ifcs, err := net.Interfaces() + if err != nil { + return nil, nil, err + } + + // This function fetches all the addresses in a single syscall. 
Fetching addresses individually for each interface + // can be expensive when the host has a lot of interfaces. This usually happens when the host is doing virtualized + // networking for guests, in Kubernetes for example. + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, nil, err + } + ips = make([]string, 0, len(addrs)) + for _, addr := range addrs { + ips = append(ips, addr.String()) + } + + macs = make([]string, 0, len(ifcs)) + for _, ifc := range ifcs { + mac := ifc.HardwareAddr.String() + if mac != "" { + macs = append(macs, mac) + } + } + + return ips, macs, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go new file mode 100644 index 00000000..81afb81c --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "errors" + + "golang.org/x/sys/windows" + + gowindows "github.com/elastic/go-windows" +) + +const ( + imageFileMachineAmd64 = 0x8664 + imageFileMachineArm64 = 0xAA64 + archIntel = "x86_64" + archArm64 = "arm64" +) + +func Architecture() (string, error) { + systemInfo, err := gowindows.GetNativeSystemInfo() + if err != nil { + return "", err + } + + return systemInfo.ProcessorArchitecture.String(), nil +} + +func NativeArchitecture() (string, error) { + var processMachine, nativeMachine uint16 + // the pseudo handle doesn't need to be closed + currentProcessHandle := windows.CurrentProcess() + + // IsWow64Process2 was introduced in version 1709 (build 16299 acording to the tables) + // https://learn.microsoft.com/en-us/windows/release-health/release-information + // https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info + err := windows.IsWow64Process2(currentProcessHandle, &processMachine, &nativeMachine) + if err != nil { + if errors.Is(err, windows.ERROR_PROC_NOT_FOUND) { + major, minor, build := windows.RtlGetNtVersionNumbers() + if major < 10 || (major == 10 && minor == 0 && build < 16299) { + return "", nil + } + } + return "", err + } + + var nativeArch string + + switch nativeMachine { + case imageFileMachineAmd64: + // for parity with Architecture() as amd64 and x86_64 are used interchangeably + nativeArch = archIntel + case imageFileMachineArm64: + nativeArch = archArm64 + } + + return nativeArch, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go new file mode 100644 index 00000000..e04d9a40 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "time" + + "golang.org/x/sys/windows" +) + +func BootTime() (time.Time, error) { + bootTime := time.Now().Add(-1 * windows.DurationSinceBoot()) + + // According to GetTickCount64, the resolution of the value is limited to + // the resolution of the system timer, which is typically in the range of + // 10 milliseconds to 16 milliseconds. So this will round the value to the + // nearest second to not mislead anyone about the precision of the value + // and to provide a stable value. + bootTime = bootTime.Round(time.Second) + return bootTime, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go new file mode 100644 index 00000000..372f125f --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "errors" + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // DeviceMup is the device used for unmounted network filesystems + DeviceMup = "\\device\\mup" + + // LANManRedirector is an string that appears in mounted network filesystems + LANManRedirector = "lanmanredirector" +) + +var ( + // ErrNoDevice is the error returned by DevicePathToDrivePath when + // an invalid device-path is supplied. + ErrNoDevice = errors.New("not a device path") + + // ErrDeviceNotFound is the error returned by DevicePathToDrivePath when + // a path pointing to an unmapped device is passed. 
+ ErrDeviceNotFound = errors.New("logical device not found") +) + +type deviceProvider interface { + GetLogicalDrives() (uint32, error) + QueryDosDevice(*uint16, *uint16, uint32) (uint32, error) +} + +type deviceMapper struct { + deviceProvider +} + +type winapiDeviceProvider struct{} + +type testingDeviceProvider map[byte]string + +func newDeviceMapper() deviceMapper { + return deviceMapper{ + deviceProvider: winapiDeviceProvider{}, + } +} + +func fixNetworkDrivePath(device string) string { + // For a VirtualBox share: + // device=\device\vboxminirdr\;z:\vboxsvr\share + // path=\device\vboxminirdr\vboxsvr\share + // + // For a network share: + // device=\device\lanmanredirector\;q:nnnnnnn\server\share + // path=\device\mup\server\share + + semicolonPos := strings.IndexByte(device, ';') + colonPos := strings.IndexByte(device, ':') + if semicolonPos == -1 || colonPos != semicolonPos+2 { + return device + } + pathStart := strings.IndexByte(device[colonPos+1:], '\\') + if pathStart == -1 { + return device + } + dev := device[:semicolonPos] + path := device[colonPos+pathStart+1:] + n := len(dev) + if n > 0 && dev[n-1] == '\\' { + dev = dev[:n-1] + } + return dev + path +} + +func (mapper *deviceMapper) getDevice(driveLetter byte) (string, error) { + driveBuf := [3]uint16{uint16(driveLetter), ':', 0} + + for bufSize := 64; bufSize <= 1024; bufSize *= 2 { + deviceBuf := make([]uint16, bufSize) + n, err := mapper.QueryDosDevice(&driveBuf[0], &deviceBuf[0], uint32(len(deviceBuf))) + if err != nil { + if err == windows.ERROR_INSUFFICIENT_BUFFER { + continue + } + return "", err + } + return windows.UTF16ToString(deviceBuf[:n]), nil + } + return "", windows.ERROR_INSUFFICIENT_BUFFER +} + +func (mapper *deviceMapper) DevicePathToDrivePath(path string) (string, error) { + pathLower := strings.ToLower(path) + isMUP := strings.Index(pathLower, DeviceMup) == 0 + mask, err := mapper.GetLogicalDrives() + if err != nil { + return "", fmt.Errorf("GetLogicalDrives: %w", err) + } + + 
for bit := uint32(0); mask != 0 && bit < uint32('Z'-'A'+1); bit++ { + if mask&(1< \\server\share\path + if isMUP { + return "\\" + path[len(DeviceMup):], nil + } + return "", ErrDeviceNotFound +} + +func (winapiDeviceProvider) GetLogicalDrives() (uint32, error) { + return windows.GetLogicalDrives() +} + +func (winapiDeviceProvider) QueryDosDevice(name *uint16, buf *uint16, length uint32) (uint32, error) { + return windows.QueryDosDevice(name, buf, length) +} + +func (m testingDeviceProvider) GetLogicalDrives() (mask uint32, err error) { + for drive := range m { + mask |= 1 << uint32(drive-'A') + } + return mask, nil +} + +func ptrOffset(ptr *uint16, off uint32) *uint16 { + return (*uint16)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) + uintptr(off*2))) +} + +func (m testingDeviceProvider) QueryDosDevice(nameW *uint16, buf *uint16, length uint32) (uint32, error) { + drive := byte(*nameW) + if byte(*ptrOffset(nameW, 1)) != ':' { + return 0, errors.New("not a drive") + } + if *ptrOffset(nameW, 2) != 0 { + return 0, errors.New("drive not terminated") + } + path, ok := m[drive] + if !ok { + return 0, fmt.Errorf("drive %c not found", drive) + } + n := uint32(len(path)) + if n+2 > length { + return 0, windows.ERROR_INSUFFICIENT_BUFFER + } + for i := uint32(0); i < n; i++ { + *ptrOffset(buf, i) = uint16(path[i]) + } + *ptrOffset(buf, n) = 0 + *ptrOffset(buf, n+1) = 0 + return n + 2, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go new file mode 100644 index 00000000..fa351940 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package windows implements the HostProvider and ProcessProvider interfaces +// for providing information about Windows. +package windows diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/helpers_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/helpers_windows.go new file mode 100644 index 00000000..38940ff6 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/helpers_windows.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "fmt" + + syswin "golang.org/x/sys/windows" +) + +// sidToString wraps the `String()` functions used to return SID strings in golang.org/x/sys +// These can return an error or no error, depending on the release. +func sidToString(strFunc *syswin.SID) (string, error) { + switch sig := (interface{})(strFunc).(type) { + case fmt.Stringer: + return sig.String(), nil + case errString: + return sig.String() + default: + return "", fmt.Errorf("missing or unexpected String() function signature for %#v", sig) + } +} + +type errString interface { + String() (string, error) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go new file mode 100644 index 00000000..ed948819 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "syscall" + "time" + + stdwindows "golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + registry.Register(windowsSystem{}) +} + +type windowsSystem struct{} + +func (s windowsSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (types.CPUTimes, error) { + idle, kernel, user, err := windows.GetSystemTimes() + if err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + System: kernel, + User: user, + Idle: idle, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + mem, err := windows.GlobalMemoryStatusEx() + if err != nil { + return nil, err + } + + return &types.HostMemoryInfo{ + Total: mem.TotalPhys, + Used: mem.TotalPhys - mem.AvailPhys, + Free: mem.AvailPhys, + Available: mem.AvailPhys, + VirtualTotal: mem.TotalPageFile, + VirtualUsed: mem.TotalPageFile - mem.AvailPageFile, + VirtualFree: mem.AvailPageFile, + }, nil +} + +func (h *host) FQDNWithContext(_ context.Context) (string, error) { + fqdn, err := getComputerNameEx(stdwindows.ComputerNamePhysicalDnsFullyQualified) + if err != nil { + return "", fmt.Errorf("could not get windows FQDN: %s", err) + } + + return strings.TrimSuffix(fqdn, "."), nil +} + +func (h *host) FQDN() (string, error) { + return h.FQDNWithContext(context.Background()) +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.nativeArchitecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if 
err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return errors.Join(r.errs...) + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func getComputerNameEx(name uint32) (string, error) { + size := uint32(64) + + for { + buff := make([]uint16, size) + err := stdwindows.GetComputerNameEx( + name, &buff[0], &size) + if err == nil { + return syscall.UTF16ToString(buff[:size]), nil + } + + // ERROR_MORE_DATA means buff is too small and size is set to the + // number of bytes needed to store the FQDN. For details, see + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getcomputernameexw#return-value + if errors.Is(err, syscall.ERROR_MORE_DATA) { + // Safeguard to avoid an infinite loop. + if size <= uint32(len(buff)) { + return "", fmt.Errorf( + "windows.GetComputerNameEx returned ERROR_MORE_DATA, " + + "but data size should fit into buffer") + } else { + // Grow the buffer and try again. 
+ buff = make([]uint16, size) + continue + } + } + + return "", fmt.Errorf("could not get windows FQDN: could not get windows.ComputerNamePhysicalDnsFullyQualified: %w", err) + } +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go new file mode 100644 index 00000000..c295c799 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + windows "github.com/elastic/go-windows" +) + +const windowsKernelExe = `C:\Windows\System32\ntoskrnl.exe` + +func KernelVersion() (string, error) { + versionData, err := windows.GetFileVersionInfo(windowsKernelExe) + if err != nil { + return "", err + } + + fileVersion, err := versionData.QueryValue("FileVersion") + if err == nil { + return fileVersion, nil + } + + // Make a second attempt through the fixed version info. + info, err := versionData.FixedFileInfo() + if err != nil { + return "", err + } + return info.ProductVersion(), nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go new file mode 100644 index 00000000..0c69c89d --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "fmt" + + "golang.org/x/sys/windows/registry" +) + +func MachineID() (string, error) { + return getMachineGUID() +} + +func getMachineGUID() (string, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Cryptography` + const name = "MachineGuid" + + k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) + if err != nil { + return "", fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) + } + defer k.Close() + + guid, _, err := k.GetStringValue(name) + if err != nil { + return "", fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + + return guid, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go new file mode 100644 index 00000000..5fa696a0 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/sys/windows/registry" + + "github.com/elastic/go-sysinfo/types" +) + +func OperatingSystem() (*types.OSInfo, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Windows NT\CurrentVersion` + const flags = registry.READ | registry.WOW64_64KEY + + k, err := registry.OpenKey(key, path, flags) + if err != nil { + return nil, fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) + } + defer k.Close() + + osInfo := &types.OSInfo{ + Type: "windows", + Family: "windows", + Platform: "windows", + } + name := "ProductName" + osInfo.Name, _, err = k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + + // Newer versions (Win 10 and 2016) have CurrentMajor/CurrentMinor. + major, _, majorErr := k.GetIntegerValue("CurrentMajorVersionNumber") + minor, _, minorErr := k.GetIntegerValue("CurrentMinorVersionNumber") + if majorErr == nil && minorErr == nil { + osInfo.Major = int(major) + osInfo.Minor = int(minor) + osInfo.Version = fmt.Sprintf("%d.%d", major, minor) + } else { + name = "CurrentVersion" + osInfo.Version, _, err = k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + parts := strings.SplitN(osInfo.Version, ".", 3) + for i, p := range parts { + switch i { + case 0: + osInfo.Major, _ = strconv.Atoi(p) + case 1: + osInfo.Minor, _ = strconv.Atoi(p) + } + } + } + + name = "CurrentBuild" + currentBuild, _, err := k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + osInfo.Build = currentBuild + + // Update Build Revision (optional) + name = "UBR" + updateBuildRevision, _, err := k.GetIntegerValue(name) + if err != nil && err != registry.ErrNotExist { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } else { + 
osInfo.Build = fmt.Sprintf("%v.%d", osInfo.Build, updateBuildRevision) + } + + fixWindows11Naming(currentBuild, osInfo) + + return osInfo, nil +} + +// fixWindows11Naming adjusts the OS name because the ProductName registry value +// was not changed in Windows 11 and still contains Windows 10. If the product +// name contains "Windows 10" and the version is greater than or equal to +// 10.0.22000 then "Windows 10" is replaced with "Windows 11" in the OS name. +// +// https://docs.microsoft.com/en-us/answers/questions/586619/windows-11-build-ver-is-still-10022000194.html +func fixWindows11Naming(currentBuild string, osInfo *types.OSInfo) { + buildNumber, err := strconv.Atoi(currentBuild) + if err != nil { + return + } + + // "Anything above [or equal] 10.0.22000.0 is Win 11. Anything below is Win 10." + if osInfo.Major > 10 || + osInfo.Major == 10 && osInfo.Minor > 0 || + osInfo.Major == 10 && osInfo.Minor == 0 && buildNumber >= 22000 { + osInfo.Name = strings.Replace(osInfo.Name, "Windows 10", "Windows 11", 1) + } +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go new file mode 100644 index 00000000..7086bce0 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "time" + "unsafe" + + syswin "golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" + + "github.com/elastic/go-sysinfo/types" +) + +var ( + selfPID = os.Getpid() + devMapper = newDeviceMapper() +) + +func (s windowsSystem) Processes() (procs []types.Process, err error) { + pids, err := windows.EnumProcesses() + if err != nil { + return nil, fmt.Errorf("EnumProcesses: %w", err) + } + procs = make([]types.Process, 0, len(pids)) + var proc types.Process + for _, pid := range pids { + if pid == 0 || pid == 4 { + // The Idle and System processes (PIDs 0 and 4) can never be + // opened by user-level code (see documentation for OpenProcess). 
+ continue + } + + if proc, err = s.Process(int(pid)); err == nil { + procs = append(procs, proc) + } + } + if len(procs) == 0 { + return nil, err + } + return procs, nil +} + +func (s windowsSystem) Process(pid int) (types.Process, error) { + return newProcess(pid) +} + +func (s windowsSystem) Self() (types.Process, error) { + return newProcess(selfPID) +} + +type process struct { + pid int + info types.ProcessInfo +} + +func (p *process) PID() int { + return p.pid +} + +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + return newProcess(info.PPID) +} + +func newProcess(pid int) (*process, error) { + p := &process{pid: pid} + if err := p.init(); err != nil { + return nil, err + } + return p, nil +} + +func (p *process) init() error { + handle, err := p.open() + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + + var path string + if imgf, err := windows.GetProcessImageFileName(handle); err == nil { + path, err = devMapper.DevicePathToDrivePath(imgf) + if err != nil { + path = imgf + } + } + + var creationTime, exitTime, kernelTime, userTime syscall.Filetime + if err := syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return err + } + + // Try to read the RTL_USER_PROCESS_PARAMETERS struct from the target process + // memory. This can fail due to missing access rights or when we are running + // as a 32bit process in a 64bit system (WOW64). + // Don't make this a fatal error: If it fails, `args` and `cwd` fields will + // be missing. 
+ var args []string + var cwd string + var ppid int + pbi, err := getProcessBasicInformation(syswin.Handle(handle)) + if err == nil { + ppid = int(pbi.InheritedFromUniqueProcessID) + userProcParams, err := getUserProcessParams(syswin.Handle(handle), pbi) + if err == nil { + if argsW, err := readProcessUnicodeString(handle, &userProcParams.CommandLine); err == nil { + args, err = splitCommandline(argsW) + if err != nil { + args = nil + } + } + if cwdW, err := readProcessUnicodeString(handle, &userProcParams.CurrentDirectoryPath); err == nil { + cwd, _, err = windows.UTF16BytesToString(cwdW) + if err != nil { + cwd = "" + } + // Remove trailing separator + cwd = strings.TrimRight(cwd, "\\") + } + } + } + + p.info = types.ProcessInfo{ + Name: filepath.Base(path), + PID: p.pid, + PPID: ppid, + Exe: path, + Args: args, + CWD: cwd, + StartTime: time.Unix(0, creationTime.Nanoseconds()), + } + return nil +} + +func getProcessBasicInformation(handle syswin.Handle) (pbi windows.ProcessBasicInformationStruct, err error) { + var actualSize uint32 + err = syswin.NtQueryInformationProcess(handle, syswin.ProcessBasicInformation, unsafe.Pointer(&pbi), uint32(windows.SizeOfProcessBasicInformationStruct), &actualSize) + if actualSize < uint32(windows.SizeOfProcessBasicInformationStruct) { + return pbi, errors.New("bad size for PROCESS_BASIC_INFORMATION") + } + return pbi, err +} + +func getUserProcessParams(handle syswin.Handle, pbi windows.ProcessBasicInformationStruct) (params windows.RtlUserProcessParameters, err error) { + const is32bitProc = unsafe.Sizeof(uintptr(0)) == 4 + + // Offset of params field within PEB structure. + // This structure is different in 32 and 64 bit. 
+	paramsOffset := 0x20
+	if is32bitProc {
+		paramsOffset = 0x10
+	}
+
+	// Read the PEB from the target process memory
+	pebSize := paramsOffset + 8
+	peb := make([]byte, pebSize)
+	var nRead uintptr
+	err = syswin.ReadProcessMemory(handle, pbi.PebBaseAddress, &peb[0], uintptr(pebSize), &nRead)
+	if err != nil {
+		return params, err
+	}
+	if nRead != uintptr(pebSize) {
+		return params, fmt.Errorf("PEB: short read (%d/%d)", nRead, pebSize)
+	}
+
+	// Get the RTL_USER_PROCESS_PARAMETERS struct pointer from the PEB
+	paramsAddr := *(*uintptr)(unsafe.Pointer(&peb[paramsOffset]))
+
+	// Read the RTL_USER_PROCESS_PARAMETERS from the target process memory
+	paramsBuf := make([]byte, windows.SizeOfRtlUserProcessParameters)
+	err = syswin.ReadProcessMemory(handle, paramsAddr, &paramsBuf[0], uintptr(windows.SizeOfRtlUserProcessParameters), &nRead)
+	if err != nil {
+		return params, err
+	}
+	if nRead != uintptr(windows.SizeOfRtlUserProcessParameters) {
+		return params, fmt.Errorf("RTL_USER_PROCESS_PARAMETERS: short read (%d/%d)", nRead, windows.SizeOfRtlUserProcessParameters)
+	}
+
+	params = *(*windows.RtlUserProcessParameters)(unsafe.Pointer(&paramsBuf[0]))
+	return params, nil
+}
+
+// read an UTF-16 string from another process memory. Result is an []byte
+// with the UTF-16 data.
+func readProcessUnicodeString(handle syscall.Handle, s *windows.UnicodeString) ([]byte, error) {
+	// Allocate an extra UTF-16 null character at the end in case the read string
+	// is not terminated.
+	extra := 2
+	if s.Size&1 != 0 {
+		extra = 3 // If size is odd, need 3 nulls to terminate.
+	}
+	buf := make([]byte, int(s.Size)+extra)
+	nRead, err := windows.ReadProcessMemory(handle, s.Buffer, buf[:s.Size])
+	if err != nil {
+		return nil, err
+	}
+	if nRead != uintptr(s.Size) {
+		return nil, fmt.Errorf("unicode string: short read: (%d/%d)", nRead, s.Size)
+	}
+	return buf, nil
+}
+
+// Use Windows' CommandLineToArgv API to split an UTF-16 command line string
+// into a list of parameters.
+func splitCommandline(utf16 []byte) ([]string, error) { + n := len(utf16) + // Discard odd byte + if n&1 != 0 { + n-- + utf16 = utf16[:n] + } + if n == 0 { + return nil, nil + } + terminated := false + for i := 0; i < n && !terminated; i += 2 { + terminated = utf16[i] == 0 && utf16[i+1] == 0 + } + if !terminated { + // Append a null uint16 at the end if terminator is missing + utf16 = append(utf16, 0, 0) + } + var numArgs int32 + argsWide, err := syscall.CommandLineToArgv((*uint16)(unsafe.Pointer(&utf16[0])), &numArgs) + if err != nil { + return nil, err + } + + // Free memory allocated for CommandLineToArgvW arguments. + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(argsWide))) + + args := make([]string, numArgs) + for idx := range args { + args[idx] = syscall.UTF16ToString(argsWide[idx][:]) + } + return args, nil +} + +func (p *process) open() (handle syscall.Handle, err error) { + if p.pid == selfPID { + return syscall.GetCurrentProcess() + } + + // Try different access rights, from broader to more limited. 
+ // PROCESS_VM_READ is needed to get command-line and working directory + // PROCESS_QUERY_LIMITED_INFORMATION is only available in Vista+ + for _, permissions := range [4]uint32{ + syscall.PROCESS_QUERY_INFORMATION | windows.PROCESS_VM_READ, + windows.PROCESS_QUERY_LIMITED_INFORMATION | windows.PROCESS_VM_READ, + syscall.PROCESS_QUERY_INFORMATION, + windows.PROCESS_QUERY_LIMITED_INFORMATION, + } { + if handle, err = syscall.OpenProcess(permissions, false, uint32(p.pid)); err == nil { + break + } + } + return handle, err +} + +func (p *process) Info() (types.ProcessInfo, error) { + return p.info, nil +} + +func (p *process) User() (types.UserInfo, error) { + handle, err := p.open() + if err != nil { + return types.UserInfo{}, fmt.Errorf("OpenProcess failed: %w", err) + } + defer syscall.CloseHandle(handle) + + var accessToken syswin.Token + err = syswin.OpenProcessToken(syswin.Handle(handle), syscall.TOKEN_QUERY, &accessToken) + if err != nil { + return types.UserInfo{}, fmt.Errorf("OpenProcessToken failed: %w", err) + } + defer accessToken.Close() + + tokenUser, err := accessToken.GetTokenUser() + if err != nil { + return types.UserInfo{}, fmt.Errorf("GetTokenUser failed: %w", err) + } + + sid, err := sidToString(tokenUser.User.Sid) + if sid == "" || err != nil { + if err != nil { + return types.UserInfo{}, fmt.Errorf("failed to look up user SID: %w", err) + } + return types.UserInfo{}, errors.New("failed to look up user SID") + } + + tokenGroup, err := accessToken.GetTokenPrimaryGroup() + if err != nil { + return types.UserInfo{}, fmt.Errorf("GetTokenPrimaryGroup failed: %w", err) + } + + gsid, err := sidToString(tokenGroup.PrimaryGroup) + if gsid == "" || err != nil { + if err != nil { + return types.UserInfo{}, fmt.Errorf("failed to look up primary group SID: %w", err) + } + return types.UserInfo{}, errors.New("failed to look up primary group SID") + } + + return types.UserInfo{ + UID: sid, + GID: gsid, + }, nil +} + +func (p *process) Memory() 
(types.MemoryInfo, error) { + handle, err := p.open() + if err != nil { + return types.MemoryInfo{}, err + } + defer syscall.CloseHandle(handle) + + counters, err := windows.GetProcessMemoryInfo(handle) + if err != nil { + return types.MemoryInfo{}, err + } + + return types.MemoryInfo{ + Resident: uint64(counters.WorkingSetSize), + Virtual: uint64(counters.PrivateUsage), + }, nil +} + +func (p *process) CPUTime() (types.CPUTimes, error) { + handle, err := p.open() + if err != nil { + return types.CPUTimes{}, err + } + defer syscall.CloseHandle(handle) + + var creationTime, exitTime, kernelTime, userTime syscall.Filetime + if err := syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + User: windows.FiletimeToDuration(&userTime), + System: windows.FiletimeToDuration(&kernelTime), + }, nil +} + +// OpenHandles returns the number of open handles of the process. +func (p *process) OpenHandleCount() (int, error) { + handle, err := p.open() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(handle) + + count, err := windows.GetProcessHandleCount(handle) + return int(count), err +} diff --git a/vendor/github.com/elastic/go-sysinfo/system.go b/vendor/github.com/elastic/go-sysinfo/system.go new file mode 100644 index 00000000..e2edfe22 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/system.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sysinfo + +import ( + "runtime" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/types" + + // Register host and process providers. + _ "github.com/elastic/go-sysinfo/providers/aix" + _ "github.com/elastic/go-sysinfo/providers/darwin" + _ "github.com/elastic/go-sysinfo/providers/linux" + _ "github.com/elastic/go-sysinfo/providers/windows" +) + +type ProviderOption func(*registry.ProviderOptions) + +// WithHostFS returns a provider with a custom HostFS root path, +// enabling use of the library from within a container, or an alternate root path on linux. +// For example, WithHostFS("/hostfs") can be used when /hostfs points to the root filesystem of the container host. +// For full functionality, the alternate hostfs should have: +// - /proc +// - /var +// - /etc +func WithHostFS(hostfs string) ProviderOption { + return func(po *registry.ProviderOptions) { + po.Hostfs = hostfs + } +} + +// Go returns information about the Go runtime. +func Go() types.GoInfo { + return types.GoInfo{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + MaxProcs: runtime.GOMAXPROCS(0), + Version: runtime.Version(), + } +} + +func applyOptsAndReturnProvider(opts ...ProviderOption) registry.ProviderOptions { + options := registry.ProviderOptions{} + for _, opt := range opts { + opt(&options) + } + return options +} + +// setupProcessProvider returns a ProcessProvider. 
+// Most of the exported functions here deal with processes, +// so this just gets wrapped by all the external functions +func setupProcessProvider(opts ...ProviderOption) (registry.ProcessProvider, error) { + provider := registry.GetProcessProvider(applyOptsAndReturnProvider(opts...)) + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider, nil +} + +// Host returns information about host on which this process is running. If +// host information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. +// On Darwin (macOS) a types.ErrNotImplemented is returned with cgo disabled. +func Host(opts ...ProviderOption) (types.Host, error) { + provider := registry.GetHostProvider(applyOptsAndReturnProvider(opts...)) + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Host() +} + +// Process returns a types.Process object representing the process associated +// with the given PID. The types.Process object can be used to query information +// about the process. If process information collection is not implemented for +// this platform then types.ErrNotImplemented is returned. +func Process(pid int, opts ...ProviderOption) (types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err + } + return provider.Process(pid) +} + +// Processes return a list of all processes. If process information collection +// is not implemented for this platform then types.ErrNotImplemented is +// returned. +func Processes(opts ...ProviderOption) ([]types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err + } + return provider.Processes() +} + +// Self return a types.Process object representing this process. If process +// information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. 
+func Self(opts ...ProviderOption) (types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err + } + return provider.Self() +} diff --git a/vendor/github.com/elastic/go-sysinfo/types/errors.go b/vendor/github.com/elastic/go-sysinfo/types/errors.go new file mode 100644 index 00000000..7e509bc4 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/types/errors.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "errors" + +// ErrNotImplemented represents an error for a function that is not implemented on a particular platform. +var ErrNotImplemented = errors.New("unimplemented") diff --git a/vendor/github.com/elastic/go-sysinfo/types/go.go b/vendor/github.com/elastic/go-sysinfo/types/go.go new file mode 100644 index 00000000..62377441 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/types/go.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +// GoInfo contains info about the go runtime +type GoInfo struct { + OS string `json:"os"` + Arch string `json:"arch"` + MaxProcs int `json:"max_procs"` + Version string `json:"version"` +} diff --git a/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/go-sysinfo/types/host.go new file mode 100644 index 00000000..9661ce70 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -0,0 +1,310 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package types + +import ( + "context" + "time" +) + +// Host is the interface that wraps methods for returning Host stats +// It may return partial information if the provider +// implementation is unable to collect all of the necessary data. +type Host interface { + CPUTimer + Info() HostInfo + Memory() (*HostMemoryInfo, error) + + // FQDNWithContext returns the fully-qualified domain name of the host. + FQDNWithContext(ctx context.Context) (string, error) + + // FQDN calls FQDNWithContext with a background context. + // Deprecated: Use FQDNWithContext instead. + FQDN() (string, error) +} + +// NetworkCounters represents network stats from /proc/net +type NetworkCounters interface { + NetworkCounters() (*NetworkCountersInfo, error) +} + +// SNMP represents the data from /proc/net/snmp +// Note that according to RFC 2012,TCP.MaxConn, if present, is a signed value and should be cast to int64 +type SNMP struct { + IP map[string]uint64 `json:"ip" netstat:"Ip"` + ICMP map[string]uint64 `json:"icmp" netstat:"Icmp"` + ICMPMsg map[string]uint64 `json:"icmp_msg" netstat:"IcmpMsg"` + TCP map[string]uint64 `json:"tcp" netstat:"Tcp"` + UDP map[string]uint64 `json:"udp" netstat:"Udp"` + UDPLite map[string]uint64 `json:"udp_lite" netstat:"UdpLite"` +} + +// Netstat represents the data from /proc/net/netstat +type Netstat struct { + TCPExt map[string]uint64 `json:"tcp_ext" netstat:"TcpExt"` + IPExt map[string]uint64 `json:"ip_ext" netstat:"IpExt"` +} + +// NetworkCountersInfo represents available network counters from /proc/net +type NetworkCountersInfo struct { + SNMP SNMP `json:"snmp"` + Netstat Netstat `json:"netstat"` +} + +// VMStat is the interface wrapper for platforms that support /proc/vmstat. +type VMStat interface { + VMStat() (*VMStatInfo, error) +} + +// HostInfo contains basic host information. +type HostInfo struct { + Architecture string `json:"architecture"` // Process hardware architecture (e.g. x86_64, arm, ppc, mips). 
+ NativeArchitecture string `json:"native_architecture"` // Native OS hardware architecture (e.g. x86_64, arm, ppc, mips). + BootTime time.Time `json:"boot_time"` // Host boot time. + Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. + Hostname string `json:"name"` // Hostname. + IPs []string `json:"ip,omitempty"` // List of all IPs. + KernelVersion string `json:"kernel_version"` // Kernel version. + MACs []string `json:"mac"` // List of MAC addresses. + OS *OSInfo `json:"os"` // OS information. + Timezone string `json:"timezone"` // System timezone. + TimezoneOffsetSec int `json:"timezone_offset_sec"` // Timezone offset (seconds from UTC). + UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). +} + +// Uptime returns the system uptime +func (host HostInfo) Uptime() time.Duration { + return time.Since(host.BootTime) +} + +// OSInfo contains basic OS information +type OSInfo struct { + Type string `json:"type"` // OS Type (one of linux, macos, unix, windows). + Family string `json:"family"` // OS Family (e.g. redhat, debian, freebsd, windows). + Platform string `json:"platform"` // OS platform (e.g. centos, ubuntu, windows). + Name string `json:"name"` // OS Name (e.g. Mac OS X, CentOS). + Version string `json:"version"` // OS version (e.g. 10.12.6). + Major int `json:"major"` // Major release version. + Minor int `json:"minor"` // Minor release version. + Patch int `json:"patch"` // Patch release version. + Build string `json:"build,omitempty"` // Build (e.g. 16G1114). + Codename string `json:"codename,omitempty"` // OS codename (e.g. jessie). +} + +// LoadAverage is the interface that wraps the LoadAverage method. 
+// LoadAverage returns load info on the host +type LoadAverage interface { + LoadAverage() (*LoadAverageInfo, error) +} + +// LoadAverageInfo contains load statistics +type LoadAverageInfo struct { + One float64 `json:"one_min"` + Five float64 `json:"five_min"` + Fifteen float64 `json:"fifteen_min"` +} + +// HostMemoryInfo (all values are specified in bytes). +type HostMemoryInfo struct { + Total uint64 `json:"total_bytes"` // Total physical memory. + Used uint64 `json:"used_bytes"` // Total - Free + Available uint64 `json:"available_bytes"` // Amount of memory available without swapping. + Free uint64 `json:"free_bytes"` // Amount of memory not used by the system. + VirtualTotal uint64 `json:"virtual_total_bytes"` // Total virtual memory. + VirtualUsed uint64 `json:"virtual_used_bytes"` // VirtualTotal - VirtualFree + VirtualFree uint64 `json:"virtual_free_bytes"` // Virtual memory that is not used. + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} + +// VMStatInfo contains parsed info from /proc/vmstat. +// This procfs file has expanded much over the years +// with different kernel versions. If we don't have a field in vmstat, +// the field in the struct will just be blank. The comments represent kernel versions. 
+type VMStatInfo struct { + NrFreePages uint64 `json:"nr_free_pages"` // (since Linux 2.6.31) + NrAllocBatch uint64 `json:"nr_alloc_batch"` // (since Linux 3.12) + NrInactiveAnon uint64 `json:"nr_inactive_anon"` // (since Linux 2.6.28) + NrActiveAnon uint64 `json:"nr_active_anon"` // (since Linux 2.6.28) + NrInactiveFile uint64 `json:"nr_inactive_file"` // (since Linux 2.6.28) + NrActiveFile uint64 `json:"nr_active_file"` // (since Linux 2.6.28) + NrUnevictable uint64 `json:"nr_unevictable"` // (since Linux 2.6.28) + NrMlock uint64 `json:"nr_mlock"` // (since Linux 2.6.28) + NrAnonPages uint64 `json:"nr_anon_pages"` // (since Linux 2.6.18) + NrMapped uint64 `json:"nr_mapped"` // (since Linux 2.6.0) + NrFilePages uint64 `json:"nr_file_pages"` // (since Linux 2.6.18) + NrDirty uint64 `json:"nr_dirty"` // (since Linux 2.6.0) + NrWriteback uint64 `json:"nr_writeback"` // (since Linux 2.6.0) + NrSlabReclaimable uint64 `json:"nr_slab_reclaimable"` // (since Linux 2.6.19) + NrSlabUnreclaimable uint64 `json:"nr_slab_unreclaimable"` // (since Linux 2.6.19) + NrPageTablePages uint64 `json:"nr_page_table_pages"` // (since Linux 2.6.0) + NrKernelStack uint64 `json:"nr_kernel_stack"` // (since Linux 2.6.32) Amount of memory allocated to kernel stacks. + NrUnstable uint64 `json:"nr_unstable"` // (since Linux 2.6.0) + NrBounce uint64 `json:"nr_bounce"` // (since Linux 2.6.12) + NrVmscanWrite uint64 `json:"nr_vmscan_write"` // (since Linux 2.6.19) + NrVmscanImmediateReclaim uint64 `json:"nr_vmscan_immediate_reclaim"` // (since Linux 3.2) + NrWritebackTemp uint64 `json:"nr_writeback_temp"` // (since Linux 2.6.26) + NrIsolatedAnon uint64 `json:"nr_isolated_anon"` // (since Linux 2.6.32) + NrIsolatedFile uint64 `json:"nr_isolated_file"` // (since Linux 2.6.32) + NrShmem uint64 `json:"nr_shmem"` // (since Linux 2.6.32) Pages used by shmem and tmpfs(5). 
+ NrDirtied uint64 `json:"nr_dirtied"` // (since Linux 2.6.37) + NrWritten uint64 `json:"nr_written"` // (since Linux 2.6.37) + NrPagesScanned uint64 `json:"nr_pages_scanned"` // (since Linux 3.17) + NumaHit uint64 `json:"numa_hit"` // (since Linux 2.6.18) + NumaMiss uint64 `json:"numa_miss"` // (since Linux 2.6.18) + NumaForeign uint64 `json:"numa_foreign"` // (since Linux 2.6.18) + NumaInterleave uint64 `json:"numa_interleave"` // (since Linux 2.6.18) + NumaLocal uint64 `json:"numa_local"` // (since Linux 2.6.18) + NumaOther uint64 `json:"numa_other"` // (since Linux 2.6.18) + WorkingsetRefault uint64 `json:"workingset_refault"` // (since Linux 3.15) + WorkingsetActivate uint64 `json:"workingset_activate"` // (since Linux 3.15) + WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim"` // (since Linux 3.15) + NrAnonTransparentHugepages uint64 `json:"nr_anon_transparent_hugepages"` // (since Linux 2.6.38) + NrFreeCma uint64 `json:"nr_free_cma"` // (since Linux 3.7) Number of free CMA (Contiguous Memory Allocator) pages. 
+ NrDirtyThreshold uint64 `json:"nr_dirty_threshold"` // (since Linux 2.6.37) + NrDirtyBackgroundThreshold uint64 `json:"nr_dirty_background_threshold"` // (since Linux 2.6.37) + Pgpgin uint64 `json:"pgpgin"` // (since Linux 2.6.0) + Pgpgout uint64 `json:"pgpgout"` // (since Linux 2.6.0) + Pswpin uint64 `json:"pswpin"` // (since Linux 2.6.0) + Pswpout uint64 `json:"pswpout"` // (since Linux 2.6.0) + PgallocDma uint64 `json:"pgalloc_dma"` // (since Linux 2.6.5) + PgallocDma32 uint64 `json:"pgalloc_dma32"` // (since Linux 2.6.16) + PgallocNormal uint64 `json:"pgalloc_normal"` // (since Linux 2.6.5) + PgallocHigh uint64 `json:"pgalloc_high"` // (since Linux 2.6.5) + PgallocMovable uint64 `json:"pgalloc_movable"` // (since Linux 2.6.23) + Pgfree uint64 `json:"pgfree"` // (since Linux 2.6.0) + Pgactivate uint64 `json:"pgactivate"` // (since Linux 2.6.0) + Pgdeactivate uint64 `json:"pgdeactivate"` // (since Linux 2.6.0) + Pgfault uint64 `json:"pgfault"` // (since Linux 2.6.0) + Pgmajfault uint64 `json:"pgmajfault"` // (since Linux 2.6.0) + PgrefillDma uint64 `json:"pgrefill_dma"` // (since Linux 2.6.5) + PgrefillDma32 uint64 `json:"pgrefill_dma32"` // (since Linux 2.6.16) + PgrefillNormal uint64 `json:"pgrefill_normal"` // (since Linux 2.6.5) + PgrefillHigh uint64 `json:"pgrefill_high"` // (since Linux 2.6.5) + PgrefillMovable uint64 `json:"pgrefill_movable"` // (since Linux 2.6.23) + PgstealKswapdDma uint64 `json:"pgsteal_kswapd_dma"` // (since Linux 3.4) + PgstealKswapdDma32 uint64 `json:"pgsteal_kswapd_dma32"` // (since Linux 3.4) + PgstealKswapdNormal uint64 `json:"pgsteal_kswapd_normal"` // (since Linux 3.4) + PgstealKswapdHigh uint64 `json:"pgsteal_kswapd_high"` // (since Linux 3.4) + PgstealKswapdMovable uint64 `json:"pgsteal_kswapd_movable"` // (since Linux 3.4) + PgstealDirectDma uint64 `json:"pgsteal_direct_dma"` + PgstealDirectDma32 uint64 `json:"pgsteal_direct_dma32"` // (since Linux 3.4) + PgstealDirectNormal uint64 `json:"pgsteal_direct_normal"` // (since 
Linux 3.4) + PgstealDirectHigh uint64 `json:"pgsteal_direct_high"` // (since Linux 3.4) + PgstealDirectMovable uint64 `json:"pgsteal_direct_movable"` // (since Linux 2.6.23) + PgscanKswapdDma uint64 `json:"pgscan_kswapd_dma"` + PgscanKswapdDma32 uint64 `json:"pgscan_kswapd_dma32"` // (since Linux 2.6.16) + PgscanKswapdNormal uint64 `json:"pgscan_kswapd_normal"` // (since Linux 2.6.5) + PgscanKswapdHigh uint64 `json:"pgscan_kswapd_high"` + PgscanKswapdMovable uint64 `json:"pgscan_kswapd_movable"` // (since Linux 2.6.23) + PgscanDirectDma uint64 `json:"pgscan_direct_dma"` // + PgscanDirectDma32 uint64 `json:"pgscan_direct_dma32"` // (since Linux 2.6.16) + PgscanDirectNormal uint64 `json:"pgscan_direct_normal"` + PgscanDirectHigh uint64 `json:"pgscan_direct_high"` + PgscanDirectMovable uint64 `json:"pgscan_direct_movable"` // (since Linux 2.6.23) + PgscanDirectThrottle uint64 `json:"pgscan_direct_throttle"` // (since Linux 3.6) + ZoneReclaimFailed uint64 `json:"zone_reclaim_failed"` // (since linux 2.6.31) + Pginodesteal uint64 `json:"pginodesteal"` // (since linux 2.6.0) + SlabsScanned uint64 `json:"slabs_scanned"` // (since linux 2.6.5) + KswapdInodesteal uint64 `json:"kswapd_inodesteal"` // (since linux 2.6.0) + KswapdLowWmarkHitQuickly uint64 `json:"kswapd_low_wmark_hit_quickly"` // (since 2.6.33) + KswapdHighWmarkHitQuickly uint64 `json:"kswapd_high_wmark_hit_quickly"` // (since 2.6.33) + Pageoutrun uint64 `json:"pageoutrun"` // (since Linux 2.6.0) + Allocstall uint64 `json:"allocstall"` // (since Linux 2.6.0) + Pgrotated uint64 `json:"pgrotated"` // (since Linux 2.6.0) + DropPagecache uint64 `json:"drop_pagecache"` // (since Linux 3.15) + DropSlab uint64 `json:"drop_slab"` // (since Linux 3.15) + NumaPteUpdates uint64 `json:"numa_pte_updates"` // (since Linux 3.8) + NumaHugePteUpdates uint64 `json:"numa_huge_pte_updates"` // (since Linux 3.13) + NumaHintFaults uint64 `json:"numa_hint_faults"` // (since Linux 3.8) + NumaHintFaultsLocal uint64 
`json:"numa_hint_faults_local"` // (since Linux 3.8) + NumaPagesMigrated uint64 `json:"numa_pages_migrated"` // (since Linux 3.8) + PgmigrateSuccess uint64 `json:"pgmigrate_success"` // (since Linux 3.8) + PgmigrateFail uint64 `json:"pgmigrate_fail"` // (since Linux 3.8) + CompactMigrateScanned uint64 `json:"compact_migrate_scanned"` // (since Linux 3.8) + CompactFreeScanned uint64 `json:"compact_free_scanned"` // (since Linux 3.8) + CompactIsolated uint64 `json:"compact_isolated"` // (since Linux 3.8) + CompactStall uint64 `json:"compact_stall"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactFail uint64 `json:"compact_fail"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactSuccess uint64 `json:"compact_success"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + HtlbBuddyAllocSuccess uint64 `json:"htlb_buddy_alloc_success"` // (since Linux 2.6.26) + HtlbBuddyAllocFail uint64 `json:"htlb_buddy_alloc_fail"` // (since Linux 2.6.26) + UnevictablePgsCulled uint64 `json:"unevictable_pgs_culled"` // (since Linux 2.6.28) + UnevictablePgsScanned uint64 `json:"unevictable_pgs_scanned"` // (since Linux 2.6.28) + UnevictablePgsRescued uint64 `json:"unevictable_pgs_rescued"` // (since Linux 2.6.28) + UnevictablePgsMlocked uint64 `json:"unevictable_pgs_mlocked"` // (since Linux 2.6.28) + UnevictablePgsMunlocked uint64 `json:"unevictable_pgs_munlocked"` // (since Linux 2.6.28) + UnevictablePgsCleared uint64 `json:"unevictable_pgs_cleared"` // (since Linux 2.6.28) + UnevictablePgsStranded uint64 `json:"unevictable_pgs_stranded"` // (since Linux 2.6.28) + ThpFaultAlloc uint64 `json:"thp_fault_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. 
+ ThpFaultFallback uint64 `json:"thp_fault_fallback"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpCollapseAlloc uint64 `json:"thp_collapse_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpCollapseAllocFailed uint64 `json:"thp_collapse_alloc_failed"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpSplit uint64 `json:"thp_split"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAlloc uint64 `json:"thp_zero_page_alloc"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAllocFailed uint64 `json:"thp_zero_page_alloc_failed"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + BalloonInflate uint64 `json:"balloon_inflate"` // (since Linux 3.18) + BalloonDeflate uint64 `json:"balloon_deflate"` // (since Linux 3.18) + BalloonMigrate uint64 `json:"balloon_migrate"` // (since Linux 3.18) + NrTlbRemoteFlush uint64 `json:"nr_tlb_remote_flush"` // (since Linux 3.12) + NrTlbRemoteFlushReceived uint64 `json:"nr_tlb_remote_flush_received"` // (since Linux 3.12) + NrTlbLocalFlushAll uint64 `json:"nr_tlb_local_flush_all"` // (since Linux 3.12) + NrTlbLocalFlushOne uint64 `json:"nr_tlb_local_flush_one"` // (since Linux 3.12) + VmacacheFindCalls uint64 `json:"vmacache_find_calls"` // (since Linux 3.16) + VmacacheFindHits uint64 `json:"vmacache_find_hits"` // (since Linux 3.16) + VmacacheFullFlushes uint64 `json:"vmacache_full_flushes"` // (since Linux 3.19) + // the following fields are not documented in `man 5 proc` as of 4.15 + NrZoneInactiveAnon uint64 `json:"nr_zone_inactive_anon"` + NrZoneActiveAnon uint64 `json:"nr_zone_active_anon"` + NrZoneInactiveFile uint64 `json:"nr_zone_inactive_file"` + NrZoneActiveFile uint64 `json:"nr_zone_active_file"` + 
NrZoneUnevictable uint64 `json:"nr_zone_unevictable"` + NrZoneWritePending uint64 `json:"nr_zone_write_pending"` + NrZspages uint64 `json:"nr_zspages"` + NrShmemHugepages uint64 `json:"nr_shmem_hugepages"` + NrShmemPmdmapped uint64 `json:"nr_shmem_pmdmapped"` + AllocstallDma uint64 `json:"allocstall_dma"` + AllocstallDma32 uint64 `json:"allocstall_dma32"` + AllocstallNormal uint64 `json:"allocstall_normal"` + AllocstallMovable uint64 `json:"allocstall_movable"` + PgskipDma uint64 `json:"pgskip_dma"` + PgskipDma32 uint64 `json:"pgskip_dma32"` + PgskipNormal uint64 `json:"pgskip_normal"` + PgskipMovable uint64 `json:"pgskip_movable"` + Pglazyfree uint64 `json:"pglazyfree"` + Pglazyfreed uint64 `json:"pglazyfreed"` + Pgrefill uint64 `json:"pgrefill"` + PgstealKswapd uint64 `json:"pgsteal_kswapd"` + PgstealDirect uint64 `json:"pgsteal_direct"` + PgscanKswapd uint64 `json:"pgscan_kswapd"` + PgscanDirect uint64 `json:"pgscan_direct"` + OomKill uint64 `json:"oom_kill"` + CompactDaemonWake uint64 `json:"compact_daemon_wake"` + CompactDaemonMigrateScanned uint64 `json:"compact_daemon_migrate_scanned"` + CompactDaemonFreeScanned uint64 `json:"compact_daemon_free_scanned"` + ThpFileAlloc uint64 `json:"thp_file_alloc"` + ThpFileMapped uint64 `json:"thp_file_mapped"` + ThpSplitPage uint64 `json:"thp_split_page"` + ThpSplitPageFailed uint64 `json:"thp_split_page_failed"` + ThpDeferredSplitPage uint64 `json:"thp_deferred_split_page"` + ThpSplitPmd uint64 `json:"thp_split_pmd"` + ThpSplitPud uint64 `json:"thp_split_pud"` + ThpSwpout uint64 `json:"thp_swpout"` + ThpSwpoutFallback uint64 `json:"thp_swpout_fallback"` + SwapRa uint64 `json:"swap_ra"` + SwapRaHit uint64 `json:"swap_ra_hit"` +} diff --git a/vendor/github.com/elastic/go-sysinfo/types/process.go b/vendor/github.com/elastic/go-sysinfo/types/process.go new file mode 100644 index 00000000..c02ac9dc --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/types/process.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "time" + +// Process is the main wrapper for gathering information on a process +type Process interface { + CPUTimer + // Info returns process info. + // It may return partial information if the provider + // implementation is unable to collect all the necessary data. + Info() (ProcessInfo, error) + Memory() (MemoryInfo, error) + User() (UserInfo, error) + Parent() (Process, error) + PID() int +} + +// ProcessInfo contains basic stats about a process +type ProcessInfo struct { + Name string `json:"name"` + PID int `json:"pid"` + PPID int `json:"ppid"` + CWD string `json:"cwd"` + Exe string `json:"exe"` + Args []string `json:"args"` + StartTime time.Time `json:"start_time"` +} + +// UserInfo contains information about the UID and GID +// values of a process. +type UserInfo struct { + // UID is the user ID. + // On Linux and Darwin (macOS) this is the real user ID. + // On Windows, this is the security identifier (SID) of the + // user account of the process access token. + UID string `json:"uid"` + + // On Linux and Darwin (macOS) this is the effective user ID. + // On Windows, this is empty. + EUID string `json:"euid"` + + // On Linux and Darwin (macOS) this is the saved user ID. 
+ // On Windows, this is empty. + SUID string `json:"suid"` + + // GID is the primary group ID. + // On Linux and Darwin (macOS) this is the real group ID. + // On Windows, this is the security identifier (SID) of the + // primary group of the process access token. + GID string `json:"gid"` + + // On Linux and Darwin (macOS) this is the effective group ID. + // On Windows, this is empty. + EGID string `json:"egid"` + + // On Linux and Darwin (macOS) this is the saved group ID. + // On Windows, this is empty. + SGID string `json:"sgid"` +} + +// Environment is the interface that wraps the Environment method. +// Environment returns variables for a process +type Environment interface { + Environment() (map[string]string, error) +} + +// OpenHandleEnumerator is the interface that wraps the OpenHandles method. +// OpenHandles lists the open file handles. +type OpenHandleEnumerator interface { + OpenHandles() ([]string, error) +} + +// OpenHandleCounter is the interface that wraps the OpenHandleCount method. +// OpenHandleCount returns the number of open file handles. +type OpenHandleCounter interface { + OpenHandleCount() (int, error) +} + +// CPUTimer is the interface that wraps the CPUTime method. +// CPUTime returns CPU time info +type CPUTimer interface { + // CPUTime returns a CPUTimes structure for + // the host or some process. + // + // The User and System fields are guaranteed + // to be populated for all platforms, and + // for both hosts and processes. + // This may return types.ErrNotImplemented + // if the provider cannot implement collection of this data. 
+ CPUTime() (CPUTimes, error) +} + +// CPUTimes contains CPU timing stats for a process +type CPUTimes struct { + User time.Duration `json:"user"` + System time.Duration `json:"system"` + Idle time.Duration `json:"idle,omitempty"` + IOWait time.Duration `json:"iowait,omitempty"` + IRQ time.Duration `json:"irq,omitempty"` + Nice time.Duration `json:"nice,omitempty"` + SoftIRQ time.Duration `json:"soft_irq,omitempty"` + Steal time.Duration `json:"steal,omitempty"` +} + +// Total returns the total CPU time +func (cpu CPUTimes) Total() time.Duration { + return cpu.User + cpu.System + cpu.Idle + cpu.IOWait + cpu.IRQ + cpu.Nice + + cpu.SoftIRQ + cpu.Steal +} + +// MemoryInfo contains memory stats for a process +type MemoryInfo struct { + Resident uint64 `json:"resident_bytes"` + Virtual uint64 `json:"virtual_bytes"` + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} + +// SeccompInfo contains seccomp info for a process +type SeccompInfo struct { + Mode string `json:"mode"` + NoNewPrivs *bool `json:"no_new_privs,omitempty"` // Added in kernel 4.10. +} + +// CapabilityInfo contains capability set info. +type CapabilityInfo struct { + Inheritable []string `json:"inheritable"` + Permitted []string `json:"permitted"` + Effective []string `json:"effective"` + Bounding []string `json:"bounding"` + Ambient []string `json:"ambient"` +} + +// Capabilities is the interface that wraps the Capabilities method. +// Capabilities returns capabilities for a process +type Capabilities interface { + Capabilities() (*CapabilityInfo, error) +} + +// Seccomp is the interface that wraps the Seccomp method. 
+// Seccomp returns seccomp info on Linux +type Seccomp interface { + Seccomp() (*SeccompInfo, error) +} diff --git a/vendor/github.com/elastic/go-windows/.gitattributes b/vendor/github.com/elastic/go-windows/.gitattributes new file mode 100644 index 00000000..875f4996 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/.gitattributes @@ -0,0 +1,5 @@ +# Treat all files in the Go repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. + +* -text diff --git a/vendor/github.com/elastic/go-windows/.gitignore b/vendor/github.com/elastic/go-windows/.gitignore new file mode 100644 index 00000000..29708a72 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/.gitignore @@ -0,0 +1,23 @@ +# Directories +/.vagrant +/.idea +/build + +# Files +.DS_Store +/*.iml +*.h + +# Editor swap files +*.swp +*.swo +*.swn + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.exe +*.test +*.prof +*.pyc diff --git a/vendor/github.com/elastic/go-windows/CHANGELOG.md b/vendor/github.com/elastic/go-windows/CHANGELOG.md new file mode 100644 index 00000000..c7fb7d34 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/CHANGELOG.md @@ -0,0 +1,45 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +- Replace pkg/errors with Go 1.13 native errors #18. + +### Changed + +- go.mod - Require Go 1.18 or newer. #27 + +### Deprecated + +### Removed + +### Fixed + +### Security + +## [1.0.1] - 2019-08-28 + +### Security + +- Load DLLs only from Windows system directory. + +## [1.0.0] - 2019-04-26 + +### Added + +- Add GetProcessMemoryInfo. #2 +- Add APIs to fetch process information #6. 
+ - NtQueryInformationProcess + - ReadProcessMemory + - GetProcessImageFileName + - EnumProcesses +- Add GetProcessHandleCount to kernel32. #7 + +[Unreleased]: https://github.com/elastic/go-windows/compare/v1.0.1...HEAD +[1.0.1]: https://github.com/elastic/go-windows/v1.0.1 +[1.0.0]: https://github.com/elastic/go-windows/v1.0.0 diff --git a/vendor/github.com/elastic/go-windows/LICENSE.txt b/vendor/github.com/elastic/go-windows/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/go-windows/NOTICE.txt b/vendor/github.com/elastic/go-windows/NOTICE.txt new file mode 100644 index 00000000..86a6c31e --- /dev/null +++ b/vendor/github.com/elastic/go-windows/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-windows +Copyright 2017-2024 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/go-windows/README.md b/vendor/github.com/elastic/go-windows/README.md new file mode 100644 index 00000000..45353355 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/README.md @@ -0,0 +1,15 @@ +# go-windows + +[![ci](https://github.com/elastic/go-windows/actions/workflows/ci.yml/badge.svg)](https://github.com/elastic/go-windows/actions/workflows/ci.yml) +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://pkg.go.dev/github.com/elastic/go-windows?GOOS=windows + +go-windows is a library for Go (golang) that provides wrappers to various +Windows APIs that are not covered by the stdlib or by +[golang.org/x/sys/windows](https://godoc.org/golang.org/x/sys/windows). + +Goals / Features + +- Does not use cgo. +- Provide abstractions to make using the APIs easier. 
diff --git a/vendor/github.com/elastic/go-windows/constants.go b/vendor/github.com/elastic/go-windows/constants.go new file mode 100644 index 00000000..4dfc9058 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/constants.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build windows +// +build windows + +package windows + +const ( + // This process access rights are missing from Go's syscall package as of 1.10.3 + + // PROCESS_VM_READ right allows to read memory from the target process. + PROCESS_VM_READ = 0x10 + + // PROCESS_QUERY_LIMITED_INFORMATION right allows to access a subset of the + // information granted by PROCESS_QUERY_INFORMATION. Not available in XP + // and Server 2003. + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 +) diff --git a/vendor/github.com/elastic/go-windows/doc.go b/vendor/github.com/elastic/go-windows/doc.go new file mode 100644 index 00000000..0f96de2a --- /dev/null +++ b/vendor/github.com/elastic/go-windows/doc.go @@ -0,0 +1,25 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package windows contains various Windows system calls. +package windows + +// Use "GOOS=windows go generate -v -x" to generate the sources. +// Add -trace to enable debug prints around syscalls. +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output=zsyscall_windows.go kernel32.go version.go psapi.go ntdll.go +//go:generate go run .ci/scripts/fix_generated.go -input zsyscall_windows.go +//go:generate go run github.com/elastic/go-licenser@v0.4.1 diff --git a/vendor/github.com/elastic/go-windows/kernel32.go b/vendor/github.com/elastic/go-windows/kernel32.go new file mode 100644 index 00000000..d66f8874 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/kernel32.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build windows +// +build windows + +package windows + +import ( + "errors" + "fmt" + "syscall" + "time" + "unsafe" +) + +// Syscalls +//sys _GetNativeSystemInfo(systemInfo *SystemInfo) = kernel32.GetNativeSystemInfo +//sys _GetTickCount64() (millis uint64, err error) = kernel32.GetTickCount64 +//sys _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) = kernel32.GetSystemTimes +//sys _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) = kernel32.GlobalMemoryStatusEx +//sys _ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, buffer uintptr, size uintptr, numRead *uintptr) (err error) = kernel32.ReadProcessMemory +//sys _GetProcessHandleCount(handle syscall.Handle, pdwHandleCount *uint32) (err error) = kernel32.GetProcessHandleCount + +var ( + sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MemoryStatusEx{})) +) + +// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. +// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 +type SystemInfo struct { + ProcessorArchitecture ProcessorArchitecture + Reserved uint16 + PageSize uint32 + MinimumApplicationAddress uintptr + MaximumApplicationAddress uintptr + ActiveProcessorMask uint64 + NumberOfProcessors uint32 + ProcessorType ProcessorType + AllocationGranularity uint32 + ProcessorLevel uint16 + ProcessorRevision uint16 +} + +// ProcessorArchitecture specifies the processor architecture that the OS requires. +type ProcessorArchitecture uint16 + +// List of processor architectures associated with SystemInfo. 
+const ( + ProcessorArchitectureAMD64 ProcessorArchitecture = 9 + ProcessorArchitectureARM ProcessorArchitecture = 5 + ProcessorArchitectureARM64 ProcessorArchitecture = 12 + ProcessorArchitectureIA64 ProcessorArchitecture = 6 + ProcessorArchitectureIntel ProcessorArchitecture = 0 + ProcessorArchitectureUnknown ProcessorArchitecture = 0xFFFF +) + +// ErrReadFailed is returned by ReadProcessMemory on failure +var ErrReadFailed = errors.New("ReadProcessMemory failed") + +func (a ProcessorArchitecture) String() string { + names := map[ProcessorArchitecture]string{ + ProcessorArchitectureAMD64: "x86_64", + ProcessorArchitectureARM: "arm", + ProcessorArchitectureARM64: "arm64", + ProcessorArchitectureIA64: "ia64", + ProcessorArchitectureIntel: "x86", + } + + name, found := names[a] + if !found { + return "unknown" + } + return name +} + +// ProcessorType specifies the type of processor. +type ProcessorType uint32 + +// List of processor types associated with SystemInfo. +const ( + ProcessorTypeIntel386 ProcessorType = 386 + ProcessorTypeIntel486 ProcessorType = 486 + ProcessorTypeIntelPentium ProcessorType = 586 + ProcessorTypeIntelIA64 ProcessorType = 2200 + ProcessorTypeAMDX8664 ProcessorType = 8664 +) + +func (t ProcessorType) String() string { + names := map[ProcessorType]string{ + ProcessorTypeIntel386: "386", + ProcessorTypeIntel486: "486", + ProcessorTypeIntelPentium: "586", + ProcessorTypeIntelIA64: "ia64", + ProcessorTypeAMDX8664: "x64_64", + } + + name, found := names[t] + if !found { + return "unknown" + } + return name +} + +// MemoryStatusEx is an equivalent representation of MEMORYSTATUSEX in the +// Windows API. It contains information about the current state of both physical +// and virtual memory, including extended memory. 
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770 +type MemoryStatusEx struct { + length uint32 + MemoryLoad uint32 + TotalPhys uint64 + AvailPhys uint64 + TotalPageFile uint64 + AvailPageFile uint64 + TotalVirtual uint64 + AvailVirtual uint64 + AvailExtendedVirtual uint64 +} + +// GetNativeSystemInfo retrieves information about the current system to an +// application running under WOW64. If the function is called from a 64-bit +// application, it is equivalent to the GetSystemInfo function. +// https://msdn.microsoft.com/en-us/library/ms724340%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396 +func GetNativeSystemInfo() (SystemInfo, error) { + var systemInfo SystemInfo + _GetNativeSystemInfo(&systemInfo) + return systemInfo, nil +} + +// Version identifies a Windows version by major, minor, and build number. +type Version struct { + Major int + Minor int + Build int +} + +// GetWindowsVersion returns the Windows version information. Applications not +// manifested for Windows 8.1 or Windows 10 will return the Windows 8 OS version +// value (6.2). +// +// For a table of version numbers see: +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +func GetWindowsVersion() Version { + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx + ver, err := syscall.GetVersion() + if err != nil { + // GetVersion should never return an error. + panic(fmt.Errorf("GetVersion failed: %v", err)) + } + + return Version{ + Major: int(ver & 0xFF), + Minor: int(ver >> 8 & 0xFF), + Build: int(ver >> 16), + } +} + +// IsWindowsVistaOrGreater returns true if the Windows version is Vista or +// greater. +func (v Version) IsWindowsVistaOrGreater() bool { + // Vista is 6.0. + return v.Major >= 6 && v.Minor >= 0 +} + +// GetTickCount64 retrieves the number of milliseconds that have elapsed since +// the system was started. +// This function is available on Windows Vista and newer. 
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411(v=vs.85).aspx +func GetTickCount64() (uint64, error) { + return _GetTickCount64() +} + +// GetSystemTimes retrieves system timing information. On a multiprocessor +// system, the values returned are the sum of the designated times across all +// processors. The returned kernel time does not include the system idle time. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724400(v=vs.85).aspx +func GetSystemTimes() (idle, kernel, user time.Duration, err error) { + var idleTime, kernelTime, userTime syscall.Filetime + err = _GetSystemTimes(&idleTime, &kernelTime, &userTime) + if err != nil { + return 0, 0, 0, fmt.Errorf("GetSystemTimes failed: %w", err) + } + + idle = FiletimeToDuration(&idleTime) + kernel = FiletimeToDuration(&kernelTime) // Kernel time includes idle time so we subtract it out. + user = FiletimeToDuration(&userTime) + + return idle, kernel - idle, user, nil +} + +// FiletimeToDuration converts a Filetime to a time.Duration. Do not use this +// method to convert a Filetime to an actual clock time, for that use +// Filetime.Nanosecond(). +func FiletimeToDuration(ft *syscall.Filetime) time.Duration { + n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals + return time.Duration(n * 100) +} + +// GlobalMemoryStatusEx retrieves information about the system's current usage +// of both physical and virtual memory. +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +func GlobalMemoryStatusEx() (MemoryStatusEx, error) { + memoryStatusEx := MemoryStatusEx{length: sizeofMemoryStatusEx} + err := _GlobalMemoryStatusEx(&memoryStatusEx) + if err != nil { + return MemoryStatusEx{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err) + } + + return memoryStatusEx, nil +} + +// ReadProcessMemory reads from another process memory. The Handle needs to have +// the PROCESS_VM_READ right. 
+// A zero-byte read is a no-op, no error is returned. +func ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, dest []byte) (numRead uintptr, err error) { + n := len(dest) + if n == 0 { + return 0, nil + } + if err = _ReadProcessMemory(handle, baseAddress, uintptr(unsafe.Pointer(&dest[0])), uintptr(n), &numRead); err != nil { + return 0, err + } + return numRead, nil +} + +// GetProcessHandleCount retrieves the number of open handles of a process. +// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getprocesshandlecount +func GetProcessHandleCount(process syscall.Handle) (uint32, error) { + var count uint32 + if err := _GetProcessHandleCount(process, &count); err != nil { + return 0, fmt.Errorf("GetProcessHandleCount failed: %w", err) + } + return count, nil +} diff --git a/vendor/github.com/elastic/go-windows/ntdll.go b/vendor/github.com/elastic/go-windows/ntdll.go new file mode 100644 index 00000000..7982fd49 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/ntdll.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build windows +// +build windows + +package windows + +import ( + "fmt" + "syscall" + "unsafe" +) + +const ( + // SizeOfProcessBasicInformationStruct gives the size + // of the ProcessBasicInformationStruct struct. + SizeOfProcessBasicInformationStruct = unsafe.Sizeof(ProcessBasicInformationStruct{}) + + // SizeOfRtlUserProcessParameters gives the size + // of the RtlUserProcessParameters struct. + SizeOfRtlUserProcessParameters = unsafe.Sizeof(RtlUserProcessParameters{}) +) + +// NTStatus is an error wrapper for NTSTATUS values, 32bit error-codes returned +// by the NT Kernel. +type NTStatus uint32 + +// ProcessInfoClass is Go's counterpart for the PROCESSINFOCLASS enumeration +// defined in ntdll.h. +type ProcessInfoClass uint32 + +const ( + // ProcessInfoClass enumeration values that can be used as arguments to + // NtQueryInformationProcess + + // ProcessBasicInformation returns a pointer to + // the Process Environment Block (PEB) structure. + ProcessBasicInformation ProcessInfoClass = 0 + + // ProcessDebugPort returns a uint32 that is the port number for the + // debugger of the process. + ProcessDebugPort = 7 + + // ProcessWow64Information returns whether a process is running under + // WOW64. + ProcessWow64Information = 26 + + // ProcessImageFileName returns the image file name for the process, as a + // UnicodeString struct. + ProcessImageFileName = 27 + + // ProcessBreakOnTermination returns a uintptr that tells if the process + // is critical. + ProcessBreakOnTermination = 29 + + // ProcessSubsystemInformation returns the subsystem type of the process. + ProcessSubsystemInformation = 75 +) + +// ProcessBasicInformationStruct is Go's counterpart of the +// PROCESS_BASIC_INFORMATION struct, returned by NtQueryInformationProcess +// when ProcessBasicInformation is requested. 
+type ProcessBasicInformationStruct struct { + Reserved1 uintptr + PebBaseAddress uintptr + Reserved2 [2]uintptr + UniqueProcessID uintptr + // Undocumented: + InheritedFromUniqueProcessID uintptr +} + +// UnicodeString is Go's equivalent for the _UNICODE_STRING struct. +type UnicodeString struct { + Size uint16 + MaximumLength uint16 + Buffer uintptr +} + +// RtlUserProcessParameters is Go's equivalent for the +// _RTL_USER_PROCESS_PARAMETERS struct. +// A few undocumented fields are exposed. +type RtlUserProcessParameters struct { + Reserved1 [16]byte + Reserved2 [5]uintptr + + // + CurrentDirectoryPath UnicodeString + CurrentDirectoryHandle uintptr + DllPath UnicodeString + // + + ImagePathName UnicodeString + CommandLine UnicodeString +} + +// Syscalls +// Warning: NtQueryInformationProcess is an unsupported API that can change +// in future versions of Windows. Available from XP to Windows 10. +//sys _NtQueryInformationProcess(handle syscall.Handle, infoClass uint32, info uintptr, infoLen uint32, returnLen *uint32) (ntStatus uint32) = ntdll.NtQueryInformationProcess + +// NtQueryInformationProcess is a wrapper for ntdll.NtQueryInformationProcess. +// The handle must have the PROCESS_QUERY_INFORMATION access right. +// Returns an error of type NTStatus. +func NtQueryInformationProcess(handle syscall.Handle, infoClass ProcessInfoClass, info unsafe.Pointer, infoLen uint32) (returnedLen uint32, err error) { + status := _NtQueryInformationProcess(handle, uint32(infoClass), uintptr(info), infoLen, &returnedLen) + if status != 0 { + return returnedLen, NTStatus(status) + } + return returnedLen, nil +} + +// Error prints the wrapped NTSTATUS in hex form. 
+func (status NTStatus) Error() string { + return fmt.Sprintf("ntstatus=%x", uint32(status)) +} diff --git a/vendor/github.com/elastic/go-windows/psapi.go b/vendor/github.com/elastic/go-windows/psapi.go new file mode 100644 index 00000000..01fcf10f --- /dev/null +++ b/vendor/github.com/elastic/go-windows/psapi.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build windows +// +build windows + +package windows + +import ( + "fmt" + "syscall" + "unsafe" +) + +// Syscalls +//sys _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) = psapi.GetProcessMemoryInfo +//sys _GetProcessImageFileNameA(handle syscall.Handle, imageFileName *byte, nSize uint32) (len uint32, err error) = psapi.GetProcessImageFileNameA +//sys _EnumProcesses(lpidProcess *uint32, cb uint32, lpcbNeeded *uint32) (err error) = psapi.EnumProcesses + +var ( + sizeofProcessMemoryCountersEx = uint32(unsafe.Sizeof(ProcessMemoryCountersEx{})) +) + +// ProcessMemoryCountersEx is an equivalent representation of +// PROCESS_MEMORY_COUNTERS_EX in the Windows API. It contains information about +// the memory usage of a process. 
+// https://docs.microsoft.com/en-au/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex +type ProcessMemoryCountersEx struct { + cb uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +// GetProcessMemoryInfo retrieves memory info for the given process handle. +// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/nf-psapi-getprocessmemoryinfo +func GetProcessMemoryInfo(process syscall.Handle) (ProcessMemoryCountersEx, error) { + var info ProcessMemoryCountersEx + if err := _GetProcessMemoryInfo(process, &info, sizeofProcessMemoryCountersEx); err != nil { + return ProcessMemoryCountersEx{}, fmt.Errorf("GetProcessMemoryInfo failed: %w", err) + } + return info, nil +} + +// GetProcessImageFileName retrieves the process main executable. +// The returned path is a device path, that is: +// "\Device\HardDisk0Volume1\Windows\notepad.exe" +// instead of +// "C:\Windows\notepad.exe" +// Use QueryDosDevice or equivalent to convert to a drive path. +// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/nf-psapi-getprocessimagefilenamea +func GetProcessImageFileName(handle syscall.Handle) (string, error) { + for bufLen, limit := syscall.MAX_PATH, syscall.MAX_PATH*4; bufLen <= limit; bufLen *= 2 { + buf := make([]byte, bufLen) + nameLen, err := _GetProcessImageFileNameA(handle, &buf[0], uint32(len(buf))) + if err == nil { + buf = buf[:nameLen] + return string(buf), nil + } + if err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", err + } + } + return "", syscall.ERROR_INSUFFICIENT_BUFFER +} + +// EnumProcesses returns a list of running processes. 
+// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/nf-psapi-enumprocesses +func EnumProcesses() (pids []uint32, err error) { + for nAlloc, nGot := uint32(128), uint32(0); ; nAlloc *= 2 { + pids = make([]uint32, nAlloc) + if err = _EnumProcesses(&pids[0], nAlloc*4, &nGot); err != nil { + return nil, err + } + if nGot/4 < nAlloc { + return pids, nil + } + } +} diff --git a/vendor/github.com/elastic/go-windows/utf16.go b/vendor/github.com/elastic/go-windows/utf16.go new file mode 100644 index 00000000..0fa94dff --- /dev/null +++ b/vendor/github.com/elastic/go-windows/utf16.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "fmt" + "unicode/utf16" +) + +// UTF16BytesToString returns a string that is decoded from the UTF-16 bytes. +// The byte slice must be of even length otherwise an error will be returned. +// The integer returned is the offset to the start of the next string with +// buffer if it exists, otherwise -1 is returned. 
+func UTF16BytesToString(b []byte) (string, int, error) { + if len(b)%2 != 0 { + return "", 0, fmt.Errorf("slice must have an even length (length=%d)", len(b)) + } + + offset := -1 + + // Find the null terminator if it exists and re-slice the b. + if nullIndex := indexNullTerminator(b); nullIndex > -1 { + if len(b) > nullIndex+2 { + offset = nullIndex + 2 + } + + b = b[:nullIndex] + } + + s := make([]uint16, len(b)/2) + for i := range s { + s[i] = uint16(b[i*2]) + uint16(b[(i*2)+1])<<8 + } + + return string(utf16.Decode(s)), offset, nil +} + +// indexNullTerminator returns the index of a null terminator within a buffer +// containing UTF-16 encoded data. If the null terminator is not found -1 is +// returned. +func indexNullTerminator(b []byte) int { + if len(b) < 2 { + return -1 + } + + for i := 0; i < len(b); i += 2 { + if b[i] == 0 && b[i+1] == 0 { + return i + } + } + + return -1 +} diff --git a/vendor/github.com/elastic/go-windows/version.go b/vendor/github.com/elastic/go-windows/version.go new file mode 100644 index 00000000..f049d3f6 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/version.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build windows +// +build windows + +package windows + +import ( + "errors" + "fmt" + "unsafe" +) + +// Syscalls +//sys _GetFileVersionInfo(filename string, reserved uint32, dataLen uint32, data *byte) (success bool, err error) [!success] = version.GetFileVersionInfoW +//sys _GetFileVersionInfoSize(filename string, handle uintptr) (size uint32, err error) = version.GetFileVersionInfoSizeW +//sys _VerQueryValueW(data *byte, subBlock string, pBuffer *uintptr, len *uint32) (success bool, err error) [!success] = version.VerQueryValueW + +// FixedFileInfo contains version information for a file. This information is +// language and code page independent. This is an equivalent representation of +// VS_FIXEDFILEINFO. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms646997(v=vs.85).aspx +type FixedFileInfo struct { + Signature uint32 + StrucVersion uint32 + FileVersionMS uint32 + FileVersionLS uint32 + ProductVersionMS uint32 + ProductVersionLS uint32 + FileFlagsMask uint32 + FileFlags uint32 + FileOS uint32 + FileType uint32 + FileSubtype uint32 + FileDateMS uint32 + FileDateLS uint32 +} + +// ProductVersion returns the ProductVersion value in string format. +func (info FixedFileInfo) ProductVersion() string { + return fmt.Sprintf("%d.%d.%d.%d", + (info.ProductVersionMS >> 16), + (info.ProductVersionMS & 0xFFFF), + (info.ProductVersionLS >> 16), + (info.ProductVersionLS & 0xFFFF)) +} + +// FileVersion returns the FileVersion value in string format. +func (info FixedFileInfo) FileVersion() string { + return fmt.Sprintf("%d.%d.%d.%d", + (info.FileVersionMS >> 16), + (info.FileVersionMS & 0xFFFF), + (info.FileVersionLS >> 16), + (info.FileVersionLS & 0xFFFF)) +} + +// VersionData is a buffer holding the data returned by GetFileVersionInfo. +type VersionData []byte + +// QueryValue uses VerQueryValue to query version information from the a +// version-information resource. 
It returns responses using the first language +// and code point found in the resource. The accepted keys are listed in +// the VerQueryValue documentation (e.g. ProductVersion, FileVersion, etc.). +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647464(v=vs.85).aspx +func (d VersionData) QueryValue(key string) (string, error) { + type LangAndCodePage struct { + Language uint16 + CodePage uint16 + } + + var dataPtr uintptr + var size uint32 + if _, err := _VerQueryValueW(&d[0], `\VarFileInfo\Translation`, &dataPtr, &size); err != nil || size == 0 { + return "", fmt.Errorf("failed to get list of languages: %w", err) + } + + offset := int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return "", errors.New("invalid address") + } + + l := *(*LangAndCodePage)(unsafe.Pointer(&d[offset])) + + subBlock := fmt.Sprintf(`\StringFileInfo\%04x%04x\%v`, l.Language, l.CodePage, key) + if _, err := _VerQueryValueW(&d[0], subBlock, &dataPtr, &size); err != nil || size == 0 { + return "", fmt.Errorf("failed to query %v: %w", subBlock, err) + } + + offset = int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return "", errors.New("invalid address") + } + + str, _, err := UTF16BytesToString(d[offset : offset+int(size)*2]) + if err != nil { + return "", fmt.Errorf("failed to decode UTF16 data: %w", err) + } + + return str, nil +} + +// FixedFileInfo returns the fixed version information from a +// version-information resource. It queries the root block to get the +// VS_FIXEDFILEINFO value. 
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647464(v=vs.85).aspx +func (d VersionData) FixedFileInfo() (*FixedFileInfo, error) { + if len(d) == 0 { + return nil, errors.New("use GetFileVersionInfo to initialize VersionData") + } + + var dataPtr uintptr + var size uint32 + if _, err := _VerQueryValueW(&d[0], `\`, &dataPtr, &size); err != nil { + return nil, fmt.Errorf("VerQueryValue failed for \\: %w", err) + } + + offset := int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return nil, errors.New("invalid address") + } + + // Make a copy of the struct. + ffi := *(*FixedFileInfo)(unsafe.Pointer(&d[offset])) + + return &ffi, nil +} + +// GetFileVersionInfo retrieves version information for the specified file. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647003(v=vs.85).aspx +func GetFileVersionInfo(filename string) (VersionData, error) { + size, err := _GetFileVersionInfoSize(filename, 0) + if err != nil { + return nil, fmt.Errorf("GetFileVersionInfoSize failed: %w", err) + } + + data := make(VersionData, size) + _, err = _GetFileVersionInfo(filename, 0, uint32(len(data)), &data[0]) + if err != nil { + return nil, fmt.Errorf("GetFileVersionInfo failed: %w", err) + } + + return data, nil +} diff --git a/vendor/github.com/elastic/go-windows/zsyscall_windows.go b/vendor/github.com/elastic/go-windows/zsyscall_windows.go new file mode 100644 index 00000000..67b5add2 --- /dev/null +++ b/vendor/github.com/elastic/go-windows/zsyscall_windows.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by 'go generate'; DO NOT EDIT. + +//lint:file-ignore SA1019 Generated code will not be updated to use SyscallN as per https://github.com/golang/go/issues/57914. + +package windows + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modpsapi = windows.NewLazySystemDLL("psapi.dll") + modversion = windows.NewLazySystemDLL("version.dll") + + procGetNativeSystemInfo = modkernel32.NewProc("GetNativeSystemInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") + procGetSystemTimes = modkernel32.NewProc("GetSystemTimes") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") + procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procEnumProcesses = modpsapi.NewProc("EnumProcesses") + procGetProcessImageFileNameA = modpsapi.NewProc("GetProcessImageFileNameA") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") + procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") + procVerQueryValueW = modversion.NewProc("VerQueryValueW") +) + +func _GetNativeSystemInfo(systemInfo *SystemInfo) { + syscall.Syscall(procGetNativeSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(systemInfo)), 0, 0) + return +} + +func _GetProcessHandleCount(handle syscall.Handle, pdwHandleCount *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessHandleCount.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(pdwHandleCount)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) { + r1, _, e1 := syscall.Syscall(procGetSystemTimes.Addr(), 3, uintptr(unsafe.Pointer(idleTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func _GetTickCount64() (millis uint64, err error) { + r0, _, e1 := 
syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + millis = uint64(r0) + if millis == 0 { + err = errnoErr(e1) + } + return +} + +func _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) { + r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func _ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, buffer uintptr, size uintptr, numRead *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(handle), uintptr(baseAddress), uintptr(buffer), uintptr(size), uintptr(unsafe.Pointer(numRead)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func _NtQueryInformationProcess(handle syscall.Handle, infoClass uint32, info uintptr, infoLen uint32, returnLen *uint32) (ntStatus uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(handle), uintptr(infoClass), uintptr(info), uintptr(infoLen), uintptr(unsafe.Pointer(returnLen)), 0) + ntStatus = uint32(r0) + return +} + +func _EnumProcesses(lpidProcess *uint32, cb uint32, lpcbNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(lpidProcess)), uintptr(cb), uintptr(unsafe.Pointer(lpcbNeeded))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func _GetProcessImageFileNameA(handle syscall.Handle, imageFileName *byte, nSize uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameA.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize)) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(psmemCounters)), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + 
return +} + +func _GetFileVersionInfoSize(filename string, handle uintptr) (size uint32, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return __GetFileVersionInfoSize(_p0, handle) +} + +func __GetFileVersionInfoSize(filename *uint16, handle uintptr) (size uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(handle), 0) + size = uint32(r0) + if size == 0 { + err = errnoErr(e1) + } + return +} + +func _GetFileVersionInfo(filename string, reserved uint32, dataLen uint32, data *byte) (success bool, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return __GetFileVersionInfo(_p0, reserved, dataLen, data) +} + +func __GetFileVersionInfo(filename *uint16, reserved uint32, dataLen uint32, data *byte) (success bool, err error) { + r0, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(reserved), uintptr(dataLen), uintptr(unsafe.Pointer(data)), 0, 0) + success = r0 != 0 + if !success { + err = errnoErr(e1) + } + return +} + +func _VerQueryValueW(data *byte, subBlock string, pBuffer *uintptr, len *uint32) (success bool, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(subBlock) + if err != nil { + return + } + return __VerQueryValueW(data, _p0, pBuffer, len) +} + +func __VerQueryValueW(data *byte, subBlock *uint16, pBuffer *uintptr, len *uint32) (success bool, err error) { + r0, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(subBlock)), uintptr(unsafe.Pointer(pBuffer)), uintptr(unsafe.Pointer(len)), 0, 0) + success = r0 != 0 + if !success { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 00000000..7cc33ae4 
--- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1,2 @@ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 00000000..126df9e6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,22 @@ +--- +linters: + enable: + - errcheck + - godot + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - testifylint + - unused + +linter-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' + misspell: + locale: US diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..d325872b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 00000000..853eb9d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,121 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) a suitable maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. 
Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. 
+ +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. + +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. + +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. +GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. 
Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. + +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. +Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. 
These files +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does +not bother to check the size of the file before reading. +``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 00000000..e00f3b36 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1,3 @@ +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 00000000..7edfe4d0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,31 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +include Makefile.common + +%/.unpacked: %.ttar + @echo ">> extracting fixtures $*" + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +fixtures: testdata/fixtures/.unpacked + +update_fixtures: + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ + +.PHONY: build +build: + +.PHONY: test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 00000000..16172923 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,277 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. 
+# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... + +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.17.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +SKIP_GOLANGCI_LINT := +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.59.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. 
+ ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint yamllint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" + $(GO) mod download + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + $(GO) mod tidy + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell command -v yamllint 2> /dev/null)) + @echo "yamllint not installed so skipping" +else + yamllint . +endif + +# For backward-compatibility. 
+.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: + @echo ">> running check for unused/missing packages in go.mod" + $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 00000000..53c5e9aa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 00000000..1224816c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,61 @@ +# procfs + +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) +[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. 
Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf testdata/fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md new file mode 100644 index 00000000..fed02d85 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + + diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 00000000..cdcc8a7c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "net" + "os" + "strconv" + "strings" +) + +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string + // Flags + Flags byte +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. 
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := os.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} + ip := net.ParseIP(columns[0]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } + + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err + } + + return entry, nil +} + +// IsComplete returns true if ARP entry is marked with complete flag. 
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 00000000..83807500 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) + } + + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go new file mode 100644 index 00000000..bf4f3b48 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CmdLine returns the command line of the kernel. +func (fs FS) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) + if err != nil { + return nil, err + } + + return strings.Fields(string(data)), nil +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 00000000..f0950bb4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,519 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. 
+type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + +// CPUInfo returns information about current system CPUs. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor", "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu 
family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := 
bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, err := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + 
cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || 
!strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := 
bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of 
the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line. +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go new file mode 100644 index 00000000..64cfd534 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (arm || arm64) +// +build linux +// +build arm arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go new file mode 100644 index 00000000..d88442f0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go new file mode 100644 index 00000000..c11207f3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (mips || mipsle || mips64 || mips64le) +// +build linux +// +build mips mipsle mips64 mips64le + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go new file mode 100644 index 00000000..a6b2b312 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x + +package procfs + +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go new file mode 100644 index 00000000..003bc2ad --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go new file mode 100644 index 00000000..1c9b7313 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (riscv || riscv64) +// +build linux +// +build riscv riscv64 + +package procfs + +var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 00000000..fa3686bc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go new file mode 100644 index 00000000..a0ef5556 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (386 || amd64) +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 00000000..5f2a37a7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,154 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. 
More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) + + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. 
+ c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 00000000..f9d961e4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,44 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 00000000..4980c875 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. +type FS struct { + proc fs.FS + isReal bool +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. 
+func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) + if err != nil { + return FS{}, err + } + + isReal, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, isReal}, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 00000000..134767d6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct +func isRealProc(mountPoint string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 00000000..80df79c3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build freebsd || linux +// +build freebsd linux + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem. 
+func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 00000000..cf2e3eaa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. 
+type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update 
cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + 
RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + 
OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + 
CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, 
&m.ObjectsDeclaredObsolete) + if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + 
&m.AllocationsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], 
&m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 00000000..3c18c761 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs. + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %q: %w", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %q is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 00000000..14272dc7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,112 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "os" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ReadIntFromFile reads a file and attempts to parse a int64 from it. 
+func ReadIntFromFile(path string) (int64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 00000000..71b7a70e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "os" +) + +// ReadFileNoStat uses io.ReadAll to read contents of entire file. +// This is similar to os.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 1024kB. For files larger than this, a scanner +// should be used. 
+func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 1024 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return io.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 00000000..1ab875ce --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's os.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. 
+ const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 00000000..1d86f5e6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (linux && appengine) || (!linux && !darwin) +// +build linux,appengine !linux,!darwin + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. +func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 00000000..fe2355d3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. +func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. +func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. 
+func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. +func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 00000000..bc3a20c9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,241 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. 
+type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + + return parseIPVSStats(bytes.NewReader(data)) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(r io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := io.ReadAll(r) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { 
+ return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: 
weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + } + default: + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 00000000..db88566b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. 
+type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. + EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bits. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. + WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 00000000..332e76c1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg. +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. +func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 00000000..67a9d2b4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Number of "down" disks. (the _ indicator in the status line) + DisksDown int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 + // Number of blocks on the device that need to be synced. + BlocksToBeSynced int64 + // progress percentage of current sync + BlocksSyncedPct float64 + // estimated finishing time for current sync (in minutes) + BlocksSyncedFinishTime float64 + // current sync speed (in Kilobytes/sec) + BlocksSyncedSpeed float64 + // Name of md component devices + Devices []string +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := os.ReadFile(fs.proc.Path("mdstat")) + if err != nil { + return nil, err + } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} + +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { + continue + } + + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + if len(lines) <= i+3 { + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) + } + + // Failed disks have the suffix (F) & Spare disks have the suffix (S). + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) + + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ blocksSynced := size + blocksToBeSynced := size + speed := float64(0) + finish := float64(0) + pct := float64(0) + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + blocksSynced = 0 + } else { + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + } + } + } + + mdStats = append(mdStats, MDStat{ + Name: mdName, + ActivityState: state, + DisksActive: active, + DisksFailed: fail, + DisksDown: down, + DisksSpare: spare, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, + BlocksSyncedPct: pct, + BlocksSyncedFinishTime: finish, + BlocksSyncedSpeed: speed, + Devices: evalComponentDevices(deviceFields), + }) + } + + return mdStats, nil +} + +func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + statusFields := strings.Fields(statusLine) + if len(statusFields) < 1 { + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + } + + sizeStr := statusFields[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a 
number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, 0, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 5 { + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) + } + down = int64(strings.Count(matches[4], "_")) + + return active, total, down, size, nil +} + +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) + } + + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) + if err != nil { + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) + } + + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + } + pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) + 
if err != nil { + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + } + finish, err = strconv.ParseFloat(matches[1], 64) + if err != nil { + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + } + speed, err = strconv.ParseFloat(matches[1], 64) + if err != nil { + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + } + + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil +} + +func evalComponentDevices(deviceFields []string) []string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + return mdComponentDevices +} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 00000000..4b2c4050 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,389 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal *uint64 + // The sum of LowFree+HighFree + MemFree *uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable *uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers *uint64 + Cached *uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached *uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active *uint64 + // Memory which has been less recently used. 
It is more + // eligible to be reclaimed for other purposes + Inactive *uint64 + ActiveAnon *uint64 + InactiveAnon *uint64 + ActiveFile *uint64 + InactiveFile *uint64 + Unevictable *uint64 + Mlocked *uint64 + // total amount of swap space available + SwapTotal *uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree *uint64 + // Memory which is waiting to get written back to the disk + Dirty *uint64 + // Memory which is actively being written back to the disk + Writeback *uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages *uint64 + // files which have been mapped, such as libraries + Mapped *uint64 + Shmem *uint64 + // in-kernel data structures cache + Slab *uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable *uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim *uint64 + KernelStack *uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables *uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable *uint64 + // Memory used for block device "bounce buffers" + Bounce *uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp *uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). + // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. 
+ CommitLimit *uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS *uint64 + // total size of vmalloc memory area + VmallocTotal *uint64 + // amount of vmalloc area which is used + VmallocUsed *uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk *uint64 + Percpu *uint64 + HardwareCorrupted *uint64 + AnonHugePages *uint64 + ShmemHugePages *uint64 + ShmemPmdMapped *uint64 + CmaTotal *uint64 + CmaFree *uint64 + HugePagesTotal *uint64 + HugePagesFree *uint64 + HugePagesRsvd *uint64 + HugePagesSurp *uint64 + Hugepagesize *uint64 + DirectMap4k *uint64 + DirectMap2M *uint64 + DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. 
+ MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + var val, valBytes uint64 + + val, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch len(fields) { + case 2: + // No unit present, use the parsed the value as bytes directly. 
+ valBytes = val + case 3: + // Unit present in optional 3rd field, convert it to + // bytes. The only unit supported within the Linux + // kernel is `kB`. + if fields[2] != "kB" { + return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) + } + + valBytes = 1024 * val + + default: + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = &val + m.MemTotalBytes = &valBytes + case "MemFree:": + m.MemFree = &val + m.MemFreeBytes = &valBytes + case "MemAvailable:": + m.MemAvailable = &val + m.MemAvailableBytes = &valBytes + case "Buffers:": + m.Buffers = &val + m.BuffersBytes = &valBytes + case "Cached:": + m.Cached = &val + m.CachedBytes = &valBytes + case "SwapCached:": + m.SwapCached = &val + m.SwapCachedBytes = &valBytes + case "Active:": + m.Active = &val + m.ActiveBytes = &valBytes + case "Inactive:": + m.Inactive = &val + m.InactiveBytes = &valBytes + case "Active(anon):": + m.ActiveAnon = &val + m.ActiveAnonBytes = &valBytes + case "Inactive(anon):": + m.InactiveAnon = &val + m.InactiveAnonBytes = &valBytes + case "Active(file):": + m.ActiveFile = &val + m.ActiveFileBytes = &valBytes + case "Inactive(file):": + m.InactiveFile = &val + m.InactiveFileBytes = &valBytes + case "Unevictable:": + m.Unevictable = &val + m.UnevictableBytes = &valBytes + case "Mlocked:": + m.Mlocked = &val + m.MlockedBytes = &valBytes + case "SwapTotal:": + m.SwapTotal = &val + m.SwapTotalBytes = &valBytes + case "SwapFree:": + m.SwapFree = &val + m.SwapFreeBytes = &valBytes + case "Dirty:": + m.Dirty = &val + m.DirtyBytes = &valBytes + case "Writeback:": + m.Writeback = &val + m.WritebackBytes = &valBytes + case "AnonPages:": + m.AnonPages = &val + m.AnonPagesBytes = &valBytes + case "Mapped:": + m.Mapped = &val + m.MappedBytes = &valBytes + case "Shmem:": + m.Shmem = &val + m.ShmemBytes = &valBytes + case "Slab:": + m.Slab = &val + m.SlabBytes = &valBytes + case 
"SReclaimable:": + m.SReclaimable = &val + m.SReclaimableBytes = &valBytes + case "SUnreclaim:": + m.SUnreclaim = &val + m.SUnreclaimBytes = &valBytes + case "KernelStack:": + m.KernelStack = &val + m.KernelStackBytes = &valBytes + case "PageTables:": + m.PageTables = &val + m.PageTablesBytes = &valBytes + case "NFS_Unstable:": + m.NFSUnstable = &val + m.NFSUnstableBytes = &valBytes + case "Bounce:": + m.Bounce = &val + m.BounceBytes = &valBytes + case "WritebackTmp:": + m.WritebackTmp = &val + m.WritebackTmpBytes = &valBytes + case "CommitLimit:": + m.CommitLimit = &val + m.CommitLimitBytes = &valBytes + case "Committed_AS:": + m.CommittedAS = &val + m.CommittedASBytes = &valBytes + case "VmallocTotal:": + m.VmallocTotal = &val + m.VmallocTotalBytes = &valBytes + case "VmallocUsed:": + m.VmallocUsed = &val + m.VmallocUsedBytes = &valBytes + case "VmallocChunk:": + m.VmallocChunk = &val + m.VmallocChunkBytes = &valBytes + case "Percpu:": + m.Percpu = &val + m.PercpuBytes = &valBytes + case "HardwareCorrupted:": + m.HardwareCorrupted = &val + m.HardwareCorruptedBytes = &valBytes + case "AnonHugePages:": + m.AnonHugePages = &val + m.AnonHugePagesBytes = &valBytes + case "ShmemHugePages:": + m.ShmemHugePages = &val + m.ShmemHugePagesBytes = &valBytes + case "ShmemPmdMapped:": + m.ShmemPmdMapped = &val + m.ShmemPmdMappedBytes = &valBytes + case "CmaTotal:": + m.CmaTotal = &val + m.CmaTotalBytes = &valBytes + case "CmaFree:": + m.CmaFree = &val + m.CmaFreeBytes = &valBytes + case "HugePages_Total:": + m.HugePagesTotal = &val + case "HugePages_Free:": + m.HugePagesFree = &val + case "HugePages_Rsvd:": + m.HugePagesRsvd = &val + case "HugePages_Surp:": + m.HugePagesSurp = &val + case "Hugepagesize:": + m.Hugepagesize = &val + m.HugepagesizeBytes = &valBytes + case "DirectMap4k:": + m.DirectMap4k = &val + m.DirectMap4kBytes = &valBytes + case "DirectMap2M:": + m.DirectMap2M = &val + m.DirectMap2MBytes = &valBytes + case "DirectMap1G:": + m.DirectMap1G = &val + 
m.DirectMap1GBytes = &valBytes + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 00000000..a704c5e7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. +// The fields described in each entry of /proc/self/mountinfo +// is described in the following man page. 
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
	// Unique ID for the mount
	MountID int
	// The ID of the parent mount
	ParentID int
	// The value of `st_dev` for the files on this FS
	MajorMinorVer string
	// The pathname of the directory in the FS that forms
	// the root for this mount
	Root string
	// The pathname of the mount point relative to the root
	MountPoint string
	// Mount options
	Options map[string]string
	// Zero or more optional fields
	OptionalFields map[string]string
	// The Filesystem type
	FSType string
	// FS specific information or "none"
	Source string
	// Superblock options
	SuperOptions map[string]string
}

// parseMountInfo reads each line of the mountinfo file and returns a list of
// formatted MountInfo structs. Parsing stops at the first malformed line.
func parseMountInfo(info []byte) ([]*MountInfo, error) {
	mounts := []*MountInfo{}
	scanner := bufio.NewScanner(bytes.NewReader(info))
	for scanner.Scan() {
		mountString := scanner.Text()
		parsedMounts, err := parseMountInfoString(mountString)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, parsedMounts)
	}

	// Surface any scanner error (e.g. a line exceeding the buffer size)
	// alongside whatever mounts were parsed successfully.
	err := scanner.Err()
	return mounts, err
}

// parseMountInfoString parses a single mountinfo line and converts it to a
// MountInfo struct. An important check is the hyphen separator: if it does
// not exist, the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 10 { + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) + } + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) + } + + mount := &MountInfo{ + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), + OptionalFields: nil, + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), + } + + mount.MountID, err = strconv.Atoi(mountInfo[0]) + if err != nil { + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) + } + mount.ParentID, err = strconv.Atoi(mountInfo[1]) + if err != nil { + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) + } + // Has optional fields, which is a space separated list of values. + // Example: shared:2 master:7 + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) + } + } + return mount, nil +} + +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. 
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + +// mountOptionsParser parses the mount options, superblock options. +func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat("/proc/self/mountinfo") + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. +func GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 00000000..75a3b6c8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,707 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. 
type MountStats interface {
	// mountStats is an unexported marker method; only types in this package
	// can satisfy MountStats.
	mountStats()
}

// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
	// The version of statistics provided.
	StatVersion string
	// The mount options of the NFS mount.
	Opts map[string]string
	// The age of the NFS mount.
	Age time.Duration
	// Statistics related to byte counters for various operations.
	Bytes NFSBytesStats
	// Statistics related to various NFS event occurrences.
	Events NFSEventsStats
	// Statistics broken down by filesystem operation.
	Operations []NFSOperationStats
	// Statistics about the NFS RPC transport.
	Transport []NFSTransportStats
}

// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}

// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
	// Number of bytes read using the read() syscall.
	Read uint64
	// Number of bytes written using the write() syscall.
	Write uint64
	// Number of bytes read using the read() syscall in O_DIRECT mode.
	DirectRead uint64
	// Number of bytes written using the write() syscall in O_DIRECT mode.
	DirectWrite uint64
	// Number of bytes read from the NFS server, in total.
	ReadTotal uint64
	// Number of bytes written to the NFS server, in total.
	WriteTotal uint64
	// Number of pages read directly via mmap()'d files.
	ReadPages uint64
	// Number of pages written directly via mmap()'d files.
	WritePages uint64
}

// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
	// Number of times cached inode attributes are re-validated from the server.
	InodeRevalidate uint64
	// Number of times cached dentry nodes are re-validated from the server.
	DnodeRevalidate uint64
	// Number of times an inode cache is cleared.
	DataInvalidate uint64
	// Number of times cached inode attributes are invalidated.
	AttributeInvalidate uint64
	// Number of times files or directories have been open()'d.
	VFSOpen uint64
	// Number of times a directory lookup has occurred.
	VFSLookup uint64
	// Number of times permissions have been checked.
	VFSAccess uint64
	// Number of updates (and potential writes) to pages.
	VFSUpdatePage uint64
	// Number of pages read directly via mmap()'d files.
	VFSReadPage uint64
	// Number of times a group of pages have been read.
	VFSReadPages uint64
	// Number of pages written directly via mmap()'d files.
	VFSWritePage uint64
	// Number of times a group of pages have been written.
	VFSWritePages uint64
	// Number of times directory entries have been read with getdents().
	VFSGetdents uint64
	// Number of times attributes have been set on inodes.
	VFSSetattr uint64
	// Number of pending writes that have been forcefully flushed to the server.
	VFSFlush uint64
	// Number of times fsync() has been called on directories and files.
	VFSFsync uint64
	// Number of times locking has been attempted on a file.
	VFSLock uint64
	// Number of times files have been closed and released.
	VFSFileRelease uint64
	// Unknown. Possibly unused.
	CongestionWait uint64
	// Number of times files have been truncated.
	Truncation uint64
	// Number of times a file has been grown due to writes beyond its existing end.
	WriteExtension uint64
	// Number of times a file was removed while still open by another process.
	SillyRename uint64
	// Number of times the NFS server gave less data than expected while reading.
	ShortRead uint64
	// Number of times the NFS server wrote less data than expected while writing.
	ShortWrite uint64
	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
	// offline storage.
	JukeboxDelay uint64
	// Number of NFS v4.1+ pNFS reads.
	PNFSRead uint64
	// Number of NFS v4.1+ pNFS writes.
	PNFSWrite uint64
}

// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
	// The name of the operation.
	Operation string
	// Number of requests performed for this operation.
	Requests uint64
	// Number of times an actual RPC request has been transmitted for this operation.
	Transmissions uint64
	// Number of times a request has had a major timeout.
	MajorTimeouts uint64
	// Number of bytes sent for this operation, including RPC headers and payload.
	BytesSent uint64
	// Number of bytes received for this operation, including RPC headers and payload.
	BytesReceived uint64
	// Duration all requests spent queued for transmission before they were sent.
	CumulativeQueueMilliseconds uint64
	// Duration it took to get a reply back after the request was transmitted.
	CumulativeTotalResponseMilliseconds uint64
	// Duration from when a request was enqueued to when it was completely handled.
	CumulativeTotalRequestMilliseconds uint64
	// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
	Errors uint64
}

// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
	// The transport protocol used for the NFS mount.
	Protocol string
	// The local port used for the NFS mount.
	Port uint64
	// Number of times the client has had to establish a connection from scratch
	// to the NFS server.
	Bind uint64
	// Number of times the client has made a TCP connection to the NFS server.
	Connect uint64
	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
	// spent waiting for connections to the server to be established.
	ConnectIdleTime uint64
	// Duration since the NFS mount last saw any RPC traffic.
	IdleTimeSeconds uint64
	// Number of RPC requests for this mount sent to the NFS server.
	Sends uint64
	// Number of RPC responses for this mount received from the NFS server.
	Receives uint64
	// Number of times the NFS server sent a response with a transaction ID
	// unknown to this client.
	BadTransactionIDs uint64
	// A running counter, incremented on each request as the current difference
	// between sends and receives.
	CumulativeActiveRequests uint64
	// A running counter, incremented on each request by the current backlog
	// queue size.
	CumulativeBacklog uint64

	// Stats below only available with stat version 1.1.

	// Maximum number of simultaneously active RPC requests ever used.
	MaximumRPCSlotsUsed uint64
	// A running counter, incremented on each request as the current size of the
	// sending queue.
	CumulativeSendingQueue uint64
	// A running counter, incremented on each request as the current size of the
	// pending queue.
	CumulativePendingQueue uint64

	// Stats below only available with stat version 1.1.
	// Transport over RDMA

	// accessed when sending a call
	ReadChunkCount   uint64
	WriteChunkCount  uint64
	ReplyChunkCount  uint64
	TotalRdmaRequest uint64

	// rarely accessed error counters
	PullupCopyCount      uint64
	HardwayRegisterCount uint64
	FailedMarshalCount   uint64
	BadReplyCount        uint64
	MrsRecovered         uint64
	MrsOrphaned          uint64
	MrsAllocated         uint64
	EmptySendctxQ        uint64

	// accessed when receiving a reply
	TotalRdmaReply    uint64
	FixupCopyCount    uint64
	ReplyWaitsForSend uint64
	LocalInvNeeded    uint64
	NomsgCallCount    uint64
	BcallCount        uint64
}

// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
	// Field indicators for parsing specific types of data
	const (
		fieldOpts       = "opts:"
		fieldAge        = "age:"
		fieldBytes      = "bytes:"
		fieldEvents     = "events:"
		fieldPerOpStats = "per-op"
		fieldTransport  = "xprt:"
	)

	stats := &MountStatsNFS{
		StatVersion: statVersion,
	}

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		// A blank line terminates this mount's statistics block.
		if len(ss) == 0 {
			break
		}

		// Dispatch on the first token, which names the statistic type.
		switch ss[0] {
		case fieldOpts:
			if len(ss) < 2 {
				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
			}
			if stats.Opts == nil {
				stats.Opts = map[string]string{}
			}
			// Options are a comma-separated list of key or key=value tokens.
			for _, opt := range strings.Split(ss[1], ",") {
				split := strings.Split(opt, "=")
				if len(split) == 2 {
					stats.Opts[split[0]] = split[1]
				} else {
					// Valueless options are stored with an empty value.
					stats.Opts[opt] = ""
				}
			}
		case fieldAge:
			if len(ss) < 2 {
				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
			}
			// Age integer is in seconds
			d, err := time.ParseDuration(ss[1] + "s")
			if err != nil {
				return nil, err
			}

			stats.Age = d
		case fieldBytes:
			if len(ss) < 2 {
				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
			}
			bstats, err := parseNFSBytesStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Bytes = *bstats
		case fieldEvents:
			if len(ss) < 2 {
				return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
			}
			estats, err := parseNFSEventsStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Events = *estats
		case fieldTransport:
			if len(ss) < 3 {
				return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
			}

			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
			if err != nil {
				return nil, err
			}

			// Multiple transport lines may appear per mount; collect them all.
			stats.Transport = append(stats.Transport, *tstats)
		}

		// When encountering "per-operation statistics", we must break this
		// loop and parse them separately to ensure we can terminate parsing
		// before reaching another device entry; hence why this 'if' statement
		// is not just another switch case
		if ss[0] == fieldPerOpStats {
			break
		}
	}

	if err := s.Err(); err != nil {
		return nil, err
	}

	// NFS per-operation stats appear last before the next device entry
	perOpStats, err := parseNFSOperationStats(s)
	if err != nil {
		return nil, err
	}

	stats.Operations = perOpStats

	return stats, nil
}

// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
	// Exactly fieldBytesLen (8) counters are expected on a bytes: line.
	if len(ss) != fieldBytesLen {
		return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
	}

	ns := make([]uint64, 0, fieldBytesLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	// Field order matches the kernel's bytes: line layout.
	return &NFSBytesStats{
		Read:        ns[0],
		Write:       ns[1],
		DirectRead:  ns[2],
		DirectWrite: ns[3],
		ReadTotal:   ns[4],
		WriteTotal:  ns[5],
		ReadPages:   ns[6],
		WritePages:  ns[7],
	}, nil
}

// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. 
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) < minFields { + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, minFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + opStats := NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + + switch statVersion { + case statVersion10: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) + } + case statVersion11: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen + } else { + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) + } + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) + } + default: + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. 
+ // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) + } + + return &NFSTransportStats{ + // NFS xprt over tcp or udp + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTimeSeconds: ns[4], + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], + }, nil +} diff --git 
a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 00000000..316df5fb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core. +type ConntrackStatEntry struct { + Entries uint64 + Searched uint64 + Found uint64 + New uint64 + Invalid uint64 + Ignore uint64 + Delete uint64 + DeleteList uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath. +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. 
+ return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields. +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + entries, err := util.ParseHexUint64s(fields) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) + } + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) + } + + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], + } + + // Ignore missing search_restart on Linux < 2.6.35. 
+ if numEntries == 17 { + stats.SearchRestart = *entries[16] + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 00000000..e66208aa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,205 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. 
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) +} + +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + netDev := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := netDev.parseLine(s.Text()) + if err != nil { + return netDev, err + } + + netDev[line.Name] = *line + } + + return netDev, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(rawLine[:idx]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err 
+ } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. +func (netDev NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go new file mode 100644 index 00000000..b70f1fc7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -0,0 +1,248 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +// This contains generic data structures for both udp and tcp sockets. +type ( + // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. + NetIPSocket []*netIPSocketLine + + // NetIPSocketSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetIPSocket it does not collect + // the parsed lines into a slice. + NetIPSocketSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + // Drops shows the total number of dropped packets of all UPD sockets. + Drops *uint64 + } + + // netIPSocketLine represents the fields parsed from a single line + // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // Drops is non-nil for udp{,6}, but nil for tcp{,6}. + // For the proc file format details, see https://linux.die.net/man/5/proc. 
+ netIPSocketLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + Inode uint64 + Drops *uint64 + } +) + +func newNetIPSocket(file string) (NetIPSocket, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields, isUDP) + if err != nil { + return nil, err + } + netIPSocket = append(netIPSocket, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netIPSocket, nil +} + +// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. +func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields, isUDP) + if err != nil { + return nil, err + } + netIPSocketSummary.TxQueueLength += line.TxQueue + netIPSocketSummary.RxQueueLength += line.RxQueue + netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } + } + if err := s.Err(); err != nil { + return nil, err + } + return &netIPSocketSummary, nil +} + +// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. 
+ +func parseIP(hexIP string) (net.IP, error) { + var byteIP []byte + byteIP, err := hex.DecodeString(hexIP) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + } + switch len(byteIP) { + case 4: + return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil + case 16: + i := net.IP{ + byteIP[3], byteIP[2], byteIP[1], byteIP[0], + byteIP[7], byteIP[6], byteIP[5], byteIP[4], + byteIP[11], byteIP[10], byteIP[9], byteIP[8], + byteIP[15], byteIP[14], byteIP[13], byteIP[12], + } + return i, nil + default: + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) + } +} + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { + line := &netIPSocketLine{} + if len(fields) < 10 { + return nil, fmt.Errorf( + "%w: Less than 10 columns found %q", + ErrFileParse, + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) + } + if line.LocalAddr, err = parseIP(l[0]); err != nil { + return nil, err + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) + } + if 
line.RemAddr, err = parseIP(r[0]); err != nil { + return nil, err + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + } + + // inode + if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go new file mode 100644 index 00000000..b6c77b70 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// NetProtocolStats stores the contents from /proc/net/protocols. +type NetProtocolStats map[string]NetProtocolStatLine + +// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We +// only care about the first six columns as the rest are not likely to change +// and only serve to provide a set of capabilities for each protocol. +type NetProtocolStatLine struct { + Name string // 0 The name of the protocol + Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) + Sockets int64 // 2 Number of sockets in use by this protocol + Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol + Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. + MaxHeader uint64 // 5 Protocol specific max header size + Slab bool // 6 Indicates whether or not memory is allocated from the SLAB + ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module + Capabilities NetProtocolCapabilities +} + +// NetProtocolCapabilities contains a list of capabilities for each protocol. 
+type NetProtocolCapabilities struct { + Close bool // 8 + Connect bool // 9 + Disconnect bool // 10 + Accept bool // 11 + IoCtl bool // 12 + Init bool // 13 + Destroy bool // 14 + Shutdown bool // 15 + SetSockOpt bool // 16 + GetSockOpt bool // 17 + SendMsg bool // 18 + RecvMsg bool // 19 + SendPage bool // 20 + Bind bool // 21 + BacklogRcv bool // 22 + Hash bool // 23 + UnHash bool // 24 + GetPort bool // 25 + EnterMemoryPressure bool // 26 +} + +// NetProtocols reads stats from /proc/net/protocols and returns a map of +// PortocolStatLine entries. As of this writing no official Linux Documentation +// exists, however the source is fairly self-explanatory and the format seems +// stable since its introduction in 2.6.12-rc2 +// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 +// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 +func (fs FS) NetProtocols() (NetProtocolStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) + if err != nil { + return NetProtocolStats{}, err + } + return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) +} + +func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { + nps := NetProtocolStats{} + + // Skip the header line + s.Scan() + + for s.Scan() { + line, err := nps.parseLine(s.Text()) + if err != nil { + return NetProtocolStats{}, err + } + + nps[line.Name] = *line + } + return nps, nil +} + +func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { + line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} + var err error + const enabled = "yes" + const disabled = "no" + + fields := strings.Fields(rawLine) + line.Name = fields[0] + line.Size, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.Memory, err = 
strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return nil, err + } + if fields[4] == enabled { + line.Pressure = 1 + } else if fields[4] == disabled { + line.Pressure = 0 + } else { + line.Pressure = -1 + } + line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + if fields[6] == enabled { + line.Slab = true + } else if fields[6] == disabled { + line.Slab = false + } else { + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) + } + line.ModuleName = fields[7] + + err = line.Capabilities.parseCapabilities(fields[8:]) + if err != nil { + return nil, err + } + + return line, nil +} + +func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { + // The capabilities are all bools so we can loop over to map them + capabilityFields := [...]*bool{ + &pc.Close, + &pc.Connect, + &pc.Disconnect, + &pc.Accept, + &pc.IoCtl, + &pc.Init, + &pc.Destroy, + &pc.Shutdown, + &pc.SetSockOpt, + &pc.GetSockOpt, + &pc.SendMsg, + &pc.RecvMsg, + &pc.SendPage, + &pc.Bind, + &pc.BacklogRcv, + &pc.Hash, + &pc.UnHash, + &pc.GetPort, + &pc.EnterMemoryPressure, + } + + for i := 0; i < len(capabilities); i++ { + if capabilities[i] == "y" { + *capabilityFields[i] = true + } else if capabilities[i] == "n" { + *capabilityFields[i] = false + } else { + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 00000000..deb7029f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. +type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation 
{ + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 00000000..fae62b13 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,162 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. 
+ return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) + } + + // Iterate two values at a time to gather key/value pairs. 
+ out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 00000000..71c8059f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. +type SoftnetStat struct { + // Number of processed packets. + Processed uint32 + // Number of dropped packets. + Dropped uint32 + // Number of times processing packets ran out of quota. + TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. 
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + cpuIndex := 0 + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + softnetStat := SoftnetStat{} + + if width < minColumns { + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) + } + + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] + } + + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number. 
+ softnetStat.Index = uint32(cpuIndex) + } + softnetStat.Width = width + stats = append(stats, softnetStat) + cpuIndex++ + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go new file mode 100644 index 00000000..52776295 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. + NetTCP []*netIPSocketLine + + // NetTCPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetTCP it does not collect + // the parsed lines into a slice. + NetTCPSummary NetIPSocketSummary +) + +// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp. +func (fs FS) NetTCP() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp")) +} + +// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp6. 
+func (fs FS) NetTCP6() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp6")) +} + +// NetTCPSummary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp. +func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp")) +} + +// NetTCP6Summary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp6. +func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp6")) +} + +// newNetTCP creates a new NetTCP{,6} from the contents of the given file. +func newNetTCP(file string) (NetTCP, error) { + n, err := newNetIPSocket(file) + n1 := NetTCP(n) + return n1, err +} + +func newNetTCPSummary(file string) (*NetTCPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetTCPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 00000000..13994c17 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. 
+// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. + TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. +func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. 
+func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 00000000..9ac3daf2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netIPSocketLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary NetIPSocketSummary +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. 
+func newNetUDP(file string) (NetUDP, error) { + n, err := newNetIPSocket(file) + n1 := NetUDP(n) + return n1, err +} + +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetUDPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 00000000..d868cebd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. 
+type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. + s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. 
+ hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is 
optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. + pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 00000000..7c597bc8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not 
use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. 
+func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + } + + dnwid, err := 
strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + } + + return interfaces, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go new file mode 100644 index 00000000..932ef204 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -0,0 +1,189 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. 
Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired + XfrmOutStateInvalid int + // State hasn’t been fully acquired before use + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case 
"XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go new file mode 100644 index 00000000..742dff45 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -0,0 +1,82 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +// NetStat contains statistics for all the counters from one file. +type NetStat struct { + Stats map[string][]uint64 + Filename string +} + +// NetStat retrieves stats from `/proc/net/stat/`. +func (fs FS) NetStat() ([]NetStat, error) { + statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) + if err != nil { + return nil, err + } + + var netStatsTotal []NetStat + + for _, filePath := range statFiles { + procNetstat, err := parseNetstat(filePath) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. 
+func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + scanner.Scan() + + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err + } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) + } + } + + return netStat, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 00000000..14279636 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,338 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. 
+type Procs []Proc + +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil || errors.Unwrap(err) == ErrMountPoint { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.proc.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// NewProc returns a process for the given pid. +// +// Deprecated: Use fs.Proc() instead. +func (fs FS) NewProc(pid int) (Proc, error) { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. 
+func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.proc.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(p.path("cmdline")) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil +} + +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := io.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot). 
+func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.isReal { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. 
+func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. 
+func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := os.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 00000000..daeed7f5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path. +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.SplitN(cgroupStr, ":", 3) + if len(fields) < 3 { + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) + } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file. 
+func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system. +func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(p.path("cgroup")) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 00000000..5dd49389 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. 
+// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. +func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. 
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 00000000..57a89895 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from `/proc//environ`. 
+func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 00000000..fa761b35 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,138 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. 
+type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // Inode number + Ino string + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. +func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid, ino string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + Ino: ino, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. 
+func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches. +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 00000000..86b4b452 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. 
+ Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. +func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new 
file mode 100644 index 00000000..776f3497 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// IO creates a new ProcIO instance from a given Proc instance. 
+func (p Proc) IO() (ProcIO, error) { + pio := ProcIO{} + + data, err := util.ReadFileNoStat(p.path("io")) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 00000000..9530b14b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,160 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime uint64 + // Maximum size of files that the process may create. + FileSize uint64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize uint64 + // Maximum size of the process stack in bytes. + StackSize uint64 + // Maximum size of a core file. 
+ CoreFileSize uint64 + // Limit of the process's resident set in pages. + ResidentSet uint64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes uint64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles uint64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory uint64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace uint64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks uint64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals uint64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize uint64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority uint64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority uint64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout uint64 +} + +const ( + limitsFields = 4 + limitsUnlimited = "unlimited" +) + +var ( + limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) +) + +// NewLimits returns the current soft limits of the process. +// +// Deprecated: Use p.Limits() instead. +func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. 
+func (p Proc) Limits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + + s.Scan() // Skip limits header + + for s.Scan() { + //fields := limitsMatch.Split(s.Text(), limitsFields) + fields := limitsMatch.FindStringSubmatch(s.Text()) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) + } + + switch fields[1] { + case "Max cpu time": + l.CPUTime, err = parseUint(fields[2]) + case "Max file size": + l.FileSize, err = parseUint(fields[2]) + case "Max data size": + l.DataSize, err = parseUint(fields[2]) + case "Max stack size": + l.StackSize, err = parseUint(fields[2]) + case "Max core file size": + l.CoreFileSize, err = parseUint(fields[2]) + case "Max resident set": + l.ResidentSet, err = parseUint(fields[2]) + case "Max processes": + l.Processes, err = parseUint(fields[2]) + case "Max open files": + l.OpenFiles, err = parseUint(fields[2]) + case "Max locked memory": + l.LockedMemory, err = parseUint(fields[2]) + case "Max address space": + l.AddressSpace, err = parseUint(fields[2]) + case "Max file locks": + l.FileLocks, err = parseUint(fields[2]) + case "Max pending signals": + l.PendingSignals, err = parseUint(fields[2]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseUint(fields[2]) + case "Max nice priority": + l.NicePriority, err = parseUint(fields[2]) + case "Max realtime priority": + l.RealtimePriority, err = parseUint(fields[2]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseUint(fields[2]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseUint(s string) (uint64, error) { + if s == limitsUnlimited { + return 18446744073709551615, nil + } + i, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, 
err) + } + return i, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 00000000..7e75c286 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,211 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. +type ProcMap struct { + // The start address of current mapping. 
+ StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. +func parseDevice(s string) (uint64, error) { + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) + } + + major, err := strconv.ParseUint(s[0:i], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(s[i+1:], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress converts a hex-string to a uintptr. +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address. +func parseAddresses(s string) (uintptr, uintptr, error) { + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) + } + + saddr, err := parseAddress(s[0:idx]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(s[idx+1:]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. 
+func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. +func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. 
+func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 00000000..8e3ff4d7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,443 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. 
+ PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + 
TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseProcNetstat parses the metrics from 
proc//net/netstat file +// and returns a ProcNetstat structure. +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = &value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = &value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = &value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = &value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = &value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = &value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = &value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = &value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = &value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = &value + case "TW": + procNetstat.TcpExt.TW = &value + case "TWRecycled": + procNetstat.TcpExt.TWRecycled = &value + case "TWKilled": + procNetstat.TcpExt.TWKilled = &value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = &value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = &value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = &value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = &value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = &value + case 
"ListenOverflows": + procNetstat.TcpExt.ListenOverflows = &value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = &value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = &value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = &value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = &value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = &value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = &value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = &value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = &value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = &value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = &value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = &value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = &value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = &value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = &value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = &value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = &value + case "TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = &value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = &value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = &value + case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = &value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = &value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = &value + case "TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = &value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = &value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = &value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = &value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = &value + case "TCPDSACKOfoSent": + 
procNetstat.TcpExt.TCPDSACKOfoSent = &value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = &value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = &value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = &value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = &value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = &value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = &value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = &value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = &value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = &value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = &value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = &value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = &value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = &value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = &value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = &value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = &value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = &value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = &value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = &value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = 
&value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = &value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = &value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = &value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = &value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = &value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = &value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = &value + case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = &value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = &value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = &value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = &value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = &value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = &value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = &value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = &value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = &value + } + case "IpExt": + switch key { + case "InNoRoutes": + procNetstat.IpExt.InNoRoutes = &value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = &value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = &value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = &value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = &value + case "OutBcastPkts": + 
procNetstat.IpExt.OutBcastPkts = &value + case "InOctets": + procNetstat.IpExt.InOctets = &value + case "OutOctets": + procNetstat.IpExt.OutOctets = &value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = &value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = &value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = &value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = &value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = &value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = &value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = &value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = &value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = &value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = &value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 00000000..0f8f847f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. 
If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// Namespaces reads from /proc//ns/* to get the namespaces of which the +// process is a member. +func (p Proc) Namespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 00000000..ccd35f15 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + } + + return parsePSIStats(bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information. +func parsePSIStats(r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 00000000..09060e82 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,166 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in `/proc/pid/smaps`. + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM. + Rss uint64 + // Process's proportional share of this mapping. + Pss uint64 + // Size in bytes of clean shared pages. + SharedClean uint64 + // Size in bytes of dirty shared pages. + SharedDirty uint64 + // Size in bytes of clean private pages. + PrivateClean uint64 + // Size in bytes of dirty private pages. + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed. + Referenced uint64 + // Amount of memory that does not belong to any file. + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap. + Swap uint64 + // Process's proportional memory on swap. + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimSuffix(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += vUintBytes + case "Referenced": + s.Referenced 
+= vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 00000000..b9d2cf64 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 +} + +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 +} + +type IcmpMsg struct { + InType3 *float64 + OutType3 *float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 +} + +type Udp struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 
+ SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = &value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = &value + case "InReceives": + procSnmp.Ip.InReceives = &value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = &value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = &value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = &value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = &value + case "InDiscards": + procSnmp.Ip.InDiscards = &value + case "InDelivers": + procSnmp.Ip.InDelivers = &value + case "OutRequests": + procSnmp.Ip.OutRequests = &value + case "OutDiscards": + procSnmp.Ip.OutDiscards = &value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = &value + case "ReasmReqds": + 
procSnmp.Ip.ReasmReqds = &value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = &value + case "ReasmFails": + procSnmp.Ip.ReasmFails = &value + case "FragOKs": + procSnmp.Ip.FragOKs = &value + case "FragFails": + procSnmp.Ip.FragFails = &value + case "FragCreates": + procSnmp.Ip.FragCreates = &value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = &value + case "InErrors": + procSnmp.Icmp.InErrors = &value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = &value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = &value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = &value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = &value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = &value + case "InRedirects": + procSnmp.Icmp.InRedirects = &value + case "InEchos": + procSnmp.Icmp.InEchos = &value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = &value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = &value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = &value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = &value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = &value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = &value + case "OutErrors": + procSnmp.Icmp.OutErrors = &value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = &value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = &value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = &value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = &value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = &value + case "OutEchos": + procSnmp.Icmp.OutEchos = &value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = &value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = &value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = &value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = &value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = &value + } + case "IcmpMsg": + switch key { + case 
"InType3": + procSnmp.IcmpMsg.InType3 = &value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = &value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + procSnmp.Tcp.RtoAlgorithm = &value + case "RtoMin": + procSnmp.Tcp.RtoMin = &value + case "RtoMax": + procSnmp.Tcp.RtoMax = &value + case "MaxConn": + procSnmp.Tcp.MaxConn = &value + case "ActiveOpens": + procSnmp.Tcp.ActiveOpens = &value + case "PassiveOpens": + procSnmp.Tcp.PassiveOpens = &value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = &value + case "EstabResets": + procSnmp.Tcp.EstabResets = &value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = &value + case "InSegs": + procSnmp.Tcp.InSegs = &value + case "OutSegs": + procSnmp.Tcp.OutSegs = &value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = &value + case "InErrs": + procSnmp.Tcp.InErrs = &value + case "OutRsts": + procSnmp.Tcp.OutRsts = &value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = &value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = &value + case "NoPorts": + procSnmp.Udp.NoPorts = &value + case "InErrors": + procSnmp.Udp.InErrors = &value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = &value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = &value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = &value + case "NoPorts": + procSnmp.UdpLite.NoPorts = &value + case "InErrors": + procSnmp.UdpLite.InErrors = &value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = &value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = &value + } + } + } + } + return 
procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 00000000..3059cc6a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. 
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 +} + +type Icmp6 struct { + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 +} + +type 
Udp6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. +func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = &value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = &value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = &value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = &value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = &value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = &value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = &value + case "InDiscards": + 
procSnmp6.Ip6.InDiscards = &value + case "InDelivers": + procSnmp6.Ip6.InDelivers = &value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = &value + case "OutRequests": + procSnmp6.Ip6.OutRequests = &value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = &value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = &value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = &value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = &value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = &value + case "FragOKs": + procSnmp6.Ip6.FragOKs = &value + case "FragFails": + procSnmp6.Ip6.FragFails = &value + case "FragCreates": + procSnmp6.Ip6.FragCreates = &value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = &value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = &value + case "InOctets": + procSnmp6.Ip6.InOctets = &value + case "OutOctets": + procSnmp6.Ip6.OutOctets = &value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = &value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = &value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = &value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = &value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = &value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = &value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = &value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = &value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = &value + case "InErrors": + procSnmp6.Icmp6.InErrors = &value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = &value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = &value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = &value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = &value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = &value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = &value + case "InParmProblems": + 
procSnmp6.Icmp6.InParmProblems = &value + case "InEchos": + procSnmp6.Icmp6.InEchos = &value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = &value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = &value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = &value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = &value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = &value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = &value + case "InNeighborSolicits": + procSnmp6.Icmp6.InNeighborSolicits = &value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = &value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = &value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = &value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = &value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = &value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = &value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = &value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = &value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = &value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = &value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = &value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = &value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = &value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = &value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = &value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = &value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = &value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = &value + case "InType1": + procSnmp6.Icmp6.InType1 = &value + case "InType134": + 
procSnmp6.Icmp6.InType134 = &value + case "InType135": + procSnmp6.Icmp6.InType135 = &value + case "InType136": + procSnmp6.Icmp6.InType136 = &value + case "InType143": + procSnmp6.Icmp6.InType143 = &value + case "OutType133": + procSnmp6.Icmp6.OutType133 = &value + case "OutType135": + procSnmp6.Icmp6.OutType135 = &value + case "OutType136": + procSnmp6.Icmp6.OutType136 = &value + case "OutType143": + procSnmp6.Icmp6.OutType143 = &value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = &value + case "NoPorts": + procSnmp6.Udp6.NoPorts = &value + case "InErrors": + procSnmp6.Udp6.InErrors = &value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = &value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = &value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = &value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = &value + case "InErrors": + procSnmp6.UdpLite6.InErrors = &value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = &value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 00000000..06a8d931 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,229 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. 
+ TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime int + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime int + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize uint + // Resident set size in pages. + RSS int + // Soft limit in bytes on the rss of the process. + RSSLimit uint64 + // CPU number last executed on. + Processor uint + // Real-time scheduling priority, a number in the range 1 to 99 for processes + // scheduled under a real-time policy, or 0, for non-real-time processes. + RTPriority uint + // Scheduling policy. 
+ Policy uint + // Aggregated block I/O delays, measured in clock ticks (centiseconds). + DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int + + proc FS +} + +// NewStat returns the current status information of the process. +// +// Deprecated: Use p.Stat() instead. +func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + + var ( + ignoreInt64 int64 + ignoreUint64 uint64 + + s = ProcStat{PID: p.PID, proc: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) + } + + s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignoreInt64, + &s.Starttime, + &s.VSize, + &s.RSS, + &s.RSSLimit, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreInt64, + &s.Processor, + &s.RTPriority, + &s.Policy, + &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, + ) + if err 
!= nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() uint { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.proc.Stat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 00000000..a055197c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,238 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "math/bits" + "sort" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/status. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + // List of Pid namespace. 
+ NSpids []uint64 + + // Peak virtual memory size. + VmPeak uint64 // nolint:revive + // Virtual memory size. + VmSize uint64 // nolint:revive + // Locked memory size. + VmLck uint64 // nolint:revive + // Pinned memory size. + VmPin uint64 // nolint:revive + // Peak resident set size. + VmHWM uint64 // nolint:revive + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:revive + // Size of resident anonymous memory. + RssAnon uint64 // nolint:revive + // Size of resident file mappings. + RssFile uint64 // nolint:revive + // Size of resident shared memory. + RssShmem uint64 // nolint:revive + // Size of data segments. + VmData uint64 // nolint:revive + // Size of stack segments. + VmStk uint64 // nolint:revive + // Size of text segments. + VmExe uint64 // nolint:revive + // Shared library code size. + VmLib uint64 // nolint:revive + // Page table entries size. + VmPTE uint64 // nolint:revive + // Size of second-level page tables. + VmPMD uint64 // nolint:revive + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 // nolint:revive + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) + UIDs [4]uint64 + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]uint64 + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 +} + +// NewStatus returns the current status information of the process. 
+func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + // removes "kB" + v = strings.TrimSuffix(v, " kB") + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } + case "Gid": + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } + case "NSpid": + s.NSpids = calcNSPidsList(vString) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes 
+ case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + s.CpusAllowedList = calcCpusAllowedList(vString) + } + + return nil +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} + +func calcCpusAllowedList(cpuString string) []uint64 { + s := strings.Split(cpuString, ",") + + var g []uint64 + + for _, cpu := range s { + // parse cpu ranges, example: 1-3=[1,2,3] + if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { + startCPU, _ := strconv.ParseUint(l[0], 10, 64) + endCPU, _ := strconv.ParseUint(l[1], 10, 64) + + for i := startCPU; i <= endCPU; i++ { + g = append(g, i) + } + } else if len(l) == 1 { + cpu, _ := strconv.ParseUint(l[0], 10, 64) + g = append(g, cpu) + } + + } + + sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + return g +} + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 00000000..5eefbe2e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 00000000..5f7f32dc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line. +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from `/proc//schedstat`. +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from `/proc/schedstat`. 
+func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (ProcSchedstat, error) { + var ( + stats ProcSchedstat + err error + ) + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return stats, err + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return stats, err + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return stats, err + } + + return stats, errors.New("could not parse schedstat") +} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go new file mode 100644 index 00000000..8611c901 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -0,0 +1,151 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + slabSpace = regexp.MustCompile(`\s+`) + slabVer = regexp.MustCompile(`slabinfo -`) + slabHeader = regexp.MustCompile(`# name`) +) + +// Slab represents a slab pool in the kernel. +type Slab struct { + Name string + ObjActive int64 + ObjNum int64 + ObjSize int64 + ObjPerSlab int64 + PagesPerSlab int64 + // tunables + Limit int64 + Batch int64 + SharedFactor int64 + SlabActive int64 + SlabNum int64 + SharedAvail int64 +} + +// SlabInfo represents info for all slabs. +type SlabInfo struct { + Slabs []*Slab +} + +func shouldParseSlab(line string) bool { + if slabVer.MatchString(line) { + return false + } + if slabHeader.MatchString(line) { + return false + } + return true +} + +// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. +func parseV21SlabEntry(line string) (*Slab, error) { + // First cleanup whitespace. 
+ l := slabSpace.ReplaceAllString(line, " ") + s := strings.Split(l, " ") + if len(s) != 16 { + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) + } + var err error + i := &Slab{Name: s[0]} + i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) + if err != nil { + return nil, err + } + i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) + if err != nil { + return nil, err + } + i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) + if err != nil { + return nil, err + } + i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) + if err != nil { + return nil, err + } + i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) + if err != nil { + return nil, err + } + i.Limit, err = strconv.ParseInt(s[8], 10, 64) + if err != nil { + return nil, err + } + i.Batch, err = strconv.ParseInt(s[9], 10, 64) + if err != nil { + return nil, err + } + i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) + if err != nil { + return nil, err + } + i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) + if err != nil { + return nil, err + } + i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) + if err != nil { + return nil, err + } + i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) + if err != nil { + return nil, err + } + return i, nil +} + +// parseSlabInfo21 is used to parse a slabinfo 2.1 file. +func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { + scanner := bufio.NewScanner(r) + s := SlabInfo{Slabs: []*Slab{}} + for scanner.Scan() { + line := scanner.Text() + if !shouldParseSlab(line) { + continue + } + slab, err := parseV21SlabEntry(line) + if err != nil { + return s, err + } + s.Slabs = append(s.Slabs, slab) + } + return s, nil +} + +// SlabInfo reads data from `/proc/slabinfo`. +func (fs FS) SlabInfo() (SlabInfo, error) { + // TODO: Consider passing options to allow for parsing different + // slabinfo versions. However, slabinfo 2.1 has been stable since + // kernel 2.6.10 and later. 
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) + if err != nil { + return SlabInfo{}, err + } + + return parseSlabInfo21(bytes.NewReader(data)) +} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 00000000..28708e07 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. 
+type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, 
fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + } + 
} + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) + } + + return softirqs, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 00000000..e36b41c1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,258 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading `/proc/softirqs`. +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. 
+type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU map[int64]CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + } + + 
return cpuStat, cpuID, nil +} + +// Parse a softirq line. +func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: Use fs.Stat() instead. +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: Use fs.Stat() instead. +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Stat{}, err + } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} + +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) + + // Increase default scanner buffer to handle very long `intr` lines. 
+ buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) + + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case 
strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 00000000..65fec834 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 00000000..80e0e947 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,80 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil +} + +// Thread returns a process for a given TID of Proc. 
+func (proc Proc) Thread(tid int) (Proc, error) { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar new file mode 100644 index 00000000..19ef02b8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,413 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. 
+ +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C ] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE +unset RECURSIVE_UNLINK + +while getopts :cf:-:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 
1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). 
+ echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo 
"SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." 
+ echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 00000000..51c49d89 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,212 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// Each setting is exposed as a single file. +// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string. 
+type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // 
/proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the specified `proc` filesystem. +func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) + } + + files, err := os.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + 
vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case 
"user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = vp.PInt64() + case "zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 00000000..e54d94b0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "os" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), 
"per-node stats") { + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": + zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case 
"nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c01f238c..1040e49e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -144,7 +144,7 @@ github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/docker/model-distribution v0.0.0-20250710123110-a633223e127e +# github.com/docker/model-distribution v0.0.0-20250724114133-a11d745e582c ## explicit; go 1.23.0 github.com/docker/model-distribution/builder github.com/docker/model-distribution/distribution @@ -154,11 +154,13 @@ github.com/docker/model-distribution/internal/partial github.com/docker/model-distribution/internal/progress github.com/docker/model-distribution/internal/store github.com/docker/model-distribution/registry +github.com/docker/model-distribution/tarball github.com/docker/model-distribution/types -# github.com/docker/model-runner v0.0.0-20250711130825-8907b3ddf82e +# github.com/docker/model-runner v0.0.0-20250724122432-ecfa5e7e6807 ## explicit; go 1.23.7 
github.com/docker/model-runner/pkg/diskusage github.com/docker/model-runner/pkg/environment +github.com/docker/model-runner/pkg/gpuinfo github.com/docker/model-runner/pkg/inference github.com/docker/model-runner/pkg/inference/backends/llamacpp github.com/docker/model-runner/pkg/inference/config @@ -170,6 +172,19 @@ github.com/docker/model-runner/pkg/internal/jsonutil github.com/docker/model-runner/pkg/logging github.com/docker/model-runner/pkg/metrics github.com/docker/model-runner/pkg/tailbuffer +# github.com/elastic/go-sysinfo v1.15.3 +## explicit; go 1.21 +github.com/elastic/go-sysinfo +github.com/elastic/go-sysinfo/internal/registry +github.com/elastic/go-sysinfo/providers/aix +github.com/elastic/go-sysinfo/providers/darwin +github.com/elastic/go-sysinfo/providers/linux +github.com/elastic/go-sysinfo/providers/shared +github.com/elastic/go-sysinfo/providers/windows +github.com/elastic/go-sysinfo/types +# github.com/elastic/go-windows v1.0.2 +## explicit; go 1.18 +github.com/elastic/go-windows # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -359,6 +374,11 @@ github.com/prometheus/client_model/go ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model +# github.com/prometheus/procfs v0.15.1 +## explicit; go 1.20 +github.com/prometheus/procfs +github.com/prometheus/procfs/internal/fs +github.com/prometheus/procfs/internal/util # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg