diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a2c0d31..da063137 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
   the `manifests` entry to `null` (which was technically a violation of the
   specification, though such images cannot be pushed or interacted with
   outside of umoci).
+* Based on [some recent developments in the image-spec][image-spec#1285], umoci
+  will now produce an error if it encounters descriptors with a negative size
+  (this was a potential DoS vector previously), and also now protects against
+  a theoretical attack where an attacker endlessly writes to a blob (this
+  would not be generally exploitable for images accessed through descriptors).
 
 ### Changed ###
 * We now use `go:embed` to fill the version information of `umoci --version`,
@@ -33,6 +38,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
   (and after that, we may choose to keep the `jq`-based validators as a
   double-check that our own validators are working correctly).
 
+[image-spec#1285]: https://github.com/opencontainers/image-spec/pull/1285
 [docker-library/meta-scripts]: https://github.com/docker-library/meta-scripts
 
 ## [0.5.0] - 2025-05-21 ##
diff --git a/oci/cas/dir/dir.go b/oci/cas/dir/dir.go
index 9217af9e..8ea391e4 100644
--- a/oci/cas/dir/dir.go
+++ b/oci/cas/dir/dir.go
@@ -244,10 +244,17 @@ func (e *dirEngine) GetBlob(_ context.Context, digest digest.Digest) (io.ReadClo
 	if err != nil {
 		return nil, fmt.Errorf("open blob: %w", err)
 	}
+	st, err := fh.Stat()
+	if err != nil {
+		return nil, fmt.Errorf("stat blob: %w", err)
+	}
 	return &hardening.VerifiedReadCloser{
 		Reader:         fh,
 		ExpectedDigest: digest,
-		ExpectedSize:   int64(-1), // We don't know the expected size.
+		// Assume the file size is the blob size. This is almost certainly true
+		// in general, and if an attacker is modifying the blobs underneath us
+		// then snapshotting the size makes sure we don't read endlessly.
+		ExpectedSize: st.Size(),
 	}, nil
 }
 
diff --git a/oci/casext/verified_blob.go b/oci/casext/verified_blob.go
index 8eb0c2dd..13d5a44f 100644
--- a/oci/casext/verified_blob.go
+++ b/oci/casext/verified_blob.go
@@ -20,6 +20,8 @@ package casext
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 
 	ispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -27,16 +29,25 @@ import (
 	"github.com/opencontainers/umoci/pkg/hardening"
 )
 
+var errInvalidDescriptorSize = errors.New("descriptor size must not be negative")
+
 // GetVerifiedBlob returns a VerifiedReadCloser for retrieving a blob from the
 // image, which the caller must Close() *and* read-to-EOF (checking the error
 // code of both). Returns ErrNotExist if the digest is not found, and
 // ErrBlobDigestMismatch on a mismatched blob digest. In addition, the reader
 // is limited to the descriptor.Size.
 func (e Engine) GetVerifiedBlob(ctx context.Context, descriptor ispec.Descriptor) (io.ReadCloser, error) {
+	// Negative sizes are not permitted by the spec, and are a DoS vector.
+	if descriptor.Size < 0 {
+		return nil, fmt.Errorf("invalid descriptor: %w", errInvalidDescriptorSize)
+	}
 	reader, err := e.GetBlob(ctx, descriptor.Digest)
+	if err != nil {
+		return nil, err
+	}
 	return &hardening.VerifiedReadCloser{
 		Reader:         reader,
 		ExpectedDigest: descriptor.Digest,
 		ExpectedSize:   descriptor.Size,
-	}, err
+	}, nil
 }
diff --git a/oci/casext/verified_blob_test.go b/oci/casext/verified_blob_test.go
new file mode 100644
index 00000000..e3669164
--- /dev/null
+++ b/oci/casext/verified_blob_test.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * umoci: Umoci Modifies Open Containers' Images
+ * Copyright (C) 2016-2025 SUSE LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package casext
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/opencontainers/go-digest"
+	ispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/opencontainers/umoci/oci/cas/dir"
+)
+
+func TestGetVerifiedBlob(t *testing.T) {
+	image := filepath.Join(t.TempDir(), "image")
+	err := dir.Create(image)
+	require.NoError(t, err)
+
+	engine, err := dir.Open(image)
+	require.NoError(t, err)
+	engineExt := NewEngine(engine)
+	defer engine.Close() //nolint:errcheck
+
+	descMap := fakeSetupEngine(t, engineExt)
+	assert.NotEmpty(t, descMap, "fakeSetupEngine descriptor map")
+
+	t.Run("NonExist", func(t *testing.T) {
+		for idx, test := range descMap {
+			test := test // copy iterator
+			t.Run(fmt.Sprintf("Descriptor%.2d", idx+1), func(t *testing.T) {
+				ctx := context.Background()
+
+				const badDigest = digest.Digest("sha256:000111222333444555666777888999aaabbbcccdddeeefff0123456789abcdef")
+				desc := test.result
+
+				badDescriptor := ispec.Descriptor{
+					MediaType: desc.MediaType,
+					Digest:    badDigest,
+					Size:      desc.Size,
+				}
+
+				blob, err := engineExt.GetVerifiedBlob(ctx, badDescriptor)
+				assert.ErrorIs(t, err, os.ErrNotExist, "get non-existent verified blob") //nolint:testifylint // assert.*Error* makes more sense
+				if !assert.Nil(t, blob, "get verified blob (non-existent digest)") {
+					_ = blob.Close()
+				}
+			})
+		}
+	})
+
+	t.Run("InvalidSize", func(t *testing.T) {
+		for idx, test := range descMap {
+			test := test // copy iterator
+			t.Run(fmt.Sprintf("Descriptor%.2d", idx+1), func(t *testing.T) {
+				ctx := context.Background()
+				desc := test.result
+
+				blob, err := engineExt.GetVerifiedBlob(ctx, desc)
+				assert.NoError(t, err, "get verified blob (regular descriptor)") //nolint:testifylint // assert.*Error* makes more sense
+				if assert.NotNil(t, blob, "get verified blob (regular descriptor)") {
+					// Avoid "trailing data" log warnings on Close.
+					_, _ = io.Copy(io.Discard, blob)
+					_ = blob.Close()
+				}
+
+				badDescriptor := ispec.Descriptor{
+					MediaType: desc.MediaType,
+					Digest:    desc.Digest,
+					Size:      -1, // invalid!
+				}
+
+				blob, err = engineExt.GetVerifiedBlob(ctx, badDescriptor)
+				assert.ErrorIs(t, err, errInvalidDescriptorSize, "get verified blob (negative descriptor size)") //nolint:testifylint // assert.*Error* makes more sense
+				if !assert.Nil(t, blob, "get verified blob (negative descriptor size)") {
+					_ = blob.Close()
+				}
+			})
+		}
+	})
+}
diff --git a/pkg/hardening/verified_reader.go b/pkg/hardening/verified_reader.go
index ea801c88..4a179b40 100644
--- a/pkg/hardening/verified_reader.go
+++ b/pkg/hardening/verified_reader.go
@@ -33,8 +33,9 @@ import (
 // Exported errors for verification issues that occur during processing within
 // VerifiedReadCloser.
 var (
-	ErrDigestMismatch = errors.New("verified reader digest mismatch")
-	ErrSizeMismatch   = errors.New("verified reader size mismatch")
+	ErrDigestMismatch      = errors.New("verified reader digest mismatch")
+	ErrSizeMismatch        = errors.New("verified reader size mismatch")
+	ErrInvalidExpectedSize = errors.New("verified reader has invalid expected size")
 )
 
 // VerifiedReadCloser is a basic io.ReadCloser which allows for simple
@@ -87,21 +88,25 @@ func (v *VerifiedReadCloser) isNoop() bool {
 		innerV.ExpectedSize == v.ExpectedSize
 }
 
+func (v *VerifiedReadCloser) check() error {
+	if v.ExpectedSize < 0 {
+		return fmt.Errorf("%w: expected size must be non-negative", ErrInvalidExpectedSize)
+	}
+	return nil
+}
+
 func (v *VerifiedReadCloser) verify(nilErr error) error {
 	// Digest mismatch (always takes precedence)?
 	if actualDigest := v.digester.Digest(); actualDigest != v.ExpectedDigest {
 		return fmt.Errorf("expected %s not %s: %w", v.ExpectedDigest, actualDigest, ErrDigestMismatch)
 	}
-	// Do we need to check the size for mismatches?
-	if v.ExpectedSize >= 0 {
-		switch {
-		// Not enough bytes in the stream.
-		case v.currentSize < v.ExpectedSize:
-			return fmt.Errorf("expected %d bytes (only %d bytes in stream): %w", v.ExpectedSize, v.currentSize, ErrSizeMismatch)
-		// We don't read the entire blob, so the message needs to be slightly adjusted.
-		case v.currentSize > v.ExpectedSize:
-			return fmt.Errorf("expected %d bytes (extra bytes in stream): %w", v.ExpectedSize, ErrSizeMismatch)
-		}
+	switch {
+	// Not enough bytes in the stream.
+	case v.currentSize < v.ExpectedSize:
+		return fmt.Errorf("expected %d bytes (only %d bytes in stream): %w", v.ExpectedSize, v.currentSize, ErrSizeMismatch)
+	// We don't read the entire blob, so the message needs to be slightly adjusted.
+	case v.currentSize > v.ExpectedSize:
+		return fmt.Errorf("expected %d bytes (extra bytes in stream): %w", v.ExpectedSize, ErrSizeMismatch)
 	}
 	// Forward the provided error.
 	return nilErr
@@ -111,14 +116,13 @@
 // EOF. Make sure that you always check for EOF and read-to-the-end for all
 // files.
 func (v *VerifiedReadCloser) Read(p []byte) (n int, err error) {
+	if err := v.check(); err != nil {
+		return 0, err
+	}
 	// Make sure we don't read after v.ExpectedSize has been passed.
 	err = io.EOF
 	left := v.ExpectedSize - v.currentSize
 	switch {
-	// ExpectedSize has been disabled.
-	case v.ExpectedSize < 0:
-		n, err = v.Reader.Read(p)
-
 	// We still have something left to read.
 	case left > 0:
 		if int64(len(p)) > left {
@@ -180,6 +184,9 @@ func (v *VerifiedReadCloser) sourceName() string {
 // Close is a wrapper around VerifiedReadCloser.Reader, but with a digest check
 // which will return an error if the underlying Close() didn't.
 func (v *VerifiedReadCloser) Close() error {
+	if err := v.check(); err != nil {
+		return err
+	}
 	// Consume any remaining bytes to make sure that we've actually read to the
 	// end of the stream. VerifiedReadCloser.Read will not read past
 	// ExpectedSize+1, so we don't need to add a limit here.
diff --git a/pkg/hardening/verified_reader_test.go b/pkg/hardening/verified_reader_test.go
index d295b6e4..52fb1440 100644
--- a/pkg/hardening/verified_reader_test.go
+++ b/pkg/hardening/verified_reader_test.go
@@ -60,7 +60,7 @@ func TestValid(t *testing.T) {
 	}
 }
 
-func TestValidIgnoreLength(t *testing.T) {
+func TestNegativeExpectedSize(t *testing.T) {
 	for size := 1; size <= 16384; size *= 2 {
 		t.Run(fmt.Sprintf("size:%d", size), func(t *testing.T) {
 			// Fill buffer with random data.
@@ -76,13 +76,20 @@ func TestValidIgnoreLength(t *testing.T) {
 				ExpectedSize:   -1,
 			}
 
-			// Make sure if we copy-to-EOF we get no errors.
-			_, err = io.Copy(io.Discard, verifiedReader)
-			require.NoError(t, err, "digest (size ignored) should be correct on EOF")
+			// Make sure that negative ExpectedSize always fails.
+			n, err := io.Copy(io.Discard, verifiedReader)
+			assert.ErrorIs(t, err, ErrInvalidExpectedSize, "io.Copy (with ExpectedSize < 0) should fail") //nolint:testifylint // assert.*Error* makes more sense
+			assert.Zero(t, n, "io.Copy (with ExpectedSize < 0) should read nothing")
+
+			// Bad ExpectedSize should read no data.
+			assert.Equal(t, size, buffer.Len(), "io.Copy (with ExpectedSize < 0) should not have read any data")
 
-			// And on close we shouldn't get an error either.
+			// And Close should fail in the same way.
 			err = verifiedReader.Close()
-			require.NoError(t, err, "digest (size ignored) should be correct on Close")
+			assert.ErrorIs(t, err, ErrInvalidExpectedSize, "Close (with ExpectedSize < 0) should fail") //nolint:testifylint // assert.*Error* makes more sense
+
+			// Bad ExpectedSize should read no data.
+			assert.Equal(t, size, buffer.Len(), "close (with ExpectedSize < 0) should not have read any data")
 		})
 	}
 }
@@ -100,7 +107,7 @@ func TestValidTrailing(t *testing.T) {
 			verifiedReader := &VerifiedReadCloser{
 				Reader:         io.NopCloser(buffer),
 				ExpectedDigest: expectedDigest,
-				ExpectedSize:   -1,
+				ExpectedSize:   int64(size),
 			}
 
 			// Read *half* of the bytes, leaving some remaining. We should get
@@ -111,7 +118,7 @@
 			// On close we shouldn't get an error, even though there are
 			// trailing bytes still in the buffer.
 			err = verifiedReader.Close()
-			require.NoError(t, err, "digest (size ignored) should be correct on Close")
+			require.NoError(t, err, "digest should be correct on Close")
 		})
 	}
 }
@@ -147,44 +154,6 @@ func TestInvalidDigest(t *testing.T) {
 	}
 }
 
-func TestInvalidDigest_Trailing_NoExpectedSize(t *testing.T) {
-	for size := 1; size <= 16384; size *= 2 {
-		for delta := 1; delta-1 <= size/2; delta *= 2 {
-			t.Run(fmt.Sprintf("size:%d_delta:%d", size, delta), func(t *testing.T) {
-				// Fill buffer with random data.
-				buffer := new(bytes.Buffer)
-				_, err := io.CopyN(buffer, rand.Reader, int64(size))
-				require.NoError(t, err, "fill buffer with random data")
-
-				// Generate a correct hash (for a shorter buffer), but don't
-				// verify the size -- this is to make sure that we actually
-				// read all the bytes.
-				shortBuffer := buffer.Bytes()[:size-delta]
-				expectedDigest := digest.SHA256.FromBytes(shortBuffer)
-				verifiedReader := &VerifiedReadCloser{
-					Reader:         io.NopCloser(buffer),
-					ExpectedDigest: expectedDigest,
-					ExpectedSize:   -1,
-				}
-
-				// Read up to the end of the short buffer. We should get no
-				// errors.
-				_, err = io.CopyN(io.Discard, verifiedReader, int64(size-delta))
-				require.NoErrorf(t, err, "should get no errors when reading %d (%d-%d) bytes", size-delta, size, delta)
-
-				// Check that the digest does actually match right now.
-				verifiedReader.init()
-				err = verifiedReader.verify(nil)
-				require.NoError(t, err, "digest check should succeed at the point we finish the subset")
-
-				// On close we should get the error.
-				err = verifiedReader.Close()
-				assert.ErrorIs(t, err, ErrDigestMismatch, "digest should be invalid on Close") //nolint:testifylint // assert.*Error* makes more sense
-			})
-		}
-	}
-}
-
 func TestInvalidSize_LongBuffer(t *testing.T) {
 	for size := 1; size <= 16384; size *= 2 {
 		for delta := 1; delta-1 <= size/2; delta *= 2 {
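
The caller-facing contract of `GetVerifiedBlob` is unchanged by this patch (read the returned reader to EOF *and* Close it, checking both errors); what changes is that a descriptor with `Size < 0` is now rejected up front instead of silently disabling the size check. The following is a minimal sketch of how a caller might consume a blob under these rules; it is not part of the patch, and the package and helper name `readVerifiedBlob` are hypothetical.

```go
// Package example is a hypothetical package used only for this sketch.
package example

import (
	"context"
	"fmt"
	"io"

	ispec "github.com/opencontainers/image-spec/specs-go/v1"

	"github.com/opencontainers/umoci/oci/casext"
)

// readVerifiedBlob reads a blob through casext.Engine.GetVerifiedBlob,
// honouring the read-to-EOF-and-Close contract described in its doc comment.
func readVerifiedBlob(ctx context.Context, engine casext.Engine, desc ispec.Descriptor) ([]byte, error) {
	rdr, err := engine.GetVerifiedBlob(ctx, desc)
	if err != nil {
		// With this patch, a descriptor with Size < 0 fails here immediately.
		return nil, fmt.Errorf("get verified blob: %w", err)
	}
	// Digest or size mismatches can surface while reading...
	data, err := io.ReadAll(rdr)
	if err != nil {
		_ = rdr.Close()
		return nil, fmt.Errorf("read blob %s: %w", desc.Digest, err)
	}
	// ...or on Close, which also verifies the stream was fully consumed.
	if err := rdr.Close(); err != nil {
		return nil, fmt.Errorf("close blob %s: %w", desc.Digest, err)
	}
	return data, nil
}
```

The behavioural difference is that `ExpectedSize: -1` no longer means "skip the size check": `dir.GetBlob` now snapshots the blob file's size, and `VerifiedReadCloser` treats any negative `ExpectedSize` as an error on both `Read` and `Close`.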