diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 27c50181..43649112 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -32,12 +32,12 @@ jobs: run: go test ./... - name: Test Noasm - run: go test -tags=noasm&&go test -no-avx512&&go test -no-avx512 -no-avx2&&go test -no-avx512 -no-avx2 -no-ssse3 + run: go test -tags=noasm -short&&go test -short -no-avx512&&go test -short -no-avx512 -no-avx2&&go test -no-avx512 -no-avx2 -no-ssse3 -short - name: Test Race env: CGO_ENABLED: 1 - run: go test -cpu="1,4" -short -race -v . + run: go test -cpu="1,4" -short -race -v -timeout 20m . build-special: env: diff --git a/galoisAvx512_amd64_test.go b/galoisAvx512_amd64_test.go index 6792e984..6b234c4f 100644 --- a/galoisAvx512_amd64_test.go +++ b/galoisAvx512_amd64_test.go @@ -326,14 +326,14 @@ func testCodeSomeShardsAvx512WithLength(t *testing.T, ds, ps, l int, parallel bo shards, _ := enc.Split(data) // Fill shards to encode with garbage - for i := r.DataShards; i < r.DataShards+r.ParityShards; i++ { + for i := r.dataShards; i < r.dataShards+r.parityShards; i++ { rand.Read(shards[i]) } if parallel { - r.codeSomeShardsAvx512P(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0])) + r.codeSomeShardsAvx512P(r.parity, shards[:r.dataShards], shards[r.dataShards:], len(shards[0])) } else { - r.codeSomeShardsAvx512(r.parity, shards[:r.DataShards], shards[r.DataShards:r.DataShards+r.ParityShards], len(shards[0])) + r.codeSomeShardsAvx512(r.parity, shards[:r.dataShards], shards[r.dataShards:r.dataShards+r.parityShards], len(shards[0])) } correct, _ := r.Verify(shards) diff --git a/leopard.go b/leopard.go index 9051b08a..fee93d8e 100644 --- a/leopard.go +++ b/leopard.go @@ -12,7 +12,6 @@ package reedsolomon import ( "bytes" - "errors" "io" "math/bits" "sync" @@ -21,11 +20,11 @@ import ( "github.com/klauspost/cpuid/v2" ) -// reedSolomonFF16 is like reedSolomon but for more than 256 total shards. 
-type reedSolomonFF16 struct { - DataShards int // Number of data shards, should not be modified. - ParityShards int // Number of parity shards, should not be modified. - Shards int // Total number of shards. Calculated, and should not be modified. +// leopardFF16 is like reedSolomon but for more than 256 total shards. +type leopardFF16 struct { + dataShards int // Number of data shards, should not be modified. + parityShards int // Number of parity shards, should not be modified. + totalShards int // Total number of shards. Calculated, and should not be modified. workPool sync.Pool @@ -33,10 +32,10 @@ type reedSolomonFF16 struct { } // newFF16 is like New, but for more than 256 total shards. -func newFF16(dataShards, parityShards int, opt options) (*reedSolomonFF16, error) { +func newFF16(dataShards, parityShards int, opt options) (*leopardFF16, error) { initConstants() - if dataShards <= 0 || parityShards < 0 { + if dataShards <= 0 || parityShards <= 0 { return nil, ErrInvShardNum } @@ -44,15 +43,33 @@ func newFF16(dataShards, parityShards int, opt options) (*reedSolomonFF16, error return nil, ErrMaxShardNum } - r := &reedSolomonFF16{ - DataShards: dataShards, - ParityShards: parityShards, - Shards: dataShards + parityShards, + r := &leopardFF16{ + dataShards: dataShards, + parityShards: parityShards, + totalShards: dataShards + parityShards, o: opt, } return r, nil } +var _ = Extensions(&leopardFF16{}) + +func (r *leopardFF16) ShardSizeMultiple() int { + return 64 +} + +func (r *leopardFF16) DataShards() int { + return r.dataShards +} + +func (r *leopardFF16) ParityShards() int { + return r.parityShards +} + +func (r *leopardFF16) TotalShards() int { + return r.totalShards +} + type ffe uint16 const ( @@ -87,8 +104,8 @@ type mul16LUT struct { // Stores lookup for avx2 var multiply256LUT *[order][8 * 16]byte -func (r *reedSolomonFF16) Encode(shards [][]byte) error { - if len(shards) != r.Shards { +func (r *leopardFF16) Encode(shards [][]byte) error { + if 
len(shards) != r.totalShards { return ErrTooFewShards } @@ -98,13 +115,13 @@ func (r *reedSolomonFF16) Encode(shards [][]byte) error { return r.encode(shards) } -func (r *reedSolomonFF16) encode(shards [][]byte) error { - shardSize := len(shards[0]) +func (r *leopardFF16) encode(shards [][]byte) error { + shardSize := shardSize(shards) if shardSize%64 != 0 { return ErrShardSize } - m := ceilPow2(r.ParityShards) + m := ceilPow2(r.parityShards) var work [][]byte if w, ok := r.workPool.Get().([][]byte); ok { work = w @@ -124,15 +141,15 @@ func (r *reedSolomonFF16) encode(shards [][]byte) error { defer r.workPool.Put(work) mtrunc := m - if r.DataShards < mtrunc { - mtrunc = r.DataShards + if r.dataShards < mtrunc { + mtrunc = r.dataShards } skewLUT := fftSkew[m-1:] sh := shards ifftDITEncoder( - sh[:r.DataShards], + sh[:r.dataShards], mtrunc, work, nil, // No xor output @@ -141,13 +158,13 @@ func (r *reedSolomonFF16) encode(shards [][]byte) error { &r.o, ) - lastCount := r.DataShards % m - if m >= r.DataShards { + lastCount := r.dataShards % m + if m >= r.dataShards { goto skip_body } // For sets of m data pieces: - for i := m; i+m <= r.DataShards; i += m { + for i := m; i+m <= r.dataShards; i += m { sh = sh[m:] skewLUT = skewLUT[m:] @@ -184,31 +201,31 @@ func (r *reedSolomonFF16) encode(shards [][]byte) error { skip_body: // work <- FFT(work, m, 0) - fftDIT(work, r.ParityShards, m, fftSkew[:], &r.o) + fftDIT(work, r.parityShards, m, fftSkew[:], &r.o) - for i, w := range work[:r.ParityShards] { - sh := shards[i+r.DataShards] + for i, w := range work[:r.parityShards] { + sh := shards[i+r.dataShards] if cap(sh) >= shardSize { sh = append(sh[:0], w...) 
} else { sh = w } - shards[i+r.DataShards] = sh + shards[i+r.dataShards] = sh } return nil } -func (r *reedSolomonFF16) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error { - return errors.New("not implemented") +func (r *leopardFF16) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error { + return ErrNotSupported } -func (r *reedSolomonFF16) Join(dst io.Writer, shards [][]byte, outSize int) error { +func (r *leopardFF16) Join(dst io.Writer, shards [][]byte, outSize int) error { // Do we have enough shards? - if len(shards) < r.DataShards { + if len(shards) < r.dataShards { return ErrTooFewShards } - shards = shards[:r.DataShards] + shards = shards[:r.dataShards] // Do we have enough data? size := 0 @@ -243,17 +260,17 @@ func (r *reedSolomonFF16) Join(dst io.Writer, shards [][]byte, outSize int) erro return nil } -func (r *reedSolomonFF16) Update(shards [][]byte, newDatashards [][]byte) error { - return errors.New("not implemented") +func (r *leopardFF16) Update(shards [][]byte, newDatashards [][]byte) error { + return ErrNotSupported } -func (r *reedSolomonFF16) Split(data []byte) ([][]byte, error) { +func (r *leopardFF16) Split(data []byte) ([][]byte, error) { if len(data) == 0 { return nil, ErrShortData } dataLen := len(data) // Calculate number of bytes per data shard. 
- perShard := (len(data) + r.DataShards - 1) / r.DataShards + perShard := (len(data) + r.dataShards - 1) / r.dataShards perShard = ((perShard + 63) / 64) * 64 if cap(data) > len(data) { @@ -262,20 +279,20 @@ func (r *reedSolomonFF16) Split(data []byte) ([][]byte, error) { // Only allocate memory if necessary var padding []byte - if len(data) < (r.Shards * perShard) { + if len(data) < (r.totalShards * perShard) { // calculate maximum number of full shards in `data` slice fullShards := len(data) / perShard - padding = make([]byte, r.Shards*perShard-perShard*fullShards) + padding = make([]byte, r.totalShards*perShard-perShard*fullShards) copy(padding, data[perShard*fullShards:]) data = data[0 : perShard*fullShards] } else { - for i := dataLen; i < dataLen+r.DataShards; i++ { + for i := dataLen; i < dataLen+r.dataShards; i++ { data[i] = 0 } } // Split into equal-length shards. - dst := make([][]byte, r.Shards) + dst := make([][]byte, r.totalShards) i := 0 for ; i < len(dst) && len(data) >= perShard; i++ { dst[i] = data[:perShard:perShard] @@ -290,20 +307,20 @@ func (r *reedSolomonFF16) Split(data []byte) ([][]byte, error) { return dst, nil } -func (r *reedSolomonFF16) ReconstructSome(shards [][]byte, required []bool) error { +func (r *leopardFF16) ReconstructSome(shards [][]byte, required []bool) error { return r.ReconstructData(shards) } -func (r *reedSolomonFF16) Reconstruct(shards [][]byte) error { +func (r *leopardFF16) Reconstruct(shards [][]byte) error { return r.reconstruct(shards, true) } -func (r *reedSolomonFF16) ReconstructData(shards [][]byte) error { +func (r *leopardFF16) ReconstructData(shards [][]byte) error { return r.reconstruct(shards, false) } -func (r *reedSolomonFF16) Verify(shards [][]byte) (bool, error) { - if len(shards) != r.Shards { +func (r *leopardFF16) Verify(shards [][]byte) (bool, error) { + if len(shards) != r.totalShards { return false, ErrTooFewShards } if err := checkShards(shards, false); err != nil { @@ -312,9 +329,9 @@ func (r 
*reedSolomonFF16) Verify(shards [][]byte) (bool, error) { // Re-encode parity shards to temporary storage. shardSize := len(shards[0]) - outputs := make([][]byte, r.Shards) - copy(outputs, shards[:r.DataShards]) - for i := r.DataShards; i < r.Shards; i++ { + outputs := make([][]byte, r.totalShards) + copy(outputs, shards[:r.dataShards]) + for i := r.dataShards; i < r.totalShards; i++ { outputs[i] = make([]byte, shardSize) } if err := r.Encode(outputs); err != nil { @@ -322,7 +339,7 @@ func (r *reedSolomonFF16) Verify(shards [][]byte) (bool, error) { } // Compare. - for i := r.DataShards; i < r.Shards; i++ { + for i := r.dataShards; i < r.totalShards; i++ { if !bytes.Equal(outputs[i], shards[i]) { return false, nil } @@ -330,8 +347,8 @@ func (r *reedSolomonFF16) Verify(shards [][]byte) (bool, error) { return true, nil } -func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { - if len(shards) != r.Shards { +func (r *leopardFF16) reconstruct(shards [][]byte, recoverAll bool) error { + if len(shards) != r.totalShards { return ErrTooFewShards } @@ -343,22 +360,22 @@ func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { // nothing to do. numberPresent := 0 dataPresent := 0 - for i := 0; i < r.Shards; i++ { + for i := 0; i < r.totalShards; i++ { if len(shards[i]) != 0 { numberPresent++ - if i < r.DataShards { + if i < r.dataShards { dataPresent++ } } } - if numberPresent == r.Shards || !recoverAll && dataPresent == r.DataShards { + if numberPresent == r.totalShards || !recoverAll && dataPresent == r.dataShards { // Cool. All of the shards have data. We don't // need to do anything. return nil } // Check if we have enough to reconstruct. 
- if numberPresent < r.DataShards { + if numberPresent < r.dataShards { return ErrTooFewShards } @@ -367,27 +384,27 @@ func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { return ErrShardSize } - m := ceilPow2(r.ParityShards) - n := ceilPow2(m + r.DataShards) + m := ceilPow2(r.parityShards) + n := ceilPow2(m + r.dataShards) // Fill in error locations. var errLocs [order]ffe - for i := 0; i < r.ParityShards; i++ { - if len(shards[i+r.DataShards]) == 0 { + for i := 0; i < r.parityShards; i++ { + if len(shards[i+r.dataShards]) == 0 { errLocs[i] = 1 } } - for i := r.ParityShards; i < m; i++ { + for i := r.parityShards; i < m; i++ { errLocs[i] = 1 } - for i := 0; i < r.DataShards; i++ { + for i := 0; i < r.dataShards; i++ { if len(shards[i]) == 0 { errLocs[i+m] = 1 } } // Evaluate error locator polynomial - fwht(&errLocs, order, m+r.DataShards) + fwht(&errLocs, order, m+r.dataShards) for i := 0; i < order; i++ { errLocs[i] = ffe((uint(errLocs[i]) * uint(logWalsh[i])) % modulus) @@ -415,34 +432,34 @@ func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { // work <- recovery data - for i := 0; i < r.ParityShards; i++ { - if len(shards[i+r.DataShards]) != 0 { - mulgf16(work[i], shards[i+r.DataShards], errLocs[i], &r.o) + for i := 0; i < r.parityShards; i++ { + if len(shards[i+r.dataShards]) != 0 { + mulgf16(work[i], shards[i+r.dataShards], errLocs[i], &r.o) } else { memclr(work[i]) } } - for i := r.ParityShards; i < m; i++ { + for i := r.parityShards; i < m; i++ { memclr(work[i]) } // work <- original data - for i := 0; i < r.DataShards; i++ { + for i := 0; i < r.dataShards; i++ { if len(shards[i]) != 0 { mulgf16(work[m+i], shards[i], errLocs[m+i], &r.o) } else { memclr(work[m+i]) } } - for i := m + r.DataShards; i < n; i++ { + for i := m + r.dataShards; i < n; i++ { memclr(work[i]) } // work <- IFFT(work, n, 0) ifftDITDecoder( - m+r.DataShards, + m+r.dataShards, work, n, fftSkew[:], @@ -458,7 +475,7 @@ func (r 
*reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { // work <- FFT(work, n, 0) truncated to m + dataShards - outputCount := m + r.DataShards + outputCount := m + r.dataShards fftDIT(work, outputCount, n, fftSkew[:], &r.o) @@ -468,9 +485,9 @@ func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { // mul_mem(x, y, log_m, ) equals x[] = y[] * log_m // // mem layout: [Recovery Data (Power of Two = M)] [Original Data (K)] [Zero Padding out to N] - end := r.DataShards + end := r.dataShards if recoverAll { - end = r.Shards + end = r.totalShards } for i := 0; i < end; i++ { if len(shards[i]) != 0 { @@ -481,9 +498,9 @@ func (r *reedSolomonFF16) reconstruct(shards [][]byte, recoverAll bool) error { } else { shards[i] = make([]byte, shardSize) } - if i >= r.DataShards { + if i >= r.dataShards { // Parity shard. - mulgf16(shards[i], work[i-r.DataShards], modulus-errLocs[i-r.DataShards], &r.o) + mulgf16(shards[i], work[i-r.dataShards], modulus-errLocs[i-r.dataShards], &r.o) } else { // Data shard. mulgf16(shards[i], work[i+m], modulus-errLocs[i+m], &r.o) diff --git a/leopard_test.go b/leopard_test.go index bbe6346b..8c3fceef 100644 --- a/leopard_test.go +++ b/leopard_test.go @@ -2,7 +2,6 @@ package reedsolomon import ( "bytes" - "math/rand" "testing" ) @@ -126,7 +125,6 @@ func TestEncoderReconstructFailLeo(t *testing.T) { func TestSplitJoinLeo(t *testing.T) { var data = make([]byte, (250<<10)-1) - rand.Seed(0) fillRandom(data) enc, _ := New(500, 300, testOptions()...) diff --git a/options.go b/options.go index 220d3394..83dd2cd3 100644 --- a/options.go +++ b/options.go @@ -222,9 +222,12 @@ func WithCustomMatrix(customMatrix [][]byte) Option { } } -// WithLeopard will always use leopard for encoding. +// WithLeopardGF16 will always use leopard GF16 for encoding, +// even when there is less than 256 shards. +// This will likely improve reconstruction time for some setups. 
+// This is not compatible with Leopard output for <= 256 shards. // Note that Leopard places certain restrictions on use see other documentation. -func WithLeopard(enabled bool) Option { +func WithLeopardGF16(enabled bool) Option { return func(o *options) { o.withLeopard = &enabled } diff --git a/race_none_test.go b/race_none_test.go new file mode 100644 index 00000000..3c0d24ba --- /dev/null +++ b/race_none_test.go @@ -0,0 +1,8 @@ +// Copyright 2022, Klaus Post, see LICENSE for details. + +//go:build !race +// +build !race + +package reedsolomon + +const raceEnabled = false diff --git a/race_test.go b/race_test.go new file mode 100644 index 00000000..417a0e55 --- /dev/null +++ b/race_test.go @@ -0,0 +1,8 @@ +// Copyright 2022, Klaus Post, see LICENSE for details. + +//go:build race +// +build race + +package reedsolomon + +const raceEnabled = true diff --git a/reedsolomon.go b/reedsolomon.go index ec5d0c9c..a2e58866 100644 --- a/reedsolomon.go +++ b/reedsolomon.go @@ -125,6 +125,22 @@ type Encoder interface { Join(dst io.Writer, shards [][]byte, outSize int) error } +// Extensions is an optional interface. +// All returned instances will support this interface. +type Extensions interface { + // ShardSizeMultiple will return the size the shard sizes must be a multiple of. + ShardSizeMultiple() int + + // DataShards will return the number of data shards. + DataShards() int + + // ParityShards will return the number of parity shards. + ParityShards() int + + // TotalShards will return the total number of shards. + TotalShards() int +} + const ( avx2CodeGenMinSize = 64 avx2CodeGenMinShards = 3 @@ -138,9 +154,9 @@ const ( // distribution of datashards and parity shards. // Construct if using New() type reedSolomon struct { - DataShards int // Number of data shards, should not be modified. - ParityShards int // Number of parity shards, should not be modified. - Shards int // Total number of shards. Calculated, and should not be modified. 
+ dataShards int // Number of data shards, should not be modified. + parityShards int // Number of parity shards, should not be modified. + totalShards int // Total number of shards. Calculated, and should not be modified. m matrix tree *inversionTree parity [][]byte @@ -148,6 +164,24 @@ type reedSolomon struct { mPool sync.Pool } +var _ = Extensions(&reedSolomon{}) + +func (r *reedSolomon) ShardSizeMultiple() int { + return 1 +} + +func (r *reedSolomon) DataShards() int { + return r.dataShards +} + +func (r *reedSolomon) ParityShards() int { + return r.parityShards +} + +func (r *reedSolomon) TotalShards() int { + return r.totalShards +} + // ErrInvShardNum will be returned by New, if you attempt to create // an Encoder with less than one data shard or less than zero parity // shards. @@ -158,6 +192,9 @@ var ErrInvShardNum = errors.New("cannot create Encoder with less than one data s // GF(2^8). var ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards") +// ErrNotSupported is returned when an operation is not supported. +var ErrNotSupported = errors.New("operation not supported") + // buildMatrix creates the matrix to use for encoding, given the // number of data shards and the number of total shards. 
// @@ -364,7 +401,7 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) { } if (dataShards+parityShards > 256 && o.withLeopard == nil) || - (o.withLeopard != nil && *o.withLeopard == true) { + (o.withLeopard != nil && *o.withLeopard == true && parityShards > 0) { return newFF16(dataShards, parityShards, o) } if dataShards+parityShards > 256 { @@ -372,9 +409,9 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) { } r := reedSolomon{ - DataShards: dataShards, - ParityShards: parityShards, - Shards: dataShards + parityShards, + dataShards: dataShards, + parityShards: parityShards, + totalShards: dataShards + parityShards, o: o, } @@ -392,7 +429,7 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) { if len(r.o.customMatrix) < parityShards { return nil, errors.New("coding matrix must contain at least parityShards rows") } - r.m = make([][]byte, r.Shards) + r.m = make([][]byte, r.totalShards) for i := 0; i < dataShards; i++ { r.m[i] = make([]byte, dataShards) r.m[i][i] = 1 @@ -405,15 +442,15 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) { copy(r.m[dataShards+k], row) } case r.o.fastOneParity && parityShards == 1: - r.m, err = buildXorMatrix(dataShards, r.Shards) + r.m, err = buildXorMatrix(dataShards, r.totalShards) case r.o.useCauchy: - r.m, err = buildMatrixCauchy(dataShards, r.Shards) + r.m, err = buildMatrixCauchy(dataShards, r.totalShards) case r.o.usePAR1Matrix: - r.m, err = buildMatrixPAR1(dataShards, r.Shards) + r.m, err = buildMatrixPAR1(dataShards, r.totalShards) case r.o.useJerasureMatrix: - r.m, err = buildMatrixJerasure(dataShards, r.Shards) + r.m, err = buildMatrixJerasure(dataShards, r.totalShards) default: - r.m, err = buildMatrix(dataShards, r.Shards) + r.m, err = buildMatrix(dataShards, r.totalShards) } if err != nil { return nil, err @@ -511,7 +548,7 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) { } if avx2CodeGen && 
r.o.useAVX2 { - sz := r.DataShards * r.ParityShards * 2 * 32 + sz := r.dataShards * r.parityShards * 2 * 32 r.mPool.New = func() interface{} { return make([]byte, sz) } @@ -531,7 +568,7 @@ var ErrTooFewShards = errors.New("too few shards given") // The parity shards will always be overwritten and the data shards // will remain the same. func (r *reedSolomon) Encode(shards [][]byte) error { - if len(shards) != r.Shards { + if len(shards) != r.totalShards { return ErrTooFewShards } @@ -541,10 +578,10 @@ func (r *reedSolomon) Encode(shards [][]byte) error { } // Get the slice of output buffers. - output := shards[r.DataShards:] + output := shards[r.dataShards:] // Do the coding. - r.codeSomeShards(r.parity, shards[0:r.DataShards], output[:r.ParityShards], len(shards[0])) + r.codeSomeShards(r.parity, shards[0:r.dataShards], output[:r.parityShards], len(shards[0])) return nil } @@ -553,13 +590,13 @@ func (r *reedSolomon) Encode(shards [][]byte) error { // Data shards should only be delivered once. There is no check for this. // The parity shards will always be updated and the data shards will remain the unchanged. 
func (r *reedSolomon) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error { - if len(parity) != r.ParityShards { + if len(parity) != r.parityShards { return ErrTooFewShards } if len(parity) == 0 { return nil } - if idx < 0 || idx >= r.DataShards { + if idx < 0 || idx >= r.dataShards { return ErrInvShardNum } err := checkShards(parity, false) @@ -578,7 +615,7 @@ func (r *reedSolomon) EncodeIdx(dataShard []byte, idx int, parity [][]byte) erro for start < len(dataShard) { in := dataShard[start:end] - for iRow := 0; iRow < r.ParityShards; iRow++ { + for iRow := 0; iRow < r.parityShards; iRow++ { galMulSliceXor(r.parity[iRow][idx], in, parity[iRow][start:end], &r.o) } start = end @@ -594,11 +631,11 @@ func (r *reedSolomon) EncodeIdx(dataShard []byte, idx int, parity [][]byte) erro var ErrInvalidInput = errors.New("invalid input") func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error { - if len(shards) != r.Shards { + if len(shards) != r.totalShards { return ErrTooFewShards } - if len(newDatashards) != r.DataShards { + if len(newDatashards) != r.dataShards { return ErrTooFewShards } @@ -617,7 +654,7 @@ func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error { return ErrInvalidInput } } - for _, p := range shards[r.DataShards:] { + for _, p := range shards[r.dataShards:] { if p == nil { return ErrInvalidInput } @@ -626,10 +663,10 @@ func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error { shardSize := shardSize(shards) // Get the slice of output buffers. - output := shards[r.DataShards:] + output := shards[r.dataShards:] // Do the coding. 
- r.updateParityShards(r.parity, shards[0:r.DataShards], newDatashards[0:r.DataShards], output, r.ParityShards, shardSize) + r.updateParityShards(r.parity, shards[0:r.dataShards], newDatashards[0:r.dataShards], output, r.parityShards, shardSize) return nil } @@ -643,7 +680,7 @@ func (r *reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outpu return } - for c := 0; c < r.DataShards; c++ { + for c := 0; c < r.dataShards; c++ { in := newinputs[c] if in == nil { continue @@ -670,7 +707,7 @@ func (r *reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outp } wg.Add(1) go func(start, stop int) { - for c := 0; c < r.DataShards; c++ { + for c := 0; c < r.dataShards; c++ { in := newinputs[c] if in == nil { continue @@ -692,7 +729,7 @@ func (r *reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outp // Verify returns true if the parity shards contain the right data. // The data is the same format as Encode. No data is modified. func (r *reedSolomon) Verify(shards [][]byte) (bool, error) { - if len(shards) != r.Shards { + if len(shards) != r.totalShards { return false, ErrTooFewShards } err := checkShards(shards, false) @@ -701,10 +738,10 @@ func (r *reedSolomon) Verify(shards [][]byte) (bool, error) { } // Slice of buffers being checked. - toCheck := shards[r.DataShards:] + toCheck := shards[r.dataShards:] // Do the checking. - return r.checkSomeShards(r.parity, shards[:r.DataShards], toCheck[:r.ParityShards], len(shards[0])), nil + return r.checkSomeShards(r.parity, shards[:r.dataShards], toCheck[:r.parityShards], len(shards[0])), nil } func (r *reedSolomon) canAVX2C(byteCount int, inputs, outputs int) bool { @@ -714,11 +751,11 @@ func (r *reedSolomon) canAVX2C(byteCount int, inputs, outputs int) bool { } // Multiplies a subset of rows from a coding matrix by a full set of -// input shards to produce some output shards. +// input shards to produce some output shards. // 'matrixRows' is The rows from the matrix to use. 
// 'inputs' An array of byte arrays, each of which is one input shard. // The number of inputs used is determined by the length of each matrix row. -// outputs Byte arrays where the computed shards are stored. +// outputs Byte arrays where the computed shards are stored. // The number of outputs computed, and the // number of matrix rows used, is determined by // outputCount, which is the number of outputs to compute. @@ -1095,7 +1132,7 @@ func shardSize(shards [][]byte) int { // Given a list of shards, some of which contain data, fills in the // ones that don't have data. // -// The length of the array must be equal to Shards. +// The length of the array must be equal to the total number of shards. // You indicate that a shard is missing by setting it to nil or zero-length. // If a shard is zero-length but has sufficient capacity, that memory will // be used, otherwise a new []byte will be allocated. @@ -1114,7 +1151,7 @@ func (r *reedSolomon) Reconstruct(shards [][]byte) error { // Given a list of shards, some of which contain data, fills in the // data shards that don't have data. // -// The length of the array must be equal to Shards. +// The length of the array must be equal to the total number of shards. // You indicate that a shard is missing by setting it to nil or zero-length. // If a shard is zero-length but has sufficient capacity, that memory will // be used, otherwise a new []byte will be allocated. @@ -1132,9 +1169,9 @@ func (r *reedSolomon) ReconstructData(shards [][]byte) error { // // Given a list of shards, some of which contain data, fills in the // data shards indicated by true values in the "required" parameter. -// The length of "required" array must be equal to DataShards. +// The length of "required" array must be equal to the number of data shards. // -// The length of "shards" array must be equal to Shards. +// The length of "shards" array must be equal to the total number of shards. // You indicate that a shard is missing by setting it to nil or zero-length. 
// If a shard is zero-length but has sufficient capacity, that memory will // be used, otherwise a new []byte will be allocated. @@ -1148,16 +1185,16 @@ func (r *reedSolomon) ReconstructSome(shards [][]byte, required []bool) error { return r.reconstruct(shards, true, required) } -// reconstruct will recreate the missing data shards, and unless -// dataOnly is true, also the missing parity shards +// reconstruct will recreate the missing data shards, and unless +// dataOnly is true, also the missing parity shards // -// The length of "shards" array must be equal to Shards. +// The length of "shards" array must be equal to totalShards. // You indicate that a shard is missing by setting it to nil. // -// If there are too few shards to reconstruct the missing +// If there are too few shards to reconstruct the missing // ones, ErrTooFewShards will be returned. func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []bool) error { - if len(shards) != r.Shards || required != nil && len(required) < r.dataShards { + if len(shards) != r.totalShards || required != nil && len(required) < r.dataShards { return ErrTooFewShards } // Check arguments. @@ -1173,17 +1210,17 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo numberPresent := 0 dataPresent := 0 missingRequired := 0 - for i := 0; i < r.Shards; i++ { + for i := 0; i < r.totalShards; i++ { if len(shards[i]) != 0 { numberPresent++ - if i < r.DataShards { + if i < r.dataShards { dataPresent++ } } else if required != nil && required[i] { missingRequired++ } } - if numberPresent == r.Shards || dataOnly && dataPresent == r.DataShards || + if numberPresent == r.totalShards || dataOnly && dataPresent == r.dataShards || required != nil && missingRequired == 0 { // Cool. All of the shards have data. We don't // need to do anything. 
@@ -1191,7 +1228,7 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo } // More complete sanity check - if numberPresent < r.DataShards { + if numberPresent < r.dataShards { return ErrTooFewShards } @@ -1202,11 +1239,11 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo // // Also, create an array of indices of the valid rows we do have // and the invalid rows we don't have up until we have enough valid rows. - subShards := make([][]byte, r.DataShards) - validIndices := make([]int, r.DataShards) + subShards := make([][]byte, r.dataShards) + validIndices := make([]int, r.dataShards) invalidIndices := make([]int, 0) subMatrixRow := 0 - for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ { + for matrixRow := 0; matrixRow < r.totalShards && subMatrixRow < r.dataShards; matrixRow++ { if len(shards[matrixRow]) != 0 { subShards[subMatrixRow] = shards[matrixRow] validIndices[subMatrixRow] = matrixRow @@ -1228,9 +1265,9 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo // shards that we have and build a square matrix. This // matrix could be used to generate the shards that we have // from the original data. - subMatrix, _ := newMatrix(r.DataShards, r.DataShards) + subMatrix, _ := newMatrix(r.dataShards, r.dataShards) for subMatrixRow, validIndex := range validIndices { - for c := 0; c < r.DataShards; c++ { + for c := 0; c < r.dataShards; c++ { subMatrix[subMatrixRow][c] = r.m[validIndex][c] } } @@ -1246,7 +1283,7 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo // Cache the inverted matrix in the tree for future use keyed on the // indices of the invalid rows. 
- err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards) + err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.totalShards) if err != nil { return err } @@ -1257,11 +1294,11 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo // The input to the coding is all of the shards we actually // have, and the output is the missing data shards. The computation // is done using the special decode matrix we just built. - outputs := make([][]byte, r.ParityShards) - matrixRows := make([][]byte, r.ParityShards) + outputs := make([][]byte, r.parityShards) + matrixRows := make([][]byte, r.parityShards) outputCount := 0 - for iShard := 0; iShard < r.DataShards; iShard++ { + for iShard := 0; iShard < r.dataShards; iShard++ { if len(shards[iShard]) == 0 && (required == nil || required[iShard]) { if cap(shards[iShard]) >= shardSize { shards[iShard] = shards[iShard][0:shardSize] @@ -1287,7 +1324,7 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo // any that we just calculated. The output is whichever of the // data shards were missing. 
outputCount = 0 - for iShard := r.DataShards; iShard < r.Shards; iShard++ { + for iShard := r.dataShards; iShard < r.totalShards; iShard++ { if len(shards[iShard]) == 0 && (required == nil || required[iShard]) { if cap(shards[iShard]) >= shardSize { shards[iShard] = shards[iShard][0:shardSize] @@ -1295,11 +1332,11 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []boo shards[iShard] = make([]byte, shardSize) } outputs[outputCount] = shards[iShard] - matrixRows[outputCount] = r.parity[iShard-r.DataShards] + matrixRows[outputCount] = r.parity[iShard-r.dataShards] outputCount++ } } - r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], shardSize) + r.codeSomeShards(matrixRows, shards[:r.dataShards], outputs[:outputCount], shardSize) return nil } @@ -1325,7 +1362,7 @@ func (r *reedSolomon) Split(data []byte) ([][]byte, error) { } dataLen := len(data) // Calculate number of bytes per data shard. - perShard := (len(data) + r.DataShards - 1) / r.DataShards + perShard := (len(data) + r.dataShards - 1) / r.dataShards if cap(data) > len(data) { data = data[:cap(data)] @@ -1333,20 +1370,20 @@ func (r *reedSolomon) Split(data []byte) ([][]byte, error) { // Only allocate memory if necessary var padding []byte - if len(data) < (r.Shards * perShard) { + if len(data) < (r.totalShards * perShard) { // calculate maximum number of full shards in `data` slice fullShards := len(data) / perShard - padding = make([]byte, r.Shards*perShard-perShard*fullShards) + padding = make([]byte, r.totalShards*perShard-perShard*fullShards) copy(padding, data[perShard*fullShards:]) data = data[0 : perShard*fullShards] } else { - for i := dataLen; i < dataLen+r.DataShards; i++ { + for i := dataLen; i < dataLen+r.dataShards; i++ { data[i] = 0 } } // Split into equal-length shards. 
- dst := make([][]byte, r.Shards) + dst := make([][]byte, r.totalShards) i := 0 for ; i < len(dst) && len(data) >= perShard; i++ { dst[i] = data[:perShard:perShard] @@ -1375,10 +1412,10 @@ var ErrReconstructRequired = errors.New("reconstruction required as one or more // If one or more required data shards are nil, ErrReconstructRequired will be returned. func (r *reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error { // Do we have enough shards? - if len(shards) < r.DataShards { + if len(shards) < r.dataShards { return ErrTooFewShards } - shards = shards[:r.DataShards] + shards = shards[:r.dataShards] // Do we have enough data? size := 0 diff --git a/reedsolomon_test.go b/reedsolomon_test.go index 820b9d95..c8a0ea67 100644 --- a/reedsolomon_test.go +++ b/reedsolomon_test.go @@ -9,6 +9,7 @@ package reedsolomon import ( "bytes" + "errors" "flag" "fmt" "math/rand" @@ -16,6 +17,7 @@ import ( "runtime" "strconv" "testing" + "time" ) var noSSE2 = flag.Bool("no-sse2", !defaultOptions.useSSE2, "Disable SSE2") @@ -174,7 +176,10 @@ func testOpts() [][]Option { {WithMaxGoroutines(1), WithMinSplitSize(500000), WithSSSE3(false), WithAVX2(false), WithAVX512(false)}, {WithAutoGoroutines(50000), WithMinSplitSize(500)}, {WithInversionCache(false)}, + {WithJerasureMatrix()}, + {WithLeopardGF16(true)}, } + for _, o := range opts[:] { if defaultOptions.useSSSE3 { n := make([]Option, len(o), len(o)+1) @@ -198,27 +203,67 @@ func testOpts() [][]Option { return opts } +func parallelIfNotShort(t *testing.T) { + if !testing.Short() { + t.Parallel() + } +} + func TestEncoding(t *testing.T) { t.Run("default", func(t *testing.T) { + parallelIfNotShort(t) testEncoding(t, testOptions()...) }) - t.Run("default-dx", func(t *testing.T) { + t.Run("default-idx", func(t *testing.T) { + parallelIfNotShort(t) testEncodingIdx(t, testOptions()...) }) - for i, o := range testOpts() { - t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) { - testEncoding(t, o...) 
- }) - if !testing.Short() { - t.Run(fmt.Sprintf("idx-opt-%d", i), func(t *testing.T) { - testEncodingIdx(t, o...) + if testing.Short() { + return + } + // Spread somewhat, but don't overload... + to := testOpts() + to2 := to[len(to)/2:] + to = to[:len(to)/2] + t.Run("reg", func(t *testing.T) { + parallelIfNotShort(t) + for i, o := range to { + t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) { + testEncoding(t, o...) + }) + } + }) + t.Run("reg2", func(t *testing.T) { + parallelIfNotShort(t) + for i, o := range to2 { + t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) { + testEncoding(t, o...) }) } + }) + if !testing.Short() { + t.Run("idx", func(t *testing.T) { + parallelIfNotShort(t) + for i, o := range to { + t.Run(fmt.Sprintf("idx-opt-%d", i), func(t *testing.T) { + testEncodingIdx(t, o...) + }) + } + }) + t.Run("idx2", func(t *testing.T) { + parallelIfNotShort(t) + for i, o := range to2 { + t.Run(fmt.Sprintf("idx-opt-%d", i), func(t *testing.T) { + testEncodingIdx(t, o...) + }) + } + }) + } } // matrix sizes to test. -// note that par1 matric will fail on some combinations. +// note that par1 matrix will fail on some combinations. var testSizes = [][2]int{ {1, 0}, {3, 0}, {5, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 0}, {41, 0}, {49, 0}, {1, 1}, {1, 2}, {3, 3}, {3, 1}, {5, 3}, {8, 4}, {10, 30}, {12, 10}, {14, 7}, {41, 17}, {49, 1}, {5, 20}, @@ -235,21 +280,21 @@ func testEncoding(t *testing.T, o ...Option) { sz := testDataSizes if testing.Short() || data+parity > 256 { sz = testDataSizesShort + if raceEnabled { + sz = testDataSizesShort[:1] + } } for _, perShard := range sz { - if data+parity > 256 { - if perShard > 1000 { - t.Skip("long tests not needed. Not length sensitive") - } - // Round up to 64 bytes. - perShard = (perShard + 63) &^ 63 + r, err := New(data, parity, testOptions(o...)...) 
+ if err != nil { + t.Fatal(err) } + + mul := r.(Extensions).ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + t.Run(fmt.Sprint(perShard), func(t *testing.T) { - r, err := New(data, parity, testOptions(o...)...) - if err != nil { - t.Fatal(err) - } shards := make([][]byte, data+parity) for s := range shards { shards[s] = make([]byte, perShard) @@ -334,20 +379,25 @@ func testEncodingIdx(t *testing.T, o ...Option) { data, parity := size[0], size[1] rng := rand.New(rand.NewSource(0xabadc0cac01a)) t.Run(fmt.Sprintf("%dx%d", data, parity), func(t *testing.T) { - if data+parity > 256 { - t.Skip("EncodingIdx not supported for total shards > 256") - } + sz := testDataSizes if testing.Short() { sz = testDataSizesShort } for _, perShard := range sz { + r, err := New(data, parity, testOptions(o...)...) + if err != nil { + t.Fatal(err) + } + if err := r.EncodeIdx(nil, 0, nil); err == ErrNotSupported { + t.Skip(err) + return + } + mul := r.(Extensions).ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + t.Run(fmt.Sprint(perShard), func(t *testing.T) { - r, err := New(data, parity, testOptions(o...)...) - if err != nil { - t.Fatal(err) - } shards := make([][]byte, data+parity) for s := range shards { shards[s] = make([]byte, perShard) @@ -435,6 +485,7 @@ func testEncodingIdx(t *testing.T, o ...Option) { } func TestUpdate(t *testing.T) { + parallelIfNotShort(t) for i, o := range testOpts() { t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) { testUpdate(t, o...) @@ -443,7 +494,6 @@ func TestUpdate(t *testing.T) { } func testUpdate(t *testing.T, o ...Option) { - rand.Seed(0) for _, size := range [][2]int{{10, 3}, {17, 2}} { data, parity := size[0], size[1] t.Run(fmt.Sprintf("%dx%d", data, parity), func(t *testing.T) { @@ -452,11 +502,15 @@ func testUpdate(t *testing.T, o ...Option) { sz = []int{50000} } for _, perShard := range sz { + r, err := New(data, parity, testOptions(o...)...) 
+ if err != nil { + t.Fatal(err) + } + mul := r.(Extensions).ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + t.Run(fmt.Sprint(perShard), func(t *testing.T) { - r, err := New(data, parity, testOptions(o...)...) - if err != nil { - t.Fatal(err) - } + shards := make([][]byte, data+parity) for s := range shards { shards[s] = make([]byte, perShard) @@ -484,6 +538,10 @@ func testUpdate(t *testing.T, o ...Option) { fillRandom(newdatashards[s]) err = r.Update(shards, newdatashards) if err != nil { + if errors.Is(err, ErrNotSupported) { + t.Skip(err) + return + } t.Fatal(err) } shards[s] = newdatashards[s] @@ -549,6 +607,7 @@ func testUpdate(t *testing.T, o ...Option) { } func TestReconstruct(t *testing.T) { + parallelIfNotShort(t) testReconstruct(t) for i, o := range testOpts() { t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) { @@ -563,12 +622,16 @@ func testReconstruct(t *testing.T, o ...Option) { if err != nil { t.Fatal(err) } + xt := r.(Extensions) + mul := xt.ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + + t.Log(perShard) shards := make([][]byte, 13) for s := range shards { shards[s] = make([]byte, perShard) } - rand.Seed(0) for s := 0; s < 13; s++ { fillRandom(shards[s]) } @@ -644,7 +707,6 @@ func TestReconstructCustom(t *testing.T) { shards[s] = make([]byte, perShard) } - rand.Seed(0) for s := 0; s < len(shards); s++ { fillRandom(shards[s]) } @@ -693,6 +755,7 @@ func TestReconstructCustom(t *testing.T) { } func TestReconstructData(t *testing.T) { + parallelIfNotShort(t) testReconstructData(t) for i, o := range testOpts() { t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) { @@ -707,14 +770,16 @@ func testReconstructData(t *testing.T, o ...Option) { if err != nil { t.Fatal(err) } + mul := r.(Extensions).ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + shards := make([][]byte, 13) for s := range shards { shards[s] = make([]byte, perShard) } - rand.Seed(0) for s := 0; s < 13; s++ { - 
fillRandom(shards[s]) + fillRandom(shards[s], int64(s)) } err = r.Encode(shards) @@ -751,7 +816,8 @@ func testReconstructData(t *testing.T, o ...Option) { } if shardsCopy[2] != nil || shardsCopy[5] != nil || shardsCopy[6] != nil { - t.Fatal("ReconstructSome reconstructed extra shards") + // This is expected in some cases. + t.Log("ReconstructSome reconstructed extra shards") } // Reconstruct with 10 shards present. Use pre-allocated memory for one of them. @@ -759,7 +825,7 @@ func testReconstructData(t *testing.T, o ...Option) { shards[2] = nil shard4 := shards[4] shards[4] = shard4[:0] - fillRandom(shard4) + fillRandom(shard4, 4) err = r.ReconstructData(shards) if err != nil { @@ -836,6 +902,7 @@ func testReconstructData(t *testing.T, o ...Option) { } func TestReconstructPAR1Singular(t *testing.T) { + parallelIfNotShort(t) perShard := 50 r, err := New(4, 4, testOptions(WithPAR1Matrix())...) if err != nil { @@ -846,7 +913,6 @@ func TestReconstructPAR1Singular(t *testing.T) { shards[s] = make([]byte, perShard) } - rand.Seed(0) for s := 0; s < 8; s++ { fillRandom(shards[s]) } @@ -873,6 +939,7 @@ func TestReconstructPAR1Singular(t *testing.T) { } func TestVerify(t *testing.T) { + parallelIfNotShort(t) testVerify(t) for i, o := range testOpts() { t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) { @@ -887,14 +954,16 @@ func testVerify(t *testing.T, o ...Option) { if err != nil { t.Fatal(err) } + mul := r.(Extensions).ShardSizeMultiple() + perShard = ((perShard + mul - 1) / mul) * mul + shards := make([][]byte, 14) for s := range shards { shards[s] = make([]byte, perShard) } - rand.Seed(0) for s := 0; s < 10; s++ { - fillRandom(shards[s]) + fillRandom(shards[s], 0) } err = r.Encode(shards) @@ -911,7 +980,7 @@ func testVerify(t *testing.T, o ...Option) { } // Put in random data. 
Verification should fail - fillRandom(shards[10]) + fillRandom(shards[10], 1) ok, err = r.Verify(shards) if err != nil { t.Fatal(err) @@ -925,7 +994,7 @@ func testVerify(t *testing.T, o ...Option) { t.Fatal(err) } // Fill a data segment with random data - fillRandom(shards[0]) + fillRandom(shards[0], 2) ok, err = r.Verify(shards) if err != nil { t.Fatal(err) @@ -997,9 +1066,14 @@ func TestOneEncode(t *testing.T) { } -func fillRandom(p []byte) { +func fillRandom(p []byte, seed ...int64) { + src := rand.NewSource(time.Now().UnixNano()) + if len(seed) > 0 { + src = rand.NewSource(seed[0]) + } + rng := rand.New(src) for i := 0; i < len(p); i += 7 { - val := rand.Int63() + val := rng.Int63() for j := 0; i+j < len(p) && j < 7; j++ { p[i+j] = byte(val) val >>= 8 @@ -1017,7 +1091,6 @@ func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int) { shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < dataShards; s++ { fillRandom(shards[s]) } @@ -1043,7 +1116,6 @@ func benchmarkDecode(b *testing.B, dataShards, parityShards, shardSize int) { shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < dataShards; s++ { fillRandom(shards[s]) } @@ -1178,7 +1250,6 @@ func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) { shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < dataShards; s++ { fillRandom(shards[s]) } @@ -1260,7 +1331,6 @@ func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int) shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < dataShards; s++ { fillRandom(shards[s]) } @@ -1344,7 +1414,6 @@ func benchmarkReconstructData(b *testing.B, dataShards, parityShards, shardSize shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < dataShards; s++ { fillRandom(shards[s]) } @@ -1426,7 +1495,6 @@ func benchmarkReconstructP(b *testing.B, dataShards, parityShards, shardSize int shards[s] = make([]byte, shardSize) } - rand.Seed(0) for s := 0; s < 
dataShards; s++ { fillRandom(shards[s]) } @@ -1457,6 +1525,7 @@ func BenchmarkReconstructP10x5x20000(b *testing.B) { } func TestEncoderReconstruct(t *testing.T) { + parallelIfNotShort(t) testEncoderReconstruct(t) for _, o := range testOpts() { testEncoderReconstruct(t, o...) @@ -1465,7 +1534,7 @@ func TestEncoderReconstruct(t *testing.T) { func testEncoderReconstruct(t *testing.T, o ...Option) { // Create some sample data - var data = make([]byte, 250000) + var data = make([]byte, 250<<10) fillRandom(data) // Create 5 data slices of 50000 elements each @@ -1542,7 +1611,6 @@ func testEncoderReconstruct(t *testing.T, o ...Option) { func TestSplitJoin(t *testing.T) { var data = make([]byte, 250000) - rand.Seed(0) fillRandom(data) enc, _ := New(5, 3, testOptions()...) @@ -1590,11 +1658,11 @@ func TestCodeSomeShards(t *testing.T) { shards, _ := enc.Split(data) old := runtime.GOMAXPROCS(1) - r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:r.DataShards+r.ParityShards], len(shards[0])) + r.codeSomeShards(r.parity, shards[:r.dataShards], shards[r.dataShards:r.dataShards+r.parityShards], len(shards[0])) // hopefully more than 1 CPU runtime.GOMAXPROCS(runtime.NumCPU()) - r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:r.DataShards+r.ParityShards], len(shards[0])) + r.codeSomeShards(r.parity, shards[:r.dataShards], shards[r.dataShards:r.dataShards+r.parityShards], len(shards[0])) // reset MAXPROCS, otherwise testing complains runtime.GOMAXPROCS(old) @@ -1608,7 +1676,7 @@ func TestStandardMatrices(t *testing.T) { for i := 1; i < 256; i++ { i := i t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) { - t.Parallel() + parallelIfNotShort(t) // i == n.o. 
datashards var shards = make([][]byte, 255) for p := range shards { @@ -1669,7 +1737,7 @@ func TestCauchyMatrices(t *testing.T) { for i := 1; i < 256; i++ { i := i t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) { - t.Parallel() + parallelIfNotShort(t) var shards = make([][]byte, 255) for p := range shards { v := byte(i) @@ -1729,7 +1797,7 @@ func TestPar1Matrices(t *testing.T) { for i := 1; i < 256; i++ { i := i t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) { - t.Parallel() + parallelIfNotShort(t) var shards = make([][]byte, 255) for p := range shards { v := byte(i) @@ -1879,7 +1947,6 @@ func benchmarkParallel(b *testing.B, dataShards, parityShards, shardSize int) { // Create independent shards shardsCh := make(chan [][]byte, c) for i := 0; i < c; i++ { - rand.Seed(int64(i)) shards := make([][]byte, dataShards+parityShards) for s := range shards { shards[s] = make([]byte, shardSize) diff --git a/streaming.go b/streaming.go index ac5d978f..e3aaf008 100644 --- a/streaming.go +++ b/streaming.go @@ -223,18 +223,18 @@ func (r *rsStream) createSlice() [][]byte { // will be returned. If a parity writer returns an error, a // StreamWriteError will be returned. func (r *rsStream) Encode(data []io.Reader, parity []io.Writer) error { - if len(data) != r.r.DataShards { + if len(data) != r.r.dataShards { return ErrTooFewShards } - if len(parity) != r.r.ParityShards { + if len(parity) != r.r.parityShards { return ErrTooFewShards } all := r.createSlice() defer r.blockPool.Put(all) - in := all[:r.r.DataShards] - out := all[r.r.DataShards:] + in := all[:r.r.dataShards] + out := all[r.r.dataShards:] read := 0 for { @@ -429,7 +429,7 @@ func cWriteShards(out []io.Writer, in [][]byte) error { // If a shard stream returns an error, a StreamReadError type error // will be returned. 
func (r *rsStream) Verify(shards []io.Reader) (bool, error) { - if len(shards) != r.r.Shards { + if len(shards) != r.r.totalShards { return false, ErrTooFewShards } @@ -476,10 +476,10 @@ var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutual // However its integrity is not automatically verified. // Use the Verify function to check in case the data set is complete. func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error { - if len(valid) != r.r.Shards { + if len(valid) != r.r.totalShards { return ErrTooFewShards } - if len(fill) != r.r.Shards { + if len(fill) != r.r.totalShards { return ErrTooFewShards } @@ -490,7 +490,7 @@ func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error { if valid[i] != nil && fill[i] != nil { return ErrReconstructMismatch } - if i >= r.r.DataShards && fill[i] != nil { + if i >= r.r.dataShards && fill[i] != nil { reconDataOnly = false } } @@ -534,12 +534,12 @@ func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error { // If the total data size is less than outSize, ErrShortData will be returned. func (r *rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error { // Do we have enough shards? - if len(shards) < r.r.DataShards { + if len(shards) < r.r.dataShards { return ErrTooFewShards } // Trim off parity shards if any - shards = shards[:r.r.DataShards] + shards = shards[:r.r.dataShards] for i := range shards { if shards[i] == nil { return StreamReadError{Err: ErrShardNoData, Stream: i} @@ -575,7 +575,7 @@ func (r *rsStream) Split(data io.Reader, dst []io.Writer, size int64) error { if size == 0 { return ErrShortData } - if len(dst) != r.r.DataShards { + if len(dst) != r.r.dataShards { return ErrInvShardNum } @@ -586,10 +586,10 @@ func (r *rsStream) Split(data io.Reader, dst []io.Writer, size int64) error { } // Calculate number of bytes per shard. 
- perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards) + perShard := (size + int64(r.r.dataShards) - 1) / int64(r.r.dataShards) // Pad data to r.Shards*perShard. - padding := make([]byte, (int64(r.r.Shards)*perShard)-size) + padding := make([]byte, (int64(r.r.totalShards)*perShard)-size) data = io.MultiReader(data, bytes.NewBuffer(padding)) // Split into equal-length shards and copy.