+package provider
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/ipfs/boxo/blockservice"
+    "github.com/ipfs/boxo/blockstore"
+    "github.com/ipfs/boxo/exchange/offline"
+    bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice"
+    "github.com/ipfs/boxo/ipld/merkledag"
+    mdutils "github.com/ipfs/boxo/ipld/merkledag/test"
+    ipinner "github.com/ipfs/boxo/pinning/pinner"
+    "github.com/ipfs/boxo/pinning/pinner/dspinner"
+    "github.com/ipfs/go-datastore"
+    dssync "github.com/ipfs/go-datastore/sync"
+    "github.com/stretchr/testify/require"
+)
+
+// TestBufferedPinProvider checks that we can modify the pinset while reading
+// from the provider, because all elements of the pinset have been buffered
+// in memory.
+func TestBufferedPinProvider(t *testing.T) {
+    ctx := context.Background()
+
+    // Setup: in-memory datastore, blockstore, DAG service and pinner.
+    ds := dssync.MutexWrap(datastore.NewMapDatastore())
+    bs := blockstore.NewBlockstore(ds)
+    bserv := blockservice.New(bs, offline.Exchange(bs))
+    fetcher := bsfetcher.NewFetcherConfig(bserv)
+    dserv := merkledag.NewDAGService(bserv)
+    pinner, err := dspinner.New(ctx, ds, dserv)
+    require.NoError(t, err)
+    daggen := mdutils.NewDAGGenerator()
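+    // Build two small DAGs to pin; per the assertions at the end of the
+    // test, each one contributes 64 CIDs.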
+    root, _, err := daggen.MakeDagNode(dserv.Add, 1, 64)
+    require.NoError(t, err)
+    root2, _, err := daggen.MakeDagNode(dserv.Add, 1, 64)
+    require.NoError(t, err)
+
+    // Test with 0 pins first to ensure the provider handles an empty pinset.
+    zeroProv := NewPinnedProvider(false, pinner, fetcher)
+    zeroKeyChanF := NewBufferedProvider(zeroProv)
+    zeroPins, err := zeroKeyChanF(ctx)
+    require.NoError(t, err)
+    for range zeroPins {
+        t.Error("There should not be any pins")
+    }
+
+    // Pin the first DAG.
+    err = pinner.PinWithMode(ctx, root, ipinner.Recursive, "test")
+    require.NoError(t, err)
+
+    // Then open the keyChanF to read the pins. This triggers the pin
+    // query, but we don't read from the channel, so under normal
+    // conditions it would block.
+    pinProv := NewPinnedProvider(false, pinner, fetcher)
+    keyChanF := NewBufferedProvider(pinProv)
+    root1pins, err := keyChanF(ctx)
+    require.NoError(t, err)
+
+    // Give it time to buffer all the results, as this happens in the
+    // background.
+    time.Sleep(200 * time.Millisecond)
+
+    // If the previous query were still holding the pinset under a
+    // read-lock, we would not be able to write a second pin:
+    err = pinner.PinWithMode(ctx, root2, ipinner.Recursive, "test")
+    require.NoError(t, err)
+
+    // Now we trigger a second query.
+    pinProv2 := NewPinnedProvider(false, pinner, fetcher)
+    keyChanF2 := NewBufferedProvider(pinProv2)
+    root2pins, err := keyChanF2(ctx)
+    require.NoError(t, err)
+
+    // And finally proceed to read the pins. The second keyChan should
+    // contain both the root and root2 pins, while the first keyChan
+    // contains only the elements from the first pin, because they were
+    // all buffered before the second pin happened.
+    root1count := 0
+    root2count := 0
+    for range root2pins {
+        root2count++
+    }
+    for range root1pins {
+        root1count++
+    }
+    require.Equal(t, 64, root1count, "first pin should have provided 64 cids")
+    require.Equal(t, 64+64, root2count, "second pin should have provided 128 cids")
+}
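
For context on what the test exercises, here is a minimal sketch of a buffered provider along these lines. It is an illustration, not the implementation added by this PR: bufferedProviderSketch is a hypothetical name, and it assumes the provider package's KeyChanFunc shape, func(context.Context) (<-chan cid.Cid, error). The idea is to drain the wrapped provider into memory in a goroutine, so the underlying pin query (and any read-lock it holds on the pinset) completes regardless of how slowly the returned channel is consumed.

package provider

import (
    "context"

    "github.com/ipfs/go-cid"
)

// bufferedProviderSketch is a hypothetical illustration, not the PR's actual
// NewBufferedProvider: it drains the wrapped KeyChanFunc into a slice first,
// then replays the buffered CIDs, so the underlying query finishes even if
// nobody reads from the returned channel.
func bufferedProviderSketch(inner KeyChanFunc) KeyChanFunc {
    return func(ctx context.Context) (<-chan cid.Cid, error) {
        src, err := inner(ctx)
        if err != nil {
            return nil, err
        }
        out := make(chan cid.Cid)
        go func() {
            defer close(out)
            var buf []cid.Cid
            for c := range src { // read everything into memory up front
                buf = append(buf, c)
            }
            for _, c := range buf { // then replay at the consumer's pace
                select {
                case out <- c:
                case <-ctx.Done():
                    return
                }
            }
        }()
        return out, nil
    }
}

Under that assumption, the first channel (root1pins) only ever sees the 64 CIDs that existed when it was buffered, while the second query, started after the second pin, sees all 128.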