diff --git a/.artifacts/sql-benchmark/base-strong.json b/.artifacts/sql-benchmark/base-strong.json new file mode 100644 index 00000000..01faf2a8 --- /dev/null +++ b/.artifacts/sql-benchmark/base-strong.json @@ -0,0 +1,207 @@ +{ + "generatedAt": "2026-02-22T22:54:59.805Z", + "projectId": "arcade-main", + "collectionAddress": "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4", + "warmup": 3, + "iterations": 20, + "operations": [ + { + "name": "listCollectionTokens:first-page", + "samplesMs": [ + 1995.28, + 1501.24, + 1503.38, + 1446.17, + 2131.37, + 1472.24, + 2352.88, + 1535.47, + 1833.8, + 1411.04, + 2062.23, + 1580.57, + 1965.12, + 1577.92, + 1389.06, + 1421.9, + 1511, + 1429.48, + 1801.66, + 1361.46 + ], + "stats": { + "count": 20, + "minMs": 1361.46, + "maxMs": 2352.88, + "meanMs": 1664.16, + "p50Ms": 1511, + "p95Ms": 2131.37 + } + }, + { + "name": "listCollectionTokens:next-page", + "samplesMs": [ + 1647.13, + 2836.37, + 1438.96, + 1576.93, + 3200.86, + 2931.14, + 2338.94, + 4353.14, + 7439.6, + 2768.53, + 1468.95, + 2491.73, + 2385.96, + 2063.57, + 1374.61, + 3304.32, + 1340.33, + 1609.78, + 1507.36, + 1737.16 + ], + "stats": { + "count": 20, + "minMs": 1340.33, + "maxMs": 7439.6, + "meanMs": 2490.77, + "p50Ms": 2063.57, + "p95Ms": 4353.14 + } + }, + { + "name": "listCollectionTokens:attribute-filters", + "samplesMs": [ + 269.14, + 267.22, + 248.83, + 247.73, + 258.7, + 237.34, + 260.73, + 280.82, + 241.46, + 274.55, + 282.14, + 240.22, + 242.14, + 256.68, + 260.22, + 240.92, + 256.68, + 245.33, + 265.6, + 251.26 + ], + "stats": { + "count": 20, + "minMs": 237.34, + "maxMs": 282.14, + "meanMs": 256.39, + "p50Ms": 256.68, + "p95Ms": 280.82 + } + }, + { + "name": "getCollectionOrders", + "samplesMs": [ + 292.9, + 349.56, + 329.62, + 443.33, + 273.72, + 272.06, + 286.63, + 296.99, + 271.14, + 336.32, + 282.29, + 304.84, + 342.12, + 351.88, + 306.36, + 377.74, + 337.91, + 372.2, + 301.46, + 322.97 + ], + "stats": { + "count": 20, + "minMs": 
271.14, + "maxMs": 443.33, + "meanMs": 322.6, + "p50Ms": 306.36, + "p95Ms": 377.74 + } + }, + { + "name": "listCollectionListings:verifyOwnership=false", + "samplesMs": [ + 365.9, + 366.62, + 312.59, + 329.52, + 313.09, + 282.68, + 284.3, + 279.26, + 334.12, + 274.08, + 337.22, + 327.74, + 294.43, + 274.17, + 286.97, + 305.18, + 260.88, + 269.59, + 373.3, + 303.77 + ], + "stats": { + "count": 20, + "minMs": 260.88, + "maxMs": 373.3, + "meanMs": 308.77, + "p50Ms": 303.77, + "p95Ms": 366.62 + } + }, + { + "name": "fetchTraitValues:beast id", + "samplesMs": [ + 281.78, + 244.38, + 241.64, + 248.39, + 247.26, + 229.08, + 237.24, + 257.78, + 245.49, + 271.99, + 239.82, + 248.23, + 323.17, + 304.83, + 236.83, + 261.9, + 255.46, + 238.17, + 258.92, + 248.3 + ], + "stats": { + "count": 20, + "minMs": 229.08, + "maxMs": 323.17, + "meanMs": 256.03, + "p50Ms": 248.23, + "p95Ms": 304.83 + } + } + ] +} \ No newline at end of file diff --git a/.artifacts/sql-benchmark/base-strong.md b/.artifacts/sql-benchmark/base-strong.md new file mode 100644 index 00000000..95c339f7 --- /dev/null +++ b/.artifacts/sql-benchmark/base-strong.md @@ -0,0 +1,16 @@ +## SQL Benchmark Report + +- Project: `arcade-main` +- Collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4` +- Generated: `2026-02-22T22:54:59.805Z` +- Warmup: `3` +- Iterations: `20` + +| Operation | p50 (ms) | p95 (ms) | mean (ms) | +| --- | ---: | ---: | ---: | +| listCollectionTokens:first-page | 1511 | 2131.37 | 1664.16 | +| listCollectionTokens:next-page | 2063.57 | 4353.14 | 2490.77 | +| listCollectionTokens:attribute-filters | 256.68 | 280.82 | 256.39 | +| getCollectionOrders | 306.36 | 377.74 | 322.6 | +| listCollectionListings:verifyOwnership=false | 303.77 | 366.62 | 308.77 | +| fetchTraitValues:beast id | 248.23 | 304.83 | 256.03 | \ No newline at end of file diff --git a/.artifacts/sql-benchmark/base.json b/.artifacts/sql-benchmark/base.json new file mode 100644 index 00000000..7b364b27 --- 
/dev/null +++ b/.artifacts/sql-benchmark/base.json @@ -0,0 +1,117 @@ +{ + "generatedAt": "2026-02-22T22:11:34.845Z", + "projectId": "arcade-main", + "collectionAddress": "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4", + "warmup": 1, + "iterations": 5, + "operations": [ + { + "name": "listCollectionTokens:first-page", + "samplesMs": [ + 1397.08, + 1736.85, + 909.75, + 1825.89, + 1042.25 + ], + "stats": { + "count": 5, + "minMs": 909.75, + "maxMs": 1825.89, + "meanMs": 1382.36, + "p50Ms": 1397.08, + "p95Ms": 1825.89 + } + }, + { + "name": "listCollectionTokens:next-page", + "samplesMs": [ + 900.35, + 1870.32, + 853.78, + 1907.47, + 807.06 + ], + "stats": { + "count": 5, + "minMs": 807.06, + "maxMs": 1907.47, + "meanMs": 1267.8, + "p50Ms": 900.35, + "p95Ms": 1907.47 + } + }, + { + "name": "listCollectionTokens:attribute-filters", + "samplesMs": [ + 226.51, + 223.68, + 222.39, + 227.22, + 229.62 + ], + "stats": { + "count": 5, + "minMs": 222.39, + "maxMs": 229.62, + "meanMs": 225.88, + "p50Ms": 226.51, + "p95Ms": 229.62 + } + }, + { + "name": "getCollectionOrders", + "samplesMs": [ + 248.45, + 251.37, + 293.99, + 244.36, + 260.68 + ], + "stats": { + "count": 5, + "minMs": 244.36, + "maxMs": 293.99, + "meanMs": 259.77, + "p50Ms": 251.37, + "p95Ms": 293.99 + } + }, + { + "name": "listCollectionListings:verifyOwnership=false", + "samplesMs": [ + 275.68, + 358.47, + 263.02, + 266.63, + 253.18 + ], + "stats": { + "count": 5, + "minMs": 253.18, + "maxMs": 358.47, + "meanMs": 283.4, + "p50Ms": 266.63, + "p95Ms": 358.47 + } + }, + { + "name": "fetchTraitValues:beast id", + "samplesMs": [ + 236.25, + 228.37, + 219.75, + 250.3, + 232.37 + ], + "stats": { + "count": 5, + "minMs": 219.75, + "maxMs": 250.3, + "meanMs": 233.41, + "p50Ms": 232.37, + "p95Ms": 250.3 + } + } + ] +} \ No newline at end of file diff --git a/.artifacts/sql-benchmark/base.md b/.artifacts/sql-benchmark/base.md new file mode 100644 index 00000000..3581d076 --- /dev/null +++ 
b/.artifacts/sql-benchmark/base.md @@ -0,0 +1,16 @@ +## SQL Benchmark Report + +- Project: `arcade-main` +- Collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4` +- Generated: `2026-02-22T22:11:34.845Z` +- Warmup: `1` +- Iterations: `5` + +| Operation | p50 (ms) | p95 (ms) | mean (ms) | +| --- | ---: | ---: | ---: | +| listCollectionTokens:first-page | 1397.08 | 1825.89 | 1382.36 | +| listCollectionTokens:next-page | 900.35 | 1907.47 | 1267.8 | +| listCollectionTokens:attribute-filters | 226.51 | 229.62 | 225.88 | +| getCollectionOrders | 251.37 | 293.99 | 259.77 | +| listCollectionListings:verifyOwnership=false | 266.63 | 358.47 | 283.4 | +| fetchTraitValues:beast id | 232.37 | 250.3 | 233.41 | \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head-compare.json b/.artifacts/sql-benchmark/head-compare.json new file mode 100644 index 00000000..81789a5d --- /dev/null +++ b/.artifacts/sql-benchmark/head-compare.json @@ -0,0 +1,117 @@ +{ + "generatedAt": "2026-02-22T22:12:00.569Z", + "projectId": "arcade-main", + "collectionAddress": "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4", + "warmup": 1, + "iterations": 5, + "operations": [ + { + "name": "listCollectionTokens:first-page", + "samplesMs": [ + 1980.76, + 1015.37, + 1811.09, + 1046.43, + 2000.73 + ], + "stats": { + "count": 5, + "minMs": 1015.37, + "maxMs": 2000.73, + "meanMs": 1570.88, + "p50Ms": 1811.09, + "p95Ms": 2000.73 + } + }, + { + "name": "listCollectionTokens:next-page", + "samplesMs": [ + 1868.71, + 1031.94, + 2094.58, + 763.02, + 1837.51 + ], + "stats": { + "count": 5, + "minMs": 763.02, + "maxMs": 2094.58, + "meanMs": 1519.15, + "p50Ms": 1837.51, + "p95Ms": 2094.58 + } + }, + { + "name": "listCollectionTokens:attribute-filters", + "samplesMs": [ + 217.57, + 218.38, + 221.56, + 225.41, + 222.8 + ], + "stats": { + "count": 5, + "minMs": 217.57, + "maxMs": 225.41, + "meanMs": 221.14, + "p50Ms": 221.56, + "p95Ms": 225.41 + } + }, + { + 
"name": "getCollectionOrders", + "samplesMs": [ + 345.23, + 243.88, + 410.91, + 243.3, + 264.79 + ], + "stats": { + "count": 5, + "minMs": 243.3, + "maxMs": 410.91, + "meanMs": 301.62, + "p50Ms": 264.79, + "p95Ms": 410.91 + } + }, + { + "name": "listCollectionListings:verifyOwnership=false", + "samplesMs": [ + 358.66, + 240.85, + 407.04, + 235.16, + 268.87 + ], + "stats": { + "count": 5, + "minMs": 235.16, + "maxMs": 407.04, + "meanMs": 302.12, + "p50Ms": 268.87, + "p95Ms": 407.04 + } + }, + { + "name": "fetchTraitValues:beast id", + "samplesMs": [ + 221.09, + 217.36, + 216.62, + 239.19, + 219.41 + ], + "stats": { + "count": 5, + "minMs": 216.62, + "maxMs": 239.19, + "meanMs": 222.73, + "p50Ms": 219.41, + "p95Ms": 239.19 + } + } + ] +} \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head-compare.md b/.artifacts/sql-benchmark/head-compare.md new file mode 100644 index 00000000..780485a0 --- /dev/null +++ b/.artifacts/sql-benchmark/head-compare.md @@ -0,0 +1,27 @@ +## SQL Benchmark Report + +- Project: `arcade-main` +- Collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4` +- Generated: `2026-02-22T22:12:00.569Z` +- Warmup: `1` +- Iterations: `5` + +| Operation | p50 (ms) | p95 (ms) | mean (ms) | +| --- | ---: | ---: | ---: | +| listCollectionTokens:first-page | 1811.09 | 2000.73 | 1570.88 | +| listCollectionTokens:next-page | 1837.51 | 2094.58 | 1519.15 | +| listCollectionTokens:attribute-filters | 221.56 | 225.41 | 221.14 | +| getCollectionOrders | 264.79 | 410.91 | 301.62 | +| listCollectionListings:verifyOwnership=false | 268.87 | 407.04 | 302.12 | +| fetchTraitValues:beast id | 219.41 | 239.19 | 222.73 | + +### Base vs Head + +| Operation | base p50 | head p50 | delta p50 | base p95 | head p95 | delta p95 | +| --- | ---: | ---: | ---: | ---: | ---: | ---: | +| listCollectionTokens:first-page | 1397.08 | 1811.09 | +29.63% | 1825.89 | 2000.73 | +9.58% | +| listCollectionTokens:next-page | 900.35 | 1837.51 | +104.09% | 
1907.47 | 2094.58 | +9.81% | +| listCollectionTokens:attribute-filters | 226.51 | 221.56 | -2.19% | 229.62 | 225.41 | -1.83% | +| getCollectionOrders | 251.37 | 264.79 | +5.34% | 293.99 | 410.91 | +39.77% | +| listCollectionListings:verifyOwnership=false | 266.63 | 268.87 | +0.84% | 358.47 | 407.04 | +13.55% | +| fetchTraitValues:beast id | 232.37 | 219.41 | -5.58% | 250.3 | 239.19 | -4.44% | \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head-strong.json b/.artifacts/sql-benchmark/head-strong.json new file mode 100644 index 00000000..6eedbe85 --- /dev/null +++ b/.artifacts/sql-benchmark/head-strong.json @@ -0,0 +1,207 @@ +{ + "generatedAt": "2026-02-22T22:19:31.489Z", + "projectId": "arcade-main", + "collectionAddress": "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4", + "warmup": 3, + "iterations": 20, + "operations": [ + { + "name": "listCollectionTokens:first-page", + "samplesMs": [ + 1702.86, + 1321.7, + 1828.58, + 971.92, + 1161.59, + 879.07, + 1663.42, + 1472.46, + 1661.7, + 1498.43, + 704.62, + 714.83, + 1778.17, + 705.73, + 868.2, + 1342.85, + 706.49, + 720, + 1805.74, + 758.61 + ], + "stats": { + "count": 20, + "minMs": 704.62, + "maxMs": 1828.58, + "meanMs": 1213.35, + "p50Ms": 1161.59, + "p95Ms": 1805.74 + } + }, + { + "name": "listCollectionTokens:next-page", + "samplesMs": [ + 720.9, + 1441.42, + 694.77, + 706.4, + 1315.71, + 708.74, + 701.18, + 1804.77, + 1502.77, + 707.88, + 715.18, + 702.06, + 706.11, + 691.79, + 724.38, + 698.26, + 700.93, + 1746.92, + 727.48, + 710.61 + ], + "stats": { + "count": 20, + "minMs": 691.79, + "maxMs": 1804.77, + "meanMs": 921.41, + "p50Ms": 708.74, + "p95Ms": 1746.92 + } + }, + { + "name": "listCollectionTokens:attribute-filters", + "samplesMs": [ + 217.9, + 218.48, + 217.83, + 217.77, + 247.12, + 221.41, + 216.61, + 228.54, + 223.55, + 218.95, + 217.51, + 218.48, + 217.73, + 219.89, + 218.61, + 217.76, + 219.8, + 234.97, + 217.42, + 243.64 + ], + "stats": { + "count": 20, + 
"minMs": 216.61, + "maxMs": 247.12, + "meanMs": 222.7, + "p50Ms": 218.48, + "p95Ms": 243.64 + } + }, + { + "name": "getCollectionOrders", + "samplesMs": [ + 243.3, + 294.53, + 247.98, + 353.54, + 244.69, + 247.74, + 244.94, + 248.43, + 257.95, + 283.29, + 246.01, + 243.44, + 245.64, + 246.41, + 246.89, + 253.74, + 266.28, + 243.3, + 251.46, + 249.62 + ], + "stats": { + "count": 20, + "minMs": 243.3, + "maxMs": 353.54, + "meanMs": 257.96, + "p50Ms": 247.74, + "p95Ms": 294.53 + } + }, + { + "name": "listCollectionListings:verifyOwnership=false", + "samplesMs": [ + 237.79, + 243.32, + 302.48, + 242.36, + 241.12, + 238.2, + 254.7, + 240.55, + 239.31, + 237.54, + 430.13, + 240.88, + 320.45, + 238.35, + 397.08, + 242.3, + 237.92, + 250.41, + 294.45, + 238.57 + ], + "stats": { + "count": 20, + "minMs": 237.54, + "maxMs": 430.13, + "meanMs": 268.4, + "p50Ms": 241.12, + "p95Ms": 397.08 + } + }, + { + "name": "fetchTraitValues:beast id", + "samplesMs": [ + 216.48, + 220.09, + 218.88, + 225.99, + 218.07, + 217.85, + 217.33, + 217.61, + 216.44, + 217.64, + 220.06, + 222.56, + 219.33, + 215.19, + 224.44, + 217.83, + 218.03, + 219.72, + 218.23, + 220.25 + ], + "stats": { + "count": 20, + "minMs": 215.19, + "maxMs": 225.99, + "meanMs": 219.1, + "p50Ms": 218.07, + "p95Ms": 224.44 + } + } + ] +} \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head-strong.md b/.artifacts/sql-benchmark/head-strong.md new file mode 100644 index 00000000..01d2552a --- /dev/null +++ b/.artifacts/sql-benchmark/head-strong.md @@ -0,0 +1,16 @@ +## SQL Benchmark Report + +- Project: `arcade-main` +- Collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4` +- Generated: `2026-02-22T22:19:31.489Z` +- Warmup: `3` +- Iterations: `20` + +| Operation | p50 (ms) | p95 (ms) | mean (ms) | +| --- | ---: | ---: | ---: | +| listCollectionTokens:first-page | 1161.59 | 1805.74 | 1213.35 | +| listCollectionTokens:next-page | 708.74 | 1746.92 | 921.41 | +| 
listCollectionTokens:attribute-filters | 218.48 | 243.64 | 222.7 | +| getCollectionOrders | 247.74 | 294.53 | 257.96 | +| listCollectionListings:verifyOwnership=false | 241.12 | 397.08 | 268.4 | +| fetchTraitValues:beast id | 218.07 | 224.44 | 219.1 | \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head.json b/.artifacts/sql-benchmark/head.json new file mode 100644 index 00000000..c94bf7a1 --- /dev/null +++ b/.artifacts/sql-benchmark/head.json @@ -0,0 +1,225 @@ +{ + "generatedAt": "2026-02-23T00:27:59.247Z", + "projectId": "arcade-main", + "collectionAddress": "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4", + "warmup": 1, + "iterations": 5, + "operations": [ + { + "name": "getCollection", + "samplesMs": [ + 413.37, + 265.24, + 239.4, + 243.34, + 253.98 + ], + "stats": { + "count": 5, + "minMs": 239.4, + "maxMs": 413.37, + "meanMs": 283.07, + "p50Ms": 253.98, + "p95Ms": 413.37 + } + }, + { + "name": "listCollectionTokens:first-page:eager", + "samplesMs": [ + 1972.51, + 1737.69, + 1567.52, + 1823.13, + 1980.78 + ], + "stats": { + "count": 5, + "minMs": 1567.52, + "maxMs": 1980.78, + "meanMs": 1816.33, + "p50Ms": 1823.13, + "p95Ms": 1980.78 + } + }, + { + "name": "listCollectionTokens:next-page:eager", + "samplesMs": [ + 1918.12, + 1825.4, + 1567.17, + 1841.87, + 1503.16 + ], + "stats": { + "count": 5, + "minMs": 1503.16, + "maxMs": 1918.12, + "meanMs": 1731.14, + "p50Ms": 1825.4, + "p95Ms": 1918.12 + } + }, + { + "name": "listCollectionTokens:attribute-filters:eager", + "samplesMs": [ + 234.02, + 240.87, + 252.03, + 248.14, + 275.03 + ], + "stats": { + "count": 5, + "minMs": 234.02, + "maxMs": 275.03, + "meanMs": 250.02, + "p50Ms": 248.14, + "p95Ms": 275.03 + } + }, + { + "name": "listCollectionTokens:first-page:deferred", + "samplesMs": [ + 251.48, + 269.44, + 258.23, + 285.79, + 292.5 + ], + "stats": { + "count": 5, + "minMs": 251.48, + "maxMs": 292.5, + "meanMs": 271.49, + "p50Ms": 269.44, + "p95Ms": 292.5 + } + }, + { + 
"name": "listCollectionTokens:next-page:deferred", + "samplesMs": [ + 260.31, + 260.12, + 297.95, + 264.29, + 258.07 + ], + "stats": { + "count": 5, + "minMs": 258.07, + "maxMs": 297.95, + "meanMs": 268.15, + "p50Ms": 260.31, + "p95Ms": 297.95 + } + }, + { + "name": "listCollectionTokens:attribute-filters:deferred", + "samplesMs": [ + 238.79, + 243.81, + 245.82, + 246.28, + 231.81 + ], + "stats": { + "count": 5, + "minMs": 231.81, + "maxMs": 246.28, + "meanMs": 241.3, + "p50Ms": 243.81, + "p95Ms": 246.28 + } + }, + { + "name": "getCollectionOrders", + "samplesMs": [ + 276.46, + 283.34, + 263.46, + 270.94, + 274.84 + ], + "stats": { + "count": 5, + "minMs": 263.46, + "maxMs": 283.34, + "meanMs": 273.81, + "p50Ms": 274.84, + "p95Ms": 283.34 + } + }, + { + "name": "listCollectionListings:verifyOwnership=false", + "samplesMs": [ + 264.37, + 330.38, + 279.5, + 262.1, + 415.67 + ], + "stats": { + "count": 5, + "minMs": 262.1, + "maxMs": 415.67, + "meanMs": 310.4, + "p50Ms": 279.5, + "p95Ms": 415.67 + } + }, + { + "name": "listCollectionListings:verifyOwnership=true", + "samplesMs": [ + 523.32, + 510.16, + 533.3, + 529.26, + 601.69 + ], + "stats": { + "count": 5, + "minMs": 510.16, + "maxMs": 601.69, + "meanMs": 539.55, + "p50Ms": 529.26, + "p95Ms": 601.69 + } + }, + { + "name": "fetchCollectionTraitMetadata", + "samplesMs": [ + 272.62, + 263.1, + 265.88, + 275.31, + 272.92 + ], + "stats": { + "count": 5, + "minMs": 263.1, + "maxMs": 275.31, + "meanMs": 269.97, + "p50Ms": 272.62, + "p95Ms": 275.31 + } + }, + { + "name": "fetchTraitValues:beast id", + "samplesMs": [ + 244.28, + 234.24, + 251.52, + 241.34, + 247.06 + ], + "stats": { + "count": 5, + "minMs": 234.24, + "maxMs": 251.52, + "meanMs": 243.69, + "p50Ms": 244.28, + "p95Ms": 251.52 + } + } + ] +} \ No newline at end of file diff --git a/.artifacts/sql-benchmark/head.md b/.artifacts/sql-benchmark/head.md new file mode 100644 index 00000000..13b37a7f --- /dev/null +++ b/.artifacts/sql-benchmark/head.md @@ -0,0 +1,22 
@@ +## SQL Benchmark Report + +- Project: `arcade-main` +- Collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4` +- Generated: `2026-02-23T00:27:59.247Z` +- Warmup: `1` +- Iterations: `5` + +| Operation | p50 (ms) | p95 (ms) | mean (ms) | +| --- | ---: | ---: | ---: | +| getCollection | 253.98 | 413.37 | 283.07 | +| listCollectionTokens:first-page:eager | 1823.13 | 1980.78 | 1816.33 | +| listCollectionTokens:next-page:eager | 1825.4 | 1918.12 | 1731.14 | +| listCollectionTokens:attribute-filters:eager | 248.14 | 275.03 | 250.02 | +| listCollectionTokens:first-page:deferred | 269.44 | 292.5 | 271.49 | +| listCollectionTokens:next-page:deferred | 260.31 | 297.95 | 268.15 | +| listCollectionTokens:attribute-filters:deferred | 243.81 | 246.28 | 241.3 | +| getCollectionOrders | 274.84 | 283.34 | 273.81 | +| listCollectionListings:verifyOwnership=false | 279.5 | 415.67 | 310.4 | +| listCollectionListings:verifyOwnership=true | 529.26 | 601.69 | 539.55 | +| fetchCollectionTraitMetadata | 272.62 | 275.31 | 269.97 | +| fetchTraitValues:beast id | 244.28 | 251.52 | 243.69 | \ No newline at end of file diff --git a/.github/workflows/benchmark-sql.yml b/.github/workflows/benchmark-sql.yml new file mode 100644 index 00000000..174d3e8b --- /dev/null +++ b/.github/workflows/benchmark-sql.yml @@ -0,0 +1,191 @@ +name: benchmark-sql + +on: + pull_request: + branches: + - main + paths: + - "packages/arcade-ts/**" + - ".github/workflows/benchmark-sql.yml" + workflow_dispatch: + inputs: + project_id: + description: "Torii project id" + required: false + default: "arcade-main" + collection_address: + description: "Collection contract address" + required: false + default: "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4" + attribute_filters_json: + description: "JSON attribute filters for token benchmarks" + required: false + default: '{"beast id":["trait"]}' + trait_name: + description: "Trait name for trait-value benchmark query" + 
required: false + default: "beast id" + warmup: + description: "Warmup runs per operation" + required: false + default: "3" + iterations: + description: "Measured runs per operation" + required: false + default: "10" + operation_timeout_ms: + description: "Per-operation timeout in milliseconds" + required: false + default: "15000" + include_optional_ops: + description: "Include optional operations (getCollection, ownership=true, full trait metadata)" + required: false + default: "false" + fail_on_operation_error: + description: "Fail workflow when one benchmark operation errors" + required: false + default: "false" + fail_on_deferred_regression: + description: "Fail workflow when deferred benchmark regressions exceed threshold" + required: false + default: "false" + deferred_max_p95_delta_pct: + description: "Max allowed deferred p95 delta (%) vs baseline" + required: false + default: "10" + eager_max_p95_delta_pct: + description: "Max allowed eager p95 delta (%) vs baseline" + required: false + default: "10" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + benchmark-sql: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: pnpm/action-setup@v4 + with: + version: 10.5.2 + + - uses: actions/setup-node@v4 + with: + node-version: 20 + cache: "pnpm" + + - name: Install dependencies + run: pnpm install --frozen-lockfile --ignore-scripts + + - name: Resolve benchmark settings + id: settings + env: + PROJECT_ID_INPUT: ${{ github.event.inputs.project_id }} + COLLECTION_INPUT: ${{ github.event.inputs.collection_address }} + FILTERS_INPUT: ${{ github.event.inputs.attribute_filters_json }} + TRAIT_INPUT: ${{ github.event.inputs.trait_name }} + WARMUP_INPUT: ${{ github.event.inputs.warmup }} + ITER_INPUT: ${{ github.event.inputs.iterations }} + TIMEOUT_INPUT: ${{ github.event.inputs.operation_timeout_ms }} + OPTIONAL_OPS_INPUT: ${{ 
github.event.inputs.include_optional_ops }} + FAIL_FAST_INPUT: ${{ github.event.inputs.fail_on_operation_error }} + FAIL_DEFERRED_INPUT: ${{ github.event.inputs.fail_on_deferred_regression }} + DEFERRED_DELTA_INPUT: ${{ github.event.inputs.deferred_max_p95_delta_pct }} + EAGER_DELTA_INPUT: ${{ github.event.inputs.eager_max_p95_delta_pct }} + run: | + project_id="${PROJECT_ID_INPUT:-arcade-main}" + collection_address="${COLLECTION_INPUT:-0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4}" + attribute_filters_json="${FILTERS_INPUT:-{\"beast id\":[\"trait\"]}}" + trait_name="${TRAIT_INPUT:-beast id}" + warmup="${WARMUP_INPUT:-3}" + iterations="${ITER_INPUT:-10}" + operation_timeout_ms="${TIMEOUT_INPUT:-15000}" + include_optional_ops="${OPTIONAL_OPS_INPUT:-false}" + fail_on_operation_error="${FAIL_FAST_INPUT:-false}" + fail_on_deferred_regression="${FAIL_DEFERRED_INPUT:-false}" + deferred_max_p95_delta_pct="${DEFERRED_DELTA_INPUT:-10}" + eager_max_p95_delta_pct="${EAGER_DELTA_INPUT:-10}" + + mkdir -p .artifacts/sql-benchmark + + echo "project_id=${project_id}" >> "$GITHUB_OUTPUT" + echo "collection_address=${collection_address}" >> "$GITHUB_OUTPUT" + echo "attribute_filters_json=${attribute_filters_json}" >> "$GITHUB_OUTPUT" + echo "trait_name=${trait_name}" >> "$GITHUB_OUTPUT" + echo "warmup=${warmup}" >> "$GITHUB_OUTPUT" + echo "iterations=${iterations}" >> "$GITHUB_OUTPUT" + echo "operation_timeout_ms=${operation_timeout_ms}" >> "$GITHUB_OUTPUT" + echo "include_optional_ops=${include_optional_ops}" >> "$GITHUB_OUTPUT" + echo "fail_on_operation_error=${fail_on_operation_error}" >> "$GITHUB_OUTPUT" + echo "fail_on_deferred_regression=${fail_on_deferred_regression}" >> "$GITHUB_OUTPUT" + echo "deferred_max_p95_delta_pct=${deferred_max_p95_delta_pct}" >> "$GITHUB_OUTPUT" + echo "eager_max_p95_delta_pct=${eager_max_p95_delta_pct}" >> "$GITHUB_OUTPUT" + + - name: Benchmark pull request base + if: github.event_name == 'pull_request' + env: + BASE_SHA: ${{ 
github.event.pull_request.base.sha }} + BENCH_PROJECT_ID: ${{ steps.settings.outputs.project_id }} + BENCH_COLLECTION_ADDRESS: ${{ steps.settings.outputs.collection_address }} + BENCH_ATTRIBUTE_FILTERS_JSON: ${{ steps.settings.outputs.attribute_filters_json }} + BENCH_TRAIT_NAME: ${{ steps.settings.outputs.trait_name }} + BENCH_WARMUP: ${{ steps.settings.outputs.warmup }} + BENCH_ITERATIONS: ${{ steps.settings.outputs.iterations }} + BENCH_OPERATION_TIMEOUT_MS: ${{ steps.settings.outputs.operation_timeout_ms }} + BENCH_INCLUDE_OPTIONAL_OPS: ${{ steps.settings.outputs.include_optional_ops }} + BENCH_FAIL_ON_OPERATION_ERROR: ${{ steps.settings.outputs.fail_on_operation_error }} + BENCH_FAIL_ON_DEFERRED_REGRESSION: ${{ steps.settings.outputs.fail_on_deferred_regression }} + BENCH_DEFERRED_MAX_P95_DELTA_PCT: ${{ steps.settings.outputs.deferred_max_p95_delta_pct }} + BENCH_EAGER_MAX_P95_DELTA_PCT: ${{ steps.settings.outputs.eager_max_p95_delta_pct }} + BENCH_OUTPUT_FILE: .artifacts/sql-benchmark/base.json + BENCH_MARKDOWN_FILE: .artifacts/sql-benchmark/base.md + run: | + rm -rf /tmp/arcade-base + git worktree add /tmp/arcade-base "${BASE_SHA}" + pnpm --dir /tmp/arcade-base install --frozen-lockfile --ignore-scripts + pnpm --dir /tmp/arcade-base --filter @cartridge/arcade build + BENCH_ARCADE_DIST_DIR=/tmp/arcade-base/packages/arcade-ts/dist \ + node packages/arcade-ts/scripts/sql-benchmark.mjs + + - name: Benchmark current head + env: + BENCH_PROJECT_ID: ${{ steps.settings.outputs.project_id }} + BENCH_COLLECTION_ADDRESS: ${{ steps.settings.outputs.collection_address }} + BENCH_ATTRIBUTE_FILTERS_JSON: ${{ steps.settings.outputs.attribute_filters_json }} + BENCH_TRAIT_NAME: ${{ steps.settings.outputs.trait_name }} + BENCH_WARMUP: ${{ steps.settings.outputs.warmup }} + BENCH_ITERATIONS: ${{ steps.settings.outputs.iterations }} + BENCH_OPERATION_TIMEOUT_MS: ${{ steps.settings.outputs.operation_timeout_ms }} + BENCH_INCLUDE_OPTIONAL_OPS: ${{ 
steps.settings.outputs.include_optional_ops }} + BENCH_FAIL_ON_OPERATION_ERROR: ${{ steps.settings.outputs.fail_on_operation_error }} + BENCH_FAIL_ON_DEFERRED_REGRESSION: ${{ steps.settings.outputs.fail_on_deferred_regression }} + BENCH_DEFERRED_MAX_P95_DELTA_PCT: ${{ steps.settings.outputs.deferred_max_p95_delta_pct }} + BENCH_EAGER_MAX_P95_DELTA_PCT: ${{ steps.settings.outputs.eager_max_p95_delta_pct }} + BENCH_OUTPUT_FILE: .artifacts/sql-benchmark/head.json + BENCH_MARKDOWN_FILE: .artifacts/sql-benchmark/head.md + run: | + pnpm --filter @cartridge/arcade build + if [ -f .artifacts/sql-benchmark/base.json ]; then + export BENCH_BASELINE_FILE=.artifacts/sql-benchmark/base.json + fi + node packages/arcade-ts/scripts/sql-benchmark.mjs + + - name: Upload benchmark artifacts + uses: actions/upload-artifact@v4 + with: + name: sql-benchmark-${{ github.run_id }} + path: .artifacts/sql-benchmark/ + if-no-files-found: error + + - name: Append benchmark summary + run: | + { + echo "### SQL Benchmark"; + echo ""; + cat .artifacts/sql-benchmark/head.md; + } >> "$GITHUB_STEP_SUMMARY" diff --git a/packages/arcade-ts/docs/collection-fetch-sql-performance.prd.md b/packages/arcade-ts/docs/collection-fetch-sql-performance.prd.md new file mode 100644 index 00000000..afe3f03c --- /dev/null +++ b/packages/arcade-ts/docs/collection-fetch-sql-performance.prd.md @@ -0,0 +1,127 @@ +# Collection Fetch SQL Performance PRD (TDD) + +## Context + +Large collections are slow to load through the edge SQL marketplace client in `packages/arcade-ts`. Profiling and code review identified scaling bottlenecks in token pagination, attribute filtering, and listing retrieval. + +## Problem Statement + +Collection fetch latency grows superlinearly with collection size in the edge SQL path because: + +- token pagination uses `OFFSET`, which degrades at deep pages; +- attribute filtering is applied client-side after token rows are fetched; +- listing queries are uncapped by default when `limit` is omitted. 
constrained to collection prefix (`token_id LIKE '<collectionAddress>:%'`); + - `HAVING COUNT(DISTINCT trait_name) = <traitFilterCount>` to enforce AND semantics across traits.
Add test asserting `getCollectionOrders` includes `LIMIT <DEFAULT_LIMIT>` when no limit is supplied.
+- No public API/type breaking change. + +## Validation Commands + +```bash +pnpm -C packages/arcade-ts test -- src/marketplace/client.edge.test.ts +pnpm -C packages/arcade-ts test +``` diff --git a/packages/arcade-ts/docs/sql-edge-optimization.prd.md b/packages/arcade-ts/docs/sql-edge-optimization.prd.md new file mode 100644 index 00000000..c7158863 --- /dev/null +++ b/packages/arcade-ts/docs/sql-edge-optimization.prd.md @@ -0,0 +1,266 @@ +# SQL Edge Optimization PRD (TDD) + +## Document Control + +- Status: Draft +- Owner: `packages/arcade-ts` maintainers +- Last updated: 2026-02-22 +- Priority: P0 (package consumers depending on SQL path) + +## Objective + +Optimize the SQL marketplace path in `packages/arcade-ts` for reliability and latency at large-collection scale. + +Primary target: +- `packages/arcade-ts/src/marketplace/client.edge.ts` +- `packages/arcade-ts/src/marketplace/filters.ts` + +Non-primary target: +- benchmark tooling and CI gating for SQL regressions + +This PRD is SQL-first by design. UI/runtime changes outside the package are out of scope except where needed to validate package behavior. + +## Why This Matters + +Integrators using the SQL runtime need predictable collection-page performance on high-cardinality collections and large orderbooks. + +Observed production-like dataset characteristics (`arcade-main`): +- Beasts collection `0x046da895...5cf0e4`: `79,120` tokens +- Comparator collection `0x027838de...ad10e2d`: `1,121` tokens +- Beasts orders in `"ARCADE-Order"`: `5,832` total, `3,328` active sell listings +- Comparator orders: `0` + +Observed package-level pain points: +- `getCollection` can return `400 Bad Request` for Beasts in SQL path. +- trait aggregation queries can stall/time out in large datasets. +- large `IN (...)` lists and ownership verification queries can inflate query/parse cost. +- token ID normalization is inconsistent across hex formats, creating correctness risk in SQL filtering. 
+
+## Problem Statement
+
+The current SQL path is partially optimized (keyset token paging and SQL-side attribute filtering are present), but still has four unresolved risk areas:
+
+1. Reliability: collection summary query can fail for valid collections.
+2. Correctness: token ID normalization is not canonical across input forms.
+3. Performance: trait metadata/summary queries are full-scan oriented and unbounded.
+4. Scalability: large `IN` predicates for token and ownership filtering are fragile at high cardinality.
+
+## Goals
+
+1. Eliminate known SQL correctness failures in edge marketplace client.
+2. Reduce tail latency for high-volume collection operations.
+3. Keep API compatibility for existing package consumers.
+4. Add benchmark-based CI guardrails for SQL regressions.
+
+## Non-Goals
+
+- Rewriting non-SQL runtime paths.
+- Contract/schema migrations managed outside this repository.
+- Frontend UX redesign.
+
+## In Scope
+
+- SQL generation and execution logic in edge marketplace client.
+- SQL trait metadata query shapes.
+- token ID canonicalization helpers used by SQL path.
+- benchmark scenario coverage and CI budgets for SQL operations.
+
+## Baseline Operations to Protect
+
+Core operations:
+- `listCollectionTokens:first-page`
+- `listCollectionTokens:next-page`
+- `listCollectionTokens:attribute-filters`
+- `getCollectionOrders`
+- `listCollectionListings:verifyOwnership=false`
+- `fetchTraitValues:<trait name>`
+
+Optional/heavy operations (tracked separately):
+- `getCollection`
+- `listCollectionListings:verifyOwnership=true`
+- `fetchCollectionTraitMetadata`
+
+## Functional Requirements
+
+### FR-1: `getCollection` reliability
+- SQL query must return a valid collection summary or `null` without throwing for known-good addresses.
+- No `400` failures for Beasts benchmark collection.
+
+### FR-2: Canonical token ID behavior
+- SQL token filtering must accept decimal, `0x`-prefixed hex, and bare-hex token IDs consistently.
+- `tokenIds` filtering must behave identically for equivalent numeric IDs across formats. + +### FR-3: Trait query performance shape +- Replace prefix-scan-only query shapes (`token_id LIKE '
<collection>:%'`) with collection-token join strategy where feasible.
+- keep semantic parity for existing trait APIs.
+
+### FR-4: Large list resilience
+- token ID and ownership checks must avoid pathological single-query `IN (...)` expansion.
+- apply chunking or CTE/value-table strategy with deterministic merge.
+
+### FR-5: Benchmark gates
+- SQL benchmark CI must cover the above operations for the target large collection and compare base vs head.
+- regressions over defined thresholds must be visible and optionally fail CI.
+
+## Performance Targets (Initial)
+
+Targets are relative to benchmark baseline in CI (network variance aware):
+
+1. No regression > 10% p95 in core operations for head vs base.
+2. `getCollectionOrders` on Beasts: target 20% p50 improvement from current baseline.
+3. `getCollection`: success rate 100% for benchmark collection.
+4. trait summary/metadata operations: timeout-bounded and reported; no hanging runs.
+
+## TDD Delivery Plan
+
+### Epic A: Correctness and Reliability Hardening
+
+#### A1. Fix `getCollection` query robustness
+
+RED (tests first):
+- Add `client.edge.test.ts` case where `token_contracts` row lacks metadata/token sample and fallback path still returns collection.
+- Add case where first query fails; method returns `null` (or typed error contract if chosen) without unhandled throw.
+
+GREEN:
+- Rewrite `getCollection` to avoid correlated subqueries in `COALESCE`.
+- Use deterministic two-step read:
+ 1. contract row by `contract_address`
+ 2. optional token sample lookup only when needed
+
+REFACTOR:
+- isolate SQL builders for collection summary query pieces.
+
+#### A2. Canonical token ID normalization
+
+RED:
+- Add tests asserting `tokenIds: ["ff"]`, `["0xff"]`, and decimal equivalent resolve to same SQL predicate values.
+- Add tests around mixed input deduplication.
+
+GREEN:
+- add canonical token-id parser helper used by SQL edge path.
+- update `normalizeTokenIds` and related query token helpers.
+ +REFACTOR: +- centralize token-id normalization in one utility and reuse across SQL methods. + +### Epic B: Trait Query Optimization + +#### B1. Query shape improvements for trait summary/metadata + +RED: +- Extend `filters.test.ts` assertions to verify new query shape uses `tokens`-scoped join for collection filtering. +- Ensure semantic tests for OR-within-trait and AND-across-traits still pass. + +GREEN: +- replace `token_id LIKE '
<collection>:%'` centered scans with collection token join CTE pattern where feasible.
+- preserve outputs for:
+ - `fetchTraitNamesSummary`
+ - `fetchTraitValues`
+ - `fetchCollectionTraitMetadata`
+ - `fetchExpandedTraitsMetadata`
+
+REFACTOR:
+- extract shared CTE builders to reduce duplicated SQL templates.
+
+#### B2. Timeout-aware heavy operations
+
+RED:
+- add benchmark/unit coverage proving heavy operations report timeout/failure deterministically.
+
+GREEN:
+- keep operation-level timeout handling in benchmark harness.
+- expose clear failure reporting in markdown/json artifacts.
+
+REFACTOR:
+- normalize error payload shape for benchmark reports.
+
+### Epic C: Large Predicate Resilience
+
+#### C1. Large `tokenIds` query strategy
+
+RED:
+- add tests asserting large token ID inputs avoid one unbounded `IN (...)` string.
+- assert deterministic ordering and pagination behavior preserved.
+
+GREEN:
+- implement chunking or CTE value-table strategy for large token lists.
+
+REFACTOR:
+- encapsulate chunk/merge behavior with focused helper tests.
+
+#### C2. Ownership verification scaling
+
+RED:
+- add tests for large owner/token sets to ensure query builder chunks and merges without data loss.
+
+GREEN:
+- chunk ownership verification queries in `verifyListingsOwnership`.
+
+REFACTOR:
+- reuse shared chunk utility for owners/tokenIds.
+
+### Epic D: Benchmark and CI Gates
+
+RED:
+- add benchmark test coverage for comparison rows and failure sections (already partially present).
+
+GREEN:
+- keep SQL benchmark workflow as required gate for marketplace SQL changes.
+- include Beasts defaults:
+ - project: `arcade-main`
+ - collection: `0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4`
+
+REFACTOR:
+- simplify benchmark operation registry and per-operation configuration.
+ +## Test Plan + +Target test files: +- `packages/arcade-ts/src/marketplace/client.edge.test.ts` +- `packages/arcade-ts/src/marketplace/filters.test.ts` +- `packages/arcade-ts/src/marketplace/benchmark.test.ts` + +Execution gates: + +```bash +pnpm -C packages/arcade-ts test -- src/marketplace/client.edge.test.ts +pnpm -C packages/arcade-ts test -- src/marketplace/filters.test.ts +pnpm -C packages/arcade-ts test -- src/marketplace/benchmark.test.ts +pnpm -C packages/arcade-ts test +``` + +Benchmark validation: + +```bash +pnpm --filter @cartridge/arcade build +BENCH_WARMUP=1 BENCH_ITERATIONS=5 node packages/arcade-ts/scripts/sql-benchmark.mjs +``` + +## Risks and Mitigations + +1. Query planner behavior differs by backend version. +- Mitigation: benchmark base vs head in CI with same backend target. + +2. Token ID format ambiguity across consumers. +- Mitigation: canonical normalization plus format-coverage tests. + +3. Trait query semantics regression while changing SQL shape. +- Mitigation: preserve existing result-shape tests and add cross-query parity fixtures. + +4. Network variance causing noisy benchmark deltas. +- Mitigation: use multiple iterations, report p50/p95, keep thresholds conservative. + +## Rollout + +1. Ship Epic A (correctness) first. +2. Ship Epic B (trait query shape) second. +3. Ship Epic C (large list resilience) third. +4. Enforce Epic D CI thresholds after baseline stabilizes. + +## Definition of Done + +- All new RED tests introduced per epic are observed failing before implementation and green after. +- SQL benchmark workflow produces stable artifacts and markdown summary. +- Core SQL operations have no >10% p95 regression vs base on PR benchmark. +- `getCollection` succeeds for benchmark collection. +- token ID normalization tests pass for hex/decimal mixed inputs. 
diff --git a/packages/arcade-ts/docs/sql-metadata-preserving-listing-sprint.prd.md b/packages/arcade-ts/docs/sql-metadata-preserving-listing-sprint.prd.md new file mode 100644 index 00000000..efdf7f08 --- /dev/null +++ b/packages/arcade-ts/docs/sql-metadata-preserving-listing-sprint.prd.md @@ -0,0 +1,290 @@ +# SQL Metadata-Preserving Listing Performance PRD (Sprint TDD) + +## Document Control + +- Status: Completed +- Owner: `packages/arcade-ts` maintainers +- Last updated: 2026-02-23 +- Sprint window: current sprint (1 sprint) +- Priority: P0 + +## Objective + +Reduce SQL token-list latency in `packages/arcade-ts` while preserving NFT visibility and metadata access. + +Key principle for this sprint: +- metadata is required for full NFT UX, +- but metadata does not need to be eagerly fetched for every token card on first page load. + +## Context and Baseline + +Current benchmark (`.artifacts/sql-benchmark/head.json`, generated 2026-02-22): + +- `listCollectionTokens:first-page`: p50 `1497.68ms`, p95 `1918.1ms` +- `listCollectionTokens:next-page`: p50 `1745.04ms`, p95 `2003.09ms` +- `listCollectionTokens:attribute-filters`: p50 `240.95ms`, p95 `251.43ms` + +Direct SQL profiling against `arcade-main` Beasts collection showed payload dominance: + +- with metadata (`LIMIT 100`): ~`3.25MB` response, ~`1.69s` avg +- without metadata (`LIMIT 100`): ~`16.9KB` response, ~`0.26s` avg + +This indicates first-page latency is primarily response size + transfer + JSON parsing, not only predicate/index quality. + +## Problem Statement + +`listCollectionTokens` currently always selects `metadata` in the edge SQL path, which over-fetches for list/grid surfaces that only need: + +- token identity (`contract_address`, `token_id`), +- image URL (derived from Torii static endpoint), +- optional lightweight labels. + +We need a TDD-scoped path that: + +1. keeps current default behavior safe for existing consumers, +2. enables deferred metadata loading for high-volume list views, +3. 
preserves eventual full metadata access without changing business semantics. + +## Goals + +1. Preserve existing default API behavior for backwards compatibility. +2. Introduce an opt-in deferred metadata mode for token listing. +3. Add explicit metadata hydration path for selected token IDs. +4. Add benchmark/CI coverage to prevent regressions and prove benefit. + +## Non-Goals + +- Removing metadata support. +- Rewriting Dojo runtime path. +- Contract/index migrations. +- Frontend redesign outside API integration examples. + +## In Scope + +- `packages/arcade-ts/src/marketplace/types.ts` +- `packages/arcade-ts/src/marketplace/client.edge.ts` +- `packages/arcade-ts/src/marketplace/index.ts` +- `packages/arcade-ts/src/marketplace/README.md` +- `packages/arcade-ts/scripts/sql-benchmark.mjs` +- `packages/arcade-ts/src/marketplace/client.edge.test.ts` +- `packages/arcade-ts/src/marketplace/benchmark.test.ts` + +## Out of Scope + +- Client app route-level virtualization work. +- Multi-project cache invalidation redesign. +- Marketplace schema changes. + +## Functional Requirements + +### FR-1: Backwards-compatible default + +- `listCollectionTokens` behavior remains unchanged when no new option is provided. +- Existing callers continue receiving metadata as today. + +### FR-2: Deferred metadata option + +- Add an option on `FetchCollectionTokensOptions`: + - `includeMetadata?: boolean` (default `true`). +- When `includeMetadata === false`, SQL projection must exclude metadata-heavy fields. +- Returned token objects must still include: + - canonical `contract_address`, + - `token_id`, + - image URL resolution path (`fetchImages` behavior remains functional). + +### FR-3: Batch metadata hydration + +- Add a new client method for targeted hydration by token IDs for a collection: + - `getCollectionTokenMetadataBatch(...)` (name finalization during implementation). +- It must support chunking for large token-id inputs and return normalized tokens with metadata. 
+ +### FR-4: Deterministic behavior and safety + +- Existing token ID canonicalization and dedupe behavior must be reused. +- SQL generation remains escaped and chunked for large `IN` sets. + +### FR-5: Benchmark visibility and gating + +- Benchmark script must include both modes: + - eager metadata list path, + - deferred metadata list path. +- Markdown report must clearly separate these operations. +- CI should fail on deferred-mode regressions once baseline stabilizes. + +## Success Metrics (Sprint Exit) + +1. Deferred-mode benchmark target on Beasts (`LIMIT 100`): +- `listCollectionTokens:first-page:deferred` p50 <= `700ms` +- `listCollectionTokens:next-page:deferred` p50 <= `800ms` + +2. No regression guardrails: +- Existing eager-mode ops do not regress > `10%` p95 vs base. + +3. Correctness: +- 100% passing tests for new API behavior and compatibility. + +4. UX compatibility: +- Deferred-mode tokens still produce image URLs when `fetchImages=true`. + +## TDD Delivery Plan (RED/GREEN/REFACTOR) + +### Epic A: Deferred Metadata Listing (P0) + +#### A1. Add API surface [x] + +RED: +- Add type-level and runtime tests asserting: + - default call includes metadata projection, + - `includeMetadata: false` excludes metadata projection. + +GREEN: +- Introduce `includeMetadata?: boolean` in `FetchCollectionTokensOptions`. +- Keep default `true`. + +REFACTOR: +- Extract SQL projection builder for token queries. + +#### A2. Preserve image visibility in deferred mode [x] + +RED: +- Add test proving returned tokens still include deterministic image URL when: + - `includeMetadata=false` + - `fetchImages=true`. + +GREEN: +- Ensure normalization path tolerates absent metadata and still resolves image URL. + +REFACTOR: +- Minimize branching in token normalization. + +### Epic B: Metadata Hydration Batch API (P0) + +#### B1. 
Add batch metadata fetch method [x] + +RED: +- Add tests for new method: + - hydrates metadata for requested token IDs, + - returns empty for invalid token IDs, + - chunks large token ID sets. + +GREEN: +- Implement SQL query path scoped to collection + token IDs. +- Reuse `normalizeTokenIds` and chunk helpers. + +REFACTOR: +- Reuse shared query builder utilities between list + hydrate methods. + +#### B2. Edge cases and compatibility [x] + +RED: +- Add tests for decimal/hex/bare-hex token ID equivalence in hydration path. + +GREEN: +- Route hydration path through canonicalization utility. + +REFACTOR: +- Consolidate token-id normalization entry points. + +### Epic C: Benchmarks and CI Gates (P1) + +#### C1. Benchmark operation expansion [x] + +RED: +- Add benchmark helper/unit tests for additional operation names. + +GREEN: +- Add operations: + - `listCollectionTokens:first-page:eager` + - `listCollectionTokens:first-page:deferred` + - `listCollectionTokens:next-page:eager` + - `listCollectionTokens:next-page:deferred` + - optional: `getCollectionTokenMetadataBatch:100` + +REFACTOR: +- Centralize operation registry in benchmark script. + +#### C2. CI threshold enforcement [x] + +RED: +- Add script/unit tests for threshold evaluation logic. + +GREEN: +- Add fail condition for deferred-mode regressions above threshold. +- Keep optional heavy operations non-blocking for now. + +REFACTOR: +- Separate core and optional comparison sets. + +## Sprint Backlog and Estimates + +1. [x] Story A: Deferred metadata option + SQL projection split +- Estimate: 2 days +- Owner: SDK engineer +- Risk: medium (API + normalization semantics) + +2. [x] Story B: Batch metadata hydration API +- Estimate: 2 days +- Owner: SDK engineer +- Risk: medium (API shape + chunk behavior) + +3. [x] Story C: Benchmark/CI expansion and gates +- Estimate: 1 day +- Owner: infra + SDK +- Risk: low/medium (flaky network variance) + +4. 
[x] Story D: Docs and migration examples +- Estimate: 0.5 day +- Owner: SDK engineer +- Risk: low + +Total: 5.5 engineering days (fits one sprint with review buffer). + +## Test Plan + +Primary files: + +- `packages/arcade-ts/src/marketplace/client.edge.test.ts` +- `packages/arcade-ts/src/marketplace/benchmark.test.ts` +- optional new tests: + - `packages/arcade-ts/src/marketplace/metadata-batch.test.ts` + +Execution: + +```bash +pnpm -C packages/arcade-ts test -- src/marketplace/client.edge.test.ts +pnpm -C packages/arcade-ts test -- src/marketplace/benchmark.test.ts +pnpm -C packages/arcade-ts test +``` + +Benchmark validation: + +```bash +pnpm --filter @cartridge/arcade build +BENCH_WARMUP=1 BENCH_ITERATIONS=5 BENCH_INCLUDE_OPTIONAL_OPS=1 node packages/arcade-ts/scripts/sql-benchmark.mjs +``` + +## Rollout Plan + +1. Land API + tests behind backward-compatible defaults. +2. Land hydration API + tests. +3. Land benchmark operation expansion and CI thresholds in warning mode. +4. Flip CI thresholds to blocking after 3-5 stable PR runs. + +## Risks and Mitigations + +1. Consumers rely on metadata in list call implicitly. +- Mitigation: default remains eager (`includeMetadata=true`), deferred is opt-in. + +2. Deferred mode adopted without hydration strategy. +- Mitigation: provide batch hydration API and README usage pattern in same sprint. + +3. Benchmark noise from network/backend variance. +- Mitigation: compare base/head in same workflow; use conservative p95 threshold. + +## Definition of Done + +- [x] New deferred metadata mode exists and is fully tested. +- [x] Batch metadata hydration method exists and is fully tested. +- [x] Benchmark reports include eager vs deferred operations. +- [x] CI can detect/flag deferred-mode regressions. +- [x] README documents when to use eager vs deferred mode and how to hydrate metadata. 
diff --git a/packages/arcade-ts/package.json b/packages/arcade-ts/package.json index 5cf73791..8cd038ea 100644 --- a/packages/arcade-ts/package.json +++ b/packages/arcade-ts/package.json @@ -50,6 +50,7 @@ "test:coverage": "vitest run --coverage", "test:watch": "vitest", "test:edge:smoke": "vitest run --config vitest.edge.config.ts", + "benchmark:sql": "node ./scripts/sql-benchmark.mjs", "test:integration": "RUN_INTEGRATION_TESTS=true vitest run --config vitest.integration.config.ts", "docs": "cd www && npm run start", "docs:build": "cd www && GIT_REVISION_OVERRIDE=${npm_config_git_revision_override} npm run build", diff --git a/packages/arcade-ts/scripts/sql-benchmark-helpers.mjs b/packages/arcade-ts/scripts/sql-benchmark-helpers.mjs new file mode 100644 index 00000000..f856c41e --- /dev/null +++ b/packages/arcade-ts/scripts/sql-benchmark-helpers.mjs @@ -0,0 +1,261 @@ +const round = (value, precision = 2) => { + const factor = 10 ** precision; + return Math.round(value * factor) / factor; +}; + +const percentile = (sorted, p) => { + if (!sorted.length) return 0; + if (sorted.length === 1) return sorted[0]; + const index = Math.ceil((p / 100) * sorted.length) - 1; + const boundedIndex = Math.min(sorted.length - 1, Math.max(0, index)); + return sorted[boundedIndex]; +}; + +const computeBenchmarkStats = (samplesMs) => { + if (!samplesMs.length) { + throw new Error("Cannot compute benchmark stats with zero samples"); + } + + const sorted = [...samplesMs].sort((a, b) => a - b); + const total = sorted.reduce((sum, value) => sum + value, 0); + + return { + count: sorted.length, + minMs: round(sorted[0]), + maxMs: round(sorted[sorted.length - 1]), + meanMs: round(total / sorted.length), + p50Ms: round(percentile(sorted, 50)), + p95Ms: round(percentile(sorted, 95)), + }; +}; + +const normalizeErrorMessage = (error) => { + if (error instanceof Error) return error.message; + if (typeof error === "string") return error; + return String(error); +}; + +const withTimeout = async 
(promise, timeoutMs) => { + if (!timeoutMs || timeoutMs <= 0) { + return promise; + } + + return new Promise((resolve, reject) => { + const timer = setTimeout(() => { + reject(new Error(`Operation timed out after ${timeoutMs}ms`)); + }, timeoutMs); + + promise + .then((value) => resolve(value)) + .catch((error) => reject(error)) + .finally(() => clearTimeout(timer)); + }); +}; + +export const fallbackRunBenchmarkOperation = async (options) => { + const { + name, + warmup, + iterations, + timeoutMs, + execute, + now = () => Date.now(), + } = options; + + try { + let lastResult; + + for (let index = 0; index < warmup; index += 1) { + lastResult = await withTimeout(execute(), timeoutMs); + } + + const samplesMs = []; + for (let index = 0; index < iterations; index += 1) { + const startedAt = now(); + lastResult = await withTimeout(execute(), timeoutMs); + samplesMs.push(round(now() - startedAt)); + } + + return { + result: { + name, + samplesMs, + stats: computeBenchmarkStats(samplesMs), + }, + lastResult, + }; + } catch (error) { + return { + failure: { + name, + error: normalizeErrorMessage(error), + }, + }; + } +}; + +const percentDelta = (base, head) => { + if (base === 0) return 0; + return round(((head - base) / base) * 100); +}; + +export const fallbackCompareBenchmarkReports = (base, head) => { + const baseByName = new Map(base.operations.map((op) => [op.name, op])); + const rows = []; + + for (const operation of head.operations) { + const baseOp = baseByName.get(operation.name); + if (!baseOp) continue; + + rows.push({ + name: operation.name, + baseP50Ms: baseOp.stats.p50Ms, + headP50Ms: operation.stats.p50Ms, + deltaP50Pct: percentDelta(baseOp.stats.p50Ms, operation.stats.p50Ms), + baseP95Ms: baseOp.stats.p95Ms, + headP95Ms: operation.stats.p95Ms, + deltaP95Pct: percentDelta(baseOp.stats.p95Ms, operation.stats.p95Ms), + }); + } + + return rows; +}; + +const getComparisonDelta = (row, metric) => + metric === "p50" ? 
row.deltaP50Pct : row.deltaP95Pct; + +export const fallbackEvaluateBenchmarkRegressions = ( + rows, + thresholds, +) => { + if (!rows?.length || !thresholds?.length) { + return []; + } + + const rowsByName = new Map(rows.map((row) => [row.name, row])); + const violations = []; + + for (const threshold of thresholds) { + const row = rowsByName.get(threshold.operationName); + if (!row) continue; + + const maxDeltaPct = Number(threshold.maxDeltaPct); + if (!Number.isFinite(maxDeltaPct) || maxDeltaPct < 0) { + continue; + } + + const actualDeltaPct = getComparisonDelta(row, threshold.metric); + if (actualDeltaPct > maxDeltaPct) { + violations.push({ + name: row.name, + metric: threshold.metric, + maxDeltaPct, + actualDeltaPct, + }); + } + } + + return violations; +}; + +const formatDelta = (value) => { + const signed = value > 0 ? `+${value}` : `${value}`; + return `${signed}%`; +}; + +const escapeTableCell = (value) => + value.replace(/\|/g, "\\|").replace(/\r?\n/g, " "); + +export const fallbackRenderBenchmarkMarkdown = ( + report, + comparison = [], +) => { + const lines = []; + lines.push("## SQL Benchmark Report"); + lines.push(""); + lines.push(`- Project: \`${report.projectId}\``); + lines.push(`- Collection: \`${report.collectionAddress}\``); + lines.push(`- Generated: \`${report.generatedAt}\``); + lines.push(`- Warmup: \`${report.warmup}\``); + lines.push(`- Iterations: \`${report.iterations}\``); + lines.push(""); + lines.push("| Operation | p50 (ms) | p95 (ms) | mean (ms) |"); + lines.push("| --- | ---: | ---: | ---: |"); + for (const operation of report.operations) { + lines.push( + `| ${operation.name} | ${operation.stats.p50Ms} | ${operation.stats.p95Ms} | ${operation.stats.meanMs} |`, + ); + } + + if (report.failures && report.failures.length > 0) { + lines.push(""); + lines.push("### Failed Operations"); + lines.push(""); + lines.push("| Operation | Error |"); + lines.push("| --- | --- |"); + for (const failure of report.failures) { + lines.push( + `| 
${escapeTableCell(failure.name)} | ${escapeTableCell( + failure.error, + )} |`, + ); + } + } + + if (comparison.length > 0) { + lines.push(""); + lines.push("### Base vs Head"); + lines.push(""); + lines.push( + "| Operation | base p50 | head p50 | delta p50 | base p95 | head p95 | delta p95 |", + ); + lines.push("| --- | ---: | ---: | ---: | ---: | ---: | ---: |"); + for (const row of comparison) { + lines.push( + `| ${row.name} | ${row.baseP50Ms} | ${row.headP50Ms} | ${formatDelta( + row.deltaP50Pct, + )} | ${row.baseP95Ms} | ${row.headP95Ms} | ${formatDelta( + row.deltaP95Pct, + )} |`, + ); + } + } + + return lines.join("\n"); +}; + +export const resolveBenchmarkHelpers = (marketplace) => { + const { createMarketplaceClient, fetchCollectionTraitMetadata, fetchTraitValues } = + marketplace ?? {}; + + if (!createMarketplaceClient) { + throw new Error("createMarketplaceClient export was not found"); + } + + const runBenchmarkOperation = + marketplace?.runBenchmarkOperation ?? fallbackRunBenchmarkOperation; + const compareBenchmarkReports = + marketplace?.compareBenchmarkReports ?? fallbackCompareBenchmarkReports; + const evaluateBenchmarkRegressions = + marketplace?.evaluateBenchmarkRegressions ?? + fallbackEvaluateBenchmarkRegressions; + const renderBenchmarkMarkdown = + marketplace?.renderBenchmarkMarkdown ?? 
fallbackRenderBenchmarkMarkdown; + + const usingFallbackHelpers = + !marketplace?.runBenchmarkOperation || + !marketplace?.compareBenchmarkReports || + !marketplace?.evaluateBenchmarkRegressions || + !marketplace?.renderBenchmarkMarkdown; + + return { + createMarketplaceClient, + fetchCollectionTraitMetadata, + fetchTraitValues, + runBenchmarkOperation, + compareBenchmarkReports, + evaluateBenchmarkRegressions, + renderBenchmarkMarkdown, + usingFallbackHelpers, + }; +}; diff --git a/packages/arcade-ts/scripts/sql-benchmark.mjs b/packages/arcade-ts/scripts/sql-benchmark.mjs new file mode 100644 index 00000000..7e0506bd --- /dev/null +++ b/packages/arcade-ts/scripts/sql-benchmark.mjs @@ -0,0 +1,458 @@ +#!/usr/bin/env node + +import fs from "node:fs/promises"; +import path from "node:path"; +import { performance } from "node:perf_hooks"; +import { pathToFileURL } from "node:url"; +import { constants } from "starknet"; +import { resolveBenchmarkHelpers } from "./sql-benchmark-helpers.mjs"; + +const DEFAULT_COLLECTION = + "0x046da8955829adf2bda310099a0063451923f02e648cf25a1203aac6335cf0e4"; +const DEFAULT_PROJECT_ID = "arcade-main"; +const DEFAULT_ATTRIBUTE_FILTERS_JSON = '{"beast id":["trait"]}'; + +const toPositiveInt = (value, fallback) => { + const parsed = Number.parseInt(String(value ?? 
""), 10); + if (!Number.isFinite(parsed) || parsed <= 0) return fallback; + return parsed; +}; + +const toNonNegativeNumber = (value, fallback) => { + if (value == null || `${value}`.trim().length === 0) { + return fallback; + } + + const parsed = Number.parseFloat(String(value)); + if (!Number.isFinite(parsed) || parsed < 0) { + return fallback; + } + return parsed; +}; + +const toBoolean = (value, fallback = false) => { + if (value == null || `${value}`.trim().length === 0) { + return fallback; + } + + const normalized = `${value}`.trim().toLowerCase(); + return ( + normalized === "1" || + normalized === "true" || + normalized === "yes" || + normalized === "on" + ); +}; + +const parseAttributeFilters = (jsonInput) => { + if (!jsonInput || jsonInput.trim().length === 0) return undefined; + + let parsed; + try { + parsed = JSON.parse(jsonInput); + } catch (error) { + throw new Error( + `Invalid BENCH_ATTRIBUTE_FILTERS_JSON value: ${String(error)}`, + ); + } + + if (!parsed || typeof parsed !== "object" || Array.isArray(parsed)) { + throw new Error("BENCH_ATTRIBUTE_FILTERS_JSON must be a JSON object"); + } + + const normalized = {}; + for (const [trait, values] of Object.entries(parsed)) { + if (!Array.isArray(values)) continue; + const filtered = values + .map((value) => String(value)) + .filter((value) => value.length > 0); + if (filtered.length === 0) continue; + normalized[trait] = filtered; + } + + return Object.keys(normalized).length > 0 ? 
normalized : undefined; +}; + +const ensureDir = async (targetFile) => { + const dir = path.dirname(targetFile); + await fs.mkdir(dir, { recursive: true }); +}; + +const loadMarketplaceModule = async (distDir) => { + const target = path.resolve(distDir, "marketplace/index.mjs"); + const moduleUrl = pathToFileURL(target).href; + return import(moduleUrl); +}; + +const getDefaultDistDir = (cwd) => { + const packageDist = path.resolve(cwd, "packages/arcade-ts/dist"); + return packageDist; +}; + +const maybeReadBaseline = async (baselinePath) => { + if (!baselinePath) return undefined; + try { + const content = await fs.readFile(baselinePath, "utf-8"); + return JSON.parse(content); + } catch { + return undefined; + } +}; + +const main = async () => { + const cwd = process.cwd(); + const projectId = process.env.BENCH_PROJECT_ID || DEFAULT_PROJECT_ID; + const collectionAddress = + process.env.BENCH_COLLECTION_ADDRESS || DEFAULT_COLLECTION; + const warmup = toPositiveInt(process.env.BENCH_WARMUP, 3); + const iterations = toPositiveInt(process.env.BENCH_ITERATIONS, 10); + const operationTimeoutMs = toPositiveInt( + process.env.BENCH_OPERATION_TIMEOUT_MS, + 15000, + ); + const includeOptionalOps = toBoolean( + process.env.BENCH_INCLUDE_OPTIONAL_OPS, + false, + ); + const failOnOperationError = toBoolean( + process.env.BENCH_FAIL_ON_OPERATION_ERROR, + false, + ); + const failOnDeferredRegression = toBoolean( + process.env.BENCH_FAIL_ON_DEFERRED_REGRESSION, + false, + ); + const tokenLimit = toPositiveInt(process.env.BENCH_TOKEN_LIMIT, 100); + const orderLimit = toPositiveInt(process.env.BENCH_ORDER_LIMIT, 100); + const listingLimit = toPositiveInt(process.env.BENCH_LISTING_LIMIT, 100); + const deferredMaxP95DeltaPct = toNonNegativeNumber( + process.env.BENCH_DEFERRED_MAX_P95_DELTA_PCT, + 10, + ); + const eagerMaxP95DeltaPct = toNonNegativeNumber( + process.env.BENCH_EAGER_MAX_P95_DELTA_PCT, + 10, + ); + const distDir = + process.env.BENCH_ARCADE_DIST_DIR || 
getDefaultDistDir(cwd); + const outputFile = path.resolve( + process.env.BENCH_OUTPUT_FILE || ".artifacts/sql-benchmark/head.json", + ); + const markdownFile = path.resolve( + process.env.BENCH_MARKDOWN_FILE || ".artifacts/sql-benchmark/head.md", + ); + const baselineFile = process.env.BENCH_BASELINE_FILE + ? path.resolve(process.env.BENCH_BASELINE_FILE) + : undefined; + + const attributeFilters = parseAttributeFilters( + process.env.BENCH_ATTRIBUTE_FILTERS_JSON || DEFAULT_ATTRIBUTE_FILTERS_JSON, + ); + const traitName = process.env.BENCH_TRAIT_NAME || "beast id"; + + const marketplace = await loadMarketplaceModule(distDir); + + const { + createMarketplaceClient, + fetchCollectionTraitMetadata, + fetchTraitValues, + runBenchmarkOperation, + compareBenchmarkReports, + evaluateBenchmarkRegressions, + renderBenchmarkMarkdown, + usingFallbackHelpers, + } = resolveBenchmarkHelpers(marketplace); + + if (usingFallbackHelpers) { + console.warn( + "[benchmark] dist module missing benchmark helper exports, using script fallbacks", + ); + } + + const client = await createMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + defaultProject: projectId, + runtime: "edge", + }); + + const operationResults = []; + const operationFailures = []; + + const ensureTokenPageResult = async (resultPromise) => { + const result = await resultPromise; + const maybeError = result?.error?.error ?? result?.error; + if (maybeError) { + throw maybeError instanceof Error + ? maybeError + : new Error(String(maybeError)); + } + return result; + }; + + const ensurePageErrorsEmpty = async (resultPromise) => { + const result = await resultPromise; + const maybeError = result?.errors?.[0]?.error ?? result?.errors?.[0]; + if (maybeError) { + throw maybeError instanceof Error + ? 
maybeError + : new Error(String(maybeError)); + } + return result; + }; + + const runAndRecord = async (name, execute) => { + const outcome = await runBenchmarkOperation({ + name, + warmup, + iterations, + timeoutMs: operationTimeoutMs, + execute, + now: () => performance.now(), + }); + + if (outcome.result) { + operationResults.push(outcome.result); + return outcome.lastResult; + } + + if (outcome.failure) { + operationFailures.push(outcome.failure); + console.warn(`[benchmark] operation failed: ${name}`); + console.warn(`[benchmark] reason: ${outcome.failure.error}`); + if (failOnOperationError) { + throw new Error(`[${name}] ${outcome.failure.error}`); + } + } + + return undefined; + }; + + if (includeOptionalOps) { + await runAndRecord("getCollection", async () => + client.getCollection({ + projectId, + address: collectionAddress, + fetchImages: false, + }), + ); + } + + const listingModes = [ + { label: "eager", includeMetadata: true }, + { label: "deferred", includeMetadata: false }, + ]; + let metadataBatchTokenIds = []; + + for (const mode of listingModes) { + const firstPageResult = await runAndRecord( + `listCollectionTokens:first-page:${mode.label}`, + async () => + ensureTokenPageResult( + client.listCollectionTokens({ + address: collectionAddress, + project: projectId, + limit: tokenLimit, + includeMetadata: mode.includeMetadata, + fetchImages: false, + }), + ), + ); + + if (metadataBatchTokenIds.length === 0) { + metadataBatchTokenIds = (firstPageResult?.page?.tokens ?? []) + .map((token) => String(token?.token_id ?? 
"")) + .filter((tokenId) => tokenId.length > 0) + .slice(0, 100); + } + + const seedCursor = firstPageResult?.page?.nextCursor; + if (seedCursor) { + await runAndRecord(`listCollectionTokens:next-page:${mode.label}`, async () => + ensureTokenPageResult( + client.listCollectionTokens({ + address: collectionAddress, + project: projectId, + cursor: seedCursor, + limit: tokenLimit, + includeMetadata: mode.includeMetadata, + fetchImages: false, + }), + ), + ); + } + + if (attributeFilters) { + await runAndRecord( + `listCollectionTokens:attribute-filters:${mode.label}`, + async () => + ensureTokenPageResult( + client.listCollectionTokens({ + address: collectionAddress, + project: projectId, + limit: tokenLimit, + includeMetadata: mode.includeMetadata, + attributeFilters, + fetchImages: false, + }), + ), + ); + } + } + + await runAndRecord("getCollectionOrders", async () => + client.getCollectionOrders({ + collection: collectionAddress, + limit: orderLimit, + }), + ); + + await runAndRecord("listCollectionListings:verifyOwnership=false", async () => + client.listCollectionListings({ + collection: collectionAddress, + limit: listingLimit, + verifyOwnership: false, + projectId, + }), + ); + + if (includeOptionalOps) { + await runAndRecord("listCollectionListings:verifyOwnership=true", async () => + client.listCollectionListings({ + collection: collectionAddress, + limit: listingLimit, + verifyOwnership: true, + projectId, + }), + ); + } + + if (includeOptionalOps && fetchCollectionTraitMetadata) { + await runAndRecord("fetchCollectionTraitMetadata", async () => + ensurePageErrorsEmpty( + fetchCollectionTraitMetadata({ + address: collectionAddress, + projects: [projectId], + }), + ), + ); + } + + if (fetchTraitValues && traitName) { + await runAndRecord(`fetchTraitValues:${traitName}`, async () => + ensurePageErrorsEmpty( + fetchTraitValues({ + address: collectionAddress, + traitName, + projects: [projectId], + }), + ), + ); + } + + if ( + includeOptionalOps && + 
metadataBatchTokenIds.length >= 100 && + typeof client.getCollectionTokenMetadataBatch === "function" + ) { + await runAndRecord("getCollectionTokenMetadataBatch:100", async () => + client.getCollectionTokenMetadataBatch({ + address: collectionAddress, + project: projectId, + tokenIds: metadataBatchTokenIds.slice(0, 100), + fetchImages: false, + }), + ); + } + + if (operationResults.length === 0) { + throw new Error("No successful benchmark operations completed"); + } + + const report = { + generatedAt: new Date().toISOString(), + projectId, + collectionAddress, + warmup, + iterations, + operations: operationResults, + ...(operationFailures.length > 0 ? { failures: operationFailures } : {}), + }; + + const baseline = await maybeReadBaseline(baselineFile); + const comparison = + baseline && compareBenchmarkReports + ? compareBenchmarkReports(baseline, report) + : []; + + const coreThresholds = [ + { + operationName: "listCollectionTokens:first-page:deferred", + metric: "p95", + maxDeltaPct: deferredMaxP95DeltaPct, + }, + { + operationName: "listCollectionTokens:next-page:deferred", + metric: "p95", + maxDeltaPct: deferredMaxP95DeltaPct, + }, + ]; + const optionalThresholds = [ + { + operationName: "listCollectionTokens:first-page:eager", + metric: "p95", + maxDeltaPct: eagerMaxP95DeltaPct, + }, + { + operationName: "listCollectionTokens:next-page:eager", + metric: "p95", + maxDeltaPct: eagerMaxP95DeltaPct, + }, + ]; + + const coreViolations = + comparison.length > 0 + ? evaluateBenchmarkRegressions(comparison, coreThresholds) + : []; + const optionalViolations = + comparison.length > 0 + ? 
evaluateBenchmarkRegressions(comparison, optionalThresholds) + : []; + + for (const violation of coreViolations) { + console.warn( + `[benchmark] core regression ${violation.name} ${violation.metric}: +${violation.actualDeltaPct}% (threshold ${violation.maxDeltaPct}%)`, + ); + } + for (const violation of optionalViolations) { + console.warn( + `[benchmark] optional regression ${violation.name} ${violation.metric}: +${violation.actualDeltaPct}% (threshold ${violation.maxDeltaPct}%)`, + ); + } + + if (failOnDeferredRegression && coreViolations.length > 0) { + throw new Error( + `Deferred benchmark regression threshold exceeded (${coreViolations.length} operations)`, + ); + } + + const markdown = renderBenchmarkMarkdown(report, comparison); + + await ensureDir(outputFile); + await ensureDir(markdownFile); + await fs.writeFile(outputFile, JSON.stringify(report, null, 2), "utf-8"); + await fs.writeFile(markdownFile, markdown, "utf-8"); + + console.log(`Benchmark output written to ${outputFile}`); + console.log(`Benchmark markdown written to ${markdownFile}`); + console.log(markdown); +}; + +main() + .then(() => { + process.exit(0); + }) + .catch((error) => { + const message = error instanceof Error ? error.message : String(error); + console.error(`SQL benchmark failed: ${message}`); + process.exit(1); + }); diff --git a/packages/arcade-ts/src/marketplace/README.md b/packages/arcade-ts/src/marketplace/README.md index 1b315047..4a3c7e16 100644 --- a/packages/arcade-ts/src/marketplace/README.md +++ b/packages/arcade-ts/src/marketplace/README.md @@ -110,6 +110,7 @@ if (result.error) { | `cursor` | `string \| null` | No | Pagination cursor from a previous call. | | `attributeFilters` | `AttributeFilterInput` | No | Trait filters as `{ trait: value \| values[] }`. | | `tokenIds` | `string[]` | No | Return only specific token IDs. | +| `includeMetadata` | `boolean` | No | Include token metadata payloads (default `true`). 
| | `fetchImages` | `boolean` | No | Resolve token images. | | `project` | `string` | No | Override project for this request. | | `resolveTokenImage`| `ResolveTokenImage` | No | Per-call image resolver override. | @@ -117,6 +118,50 @@ if (result.error) { Returns `Promise` — `{ page, error }`. +For high-volume grids, prefer deferred metadata and hydrate selected tokens on demand: + +```ts +const deferredPage = await client.listCollectionTokens({ + address: "0x04f5...b15f", + limit: 100, + includeMetadata: false, + fetchImages: true, +}); + +const tokenIdsToHydrate = (deferredPage.page?.tokens ?? []) + .slice(0, 24) + .map((token) => String(token.token_id)); + +const hydrated = await client.getCollectionTokenMetadataBatch({ + address: "0x04f5...b15f", + tokenIds: tokenIdsToHydrate, + fetchImages: true, +}); +``` + +--- + +### `getCollectionTokenMetadataBatch` + +Hydrates metadata for a targeted set of token IDs in a collection. + +```ts +const tokens = await client.getCollectionTokenMetadataBatch({ + address: "0x04f5...b15f", + tokenIds: ["1", "2", "3"], + fetchImages: false, +}); +``` + +| Option | Type | Required | Description | +| ------------- | --------- | -------- | ------------------------------------ | +| `address` | `string` | Yes | Contract address of the collection. | +| `tokenIds` | `string[]`| Yes | Token IDs to hydrate metadata for. | +| `project` | `string` | No | Override the default Torii project. | +| `fetchImages` | `boolean` | No | Resolve token image URLs. | + +Returns `Promise`. 
+ --- ### `getCollectionOrders` diff --git a/packages/arcade-ts/src/marketplace/benchmark.test.ts b/packages/arcade-ts/src/marketplace/benchmark.test.ts new file mode 100644 index 00000000..087645e3 --- /dev/null +++ b/packages/arcade-ts/src/marketplace/benchmark.test.ts @@ -0,0 +1,240 @@ +import { describe, expect, it } from "vitest"; +import { + compareBenchmarkReports, + computeBenchmarkStats, + evaluateBenchmarkRegressions, + renderBenchmarkMarkdown, + runBenchmarkOperation, + type BenchmarkReport, +} from "./benchmark"; + +describe("marketplace benchmark helpers", () => { + it("computes percentile stats from sample durations", () => { + const stats = computeBenchmarkStats([10, 20, 30, 40, 50]); + + expect(stats.count).toBe(5); + expect(stats.minMs).toBe(10); + expect(stats.maxMs).toBe(50); + expect(stats.meanMs).toBe(30); + expect(stats.p50Ms).toBe(30); + expect(stats.p95Ms).toBe(50); + }); + + it("compares base and head reports by operation name", () => { + const base: BenchmarkReport = { + generatedAt: "2026-01-01T00:00:00.000Z", + projectId: "arcade-main", + collectionAddress: "0x1", + warmup: 1, + iterations: 3, + operations: [ + { + name: "listCollectionTokens:first-page", + samplesMs: [20, 21, 22], + stats: computeBenchmarkStats([20, 21, 22]), + }, + ], + }; + + const head: BenchmarkReport = { + ...base, + generatedAt: "2026-01-02T00:00:00.000Z", + operations: [ + { + name: "listCollectionTokens:first-page", + samplesMs: [15, 16, 17], + stats: computeBenchmarkStats([15, 16, 17]), + }, + ], + }; + + const rows = compareBenchmarkReports(base, head); + expect(rows).toHaveLength(1); + expect(rows[0]?.name).toBe("listCollectionTokens:first-page"); + expect(rows[0]?.deltaP50Pct).toBeLessThan(0); + expect(rows[0]?.deltaP95Pct).toBeLessThan(0); + }); + + it("renders markdown summary for CI step output", () => { + const report: BenchmarkReport = { + generatedAt: "2026-01-02T00:00:00.000Z", + projectId: "arcade-main", + collectionAddress: "0x1", + warmup: 1, + 
iterations: 3, + operations: [ + { + name: "getCollection", + samplesMs: [5, 6, 7], + stats: computeBenchmarkStats([5, 6, 7]), + }, + ], + }; + + const markdown = renderBenchmarkMarkdown(report); + expect(markdown).toContain("SQL Benchmark Report"); + expect(markdown).toContain("getCollection"); + expect(markdown).toContain("| Operation |"); + }); + + it("runs a benchmark operation and records success samples", async () => { + let currentNow = 0; + const now = () => { + currentNow += 5; + return currentNow; + }; + + const outcome = await runBenchmarkOperation({ + name: "listCollectionTokens:first-page", + warmup: 1, + iterations: 3, + execute: async () => ({ ok: true }), + now, + }); + + expect(outcome.result?.name).toBe("listCollectionTokens:first-page"); + expect(outcome.result?.stats.count).toBe(3); + expect(outcome.result?.samplesMs).toEqual([5, 5, 5]); + expect(outcome.failure).toBeUndefined(); + }); + + it("marks operation as failed when execute throws", async () => { + const outcome = await runBenchmarkOperation({ + name: "getCollection", + warmup: 1, + iterations: 2, + execute: async () => { + throw new Error("HTTP error! 
status: 400"); + }, + }); + + expect(outcome.result).toBeUndefined(); + expect(outcome.failure?.name).toBe("getCollection"); + expect(outcome.failure?.error).toContain("400"); + }); + + it("marks operation as failed when timeout elapses", async () => { + const outcome = await runBenchmarkOperation({ + name: "fetchCollectionTraitMetadata", + warmup: 1, + iterations: 1, + timeoutMs: 10, + execute: async () => + new Promise((resolve) => { + setTimeout(resolve, 100); + }), + }); + + expect(outcome.result).toBeUndefined(); + expect(outcome.failure?.name).toBe("fetchCollectionTraitMetadata"); + expect(outcome.failure?.error).toContain("timed out"); + }); + + it("renders failed operations in markdown output", () => { + const report: BenchmarkReport = { + generatedAt: "2026-01-02T00:00:00.000Z", + projectId: "arcade-main", + collectionAddress: "0x1", + warmup: 1, + iterations: 1, + operations: [ + { + name: "listCollectionTokens:first-page", + samplesMs: [10], + stats: computeBenchmarkStats([10]), + }, + ], + failures: [ + { + name: "getCollection", + error: "HTTP error! 
status: 400", + }, + ], + }; + + const markdown = renderBenchmarkMarkdown(report); + expect(markdown).toContain("Failed Operations"); + expect(markdown).toContain("getCollection"); + expect(markdown).toContain("status: 400"); + }); + + it("flags deferred operation regressions above configured threshold", () => { + const base: BenchmarkReport = { + generatedAt: "2026-01-01T00:00:00.000Z", + projectId: "arcade-main", + collectionAddress: "0x1", + warmup: 1, + iterations: 3, + operations: [ + { + name: "listCollectionTokens:first-page:deferred", + samplesMs: [100, 110, 120], + stats: computeBenchmarkStats([100, 110, 120]), + }, + ], + }; + const head: BenchmarkReport = { + ...base, + operations: [ + { + name: "listCollectionTokens:first-page:deferred", + samplesMs: [140, 145, 150], + stats: computeBenchmarkStats([140, 145, 150]), + }, + ], + }; + + const rows = compareBenchmarkReports(base, head); + const regressions = evaluateBenchmarkRegressions(rows, [ + { + operationName: "listCollectionTokens:first-page:deferred", + metric: "p95", + maxDeltaPct: 10, + }, + ]); + + expect(regressions).toHaveLength(1); + expect(regressions[0]?.name).toBe( + "listCollectionTokens:first-page:deferred", + ); + expect(regressions[0]?.actualDeltaPct).toBeGreaterThan(10); + }); + + it("does not flag deferred operation when delta stays within threshold", () => { + const base: BenchmarkReport = { + generatedAt: "2026-01-01T00:00:00.000Z", + projectId: "arcade-main", + collectionAddress: "0x1", + warmup: 1, + iterations: 3, + operations: [ + { + name: "listCollectionTokens:next-page:deferred", + samplesMs: [100, 110, 120], + stats: computeBenchmarkStats([100, 110, 120]), + }, + ], + }; + const head: BenchmarkReport = { + ...base, + operations: [ + { + name: "listCollectionTokens:next-page:deferred", + samplesMs: [105, 112, 121], + stats: computeBenchmarkStats([105, 112, 121]), + }, + ], + }; + + const rows = compareBenchmarkReports(base, head); + const regressions = 
evaluateBenchmarkRegressions(rows, [ + { + operationName: "listCollectionTokens:next-page:deferred", + metric: "p95", + maxDeltaPct: 10, + }, + ]); + + expect(regressions).toEqual([]); + }); +}); diff --git a/packages/arcade-ts/src/marketplace/benchmark.ts b/packages/arcade-ts/src/marketplace/benchmark.ts new file mode 100644 index 00000000..4b583019 --- /dev/null +++ b/packages/arcade-ts/src/marketplace/benchmark.ts @@ -0,0 +1,306 @@ +export interface BenchmarkStats { + count: number; + minMs: number; + maxMs: number; + meanMs: number; + p50Ms: number; + p95Ms: number; +} + +export interface BenchmarkOperationResult { + name: string; + samplesMs: number[]; + stats: BenchmarkStats; + notes?: string; +} + +export interface BenchmarkOperationFailure { + name: string; + error: string; +} + +export interface BenchmarkReport { + generatedAt: string; + projectId: string; + collectionAddress: string; + warmup: number; + iterations: number; + operations: BenchmarkOperationResult[]; + failures?: BenchmarkOperationFailure[]; +} + +export interface BenchmarkComparisonRow { + name: string; + baseP50Ms: number; + headP50Ms: number; + deltaP50Pct: number; + baseP95Ms: number; + headP95Ms: number; + deltaP95Pct: number; +} + +export interface BenchmarkRegressionThreshold { + operationName: string; + metric: "p50" | "p95"; + maxDeltaPct: number; +} + +export interface BenchmarkRegressionViolation { + name: string; + metric: "p50" | "p95"; + maxDeltaPct: number; + actualDeltaPct: number; +} + +const round = (value: number, precision = 2): number => { + const factor = 10 ** precision; + return Math.round(value * factor) / factor; +}; + +const normalizeErrorMessage = (error: unknown): string => { + if (error instanceof Error) return error.message; + if (typeof error === "string") return error; + return String(error); +}; + +const percentile = (sorted: number[], p: number): number => { + if (sorted.length === 0) return 0; + if (sorted.length === 1) return sorted[0]; + const index = 
Math.ceil((p / 100) * sorted.length) - 1; + const boundedIndex = Math.min(sorted.length - 1, Math.max(0, index)); + return sorted[boundedIndex]; +}; + +export function computeBenchmarkStats(samplesMs: number[]): BenchmarkStats { + if (samplesMs.length === 0) { + throw new Error("Cannot compute benchmark stats with zero samples"); + } + + const sorted = [...samplesMs].sort((a, b) => a - b); + const total = sorted.reduce((sum, value) => sum + value, 0); + + return { + count: sorted.length, + minMs: round(sorted[0]), + maxMs: round(sorted[sorted.length - 1]), + meanMs: round(total / sorted.length), + p50Ms: round(percentile(sorted, 50)), + p95Ms: round(percentile(sorted, 95)), + }; +} + +const percentDelta = (base: number, head: number): number => { + if (base === 0) return 0; + return round(((head - base) / base) * 100); +}; + +export function compareBenchmarkReports( + base: BenchmarkReport, + head: BenchmarkReport, +): BenchmarkComparisonRow[] { + const baseByName = new Map( + base.operations.map((operation) => [operation.name, operation]), + ); + + const rows: BenchmarkComparisonRow[] = []; + for (const operation of head.operations) { + const baseOp = baseByName.get(operation.name); + if (!baseOp) continue; + + rows.push({ + name: operation.name, + baseP50Ms: baseOp.stats.p50Ms, + headP50Ms: operation.stats.p50Ms, + deltaP50Pct: percentDelta(baseOp.stats.p50Ms, operation.stats.p50Ms), + baseP95Ms: baseOp.stats.p95Ms, + headP95Ms: operation.stats.p95Ms, + deltaP95Pct: percentDelta(baseOp.stats.p95Ms, operation.stats.p95Ms), + }); + } + + return rows; +} + +const getComparisonDelta = ( + row: BenchmarkComparisonRow, + metric: "p50" | "p95", +): number => (metric === "p50" ? 
row.deltaP50Pct : row.deltaP95Pct); + +export function evaluateBenchmarkRegressions( + rows: BenchmarkComparisonRow[], + thresholds: BenchmarkRegressionThreshold[], +): BenchmarkRegressionViolation[] { + if (rows.length === 0 || thresholds.length === 0) { + return []; + } + + const rowsByName = new Map(rows.map((row) => [row.name, row])); + const violations: BenchmarkRegressionViolation[] = []; + + for (const threshold of thresholds) { + const row = rowsByName.get(threshold.operationName); + if (!row) continue; + + const maxDeltaPct = Number(threshold.maxDeltaPct); + if (!Number.isFinite(maxDeltaPct) || maxDeltaPct < 0) { + continue; + } + + const actualDeltaPct = getComparisonDelta(row, threshold.metric); + if (actualDeltaPct > maxDeltaPct) { + violations.push({ + name: row.name, + metric: threshold.metric, + maxDeltaPct, + actualDeltaPct, + }); + } + } + + return violations; +} + +const withTimeout = async ( + promise: Promise, + timeoutMs: number | undefined, +): Promise => { + if (!timeoutMs || timeoutMs <= 0) { + return promise; + } + + return new Promise((resolve, reject) => { + const timer = setTimeout(() => { + reject(new Error(`Operation timed out after ${timeoutMs}ms`)); + }, timeoutMs); + + promise + .then((value) => resolve(value)) + .catch((error) => reject(error)) + .finally(() => clearTimeout(timer)); + }); +}; + +export interface RunBenchmarkOperationOptions { + name: string; + warmup: number; + iterations: number; + timeoutMs?: number; + execute: () => Promise; + now?: () => number; +} + +export interface RunBenchmarkOperationResult { + result?: BenchmarkOperationResult; + failure?: BenchmarkOperationFailure; + lastResult?: unknown; +} + +export async function runBenchmarkOperation( + options: RunBenchmarkOperationOptions, +): Promise { + const { + name, + warmup, + iterations, + timeoutMs, + execute, + now = () => Date.now(), + } = options; + + try { + let lastResult: unknown; + + for (let i = 0; i < warmup; i += 1) { + lastResult = await 
withTimeout(execute(), timeoutMs); + } + + const samplesMs: number[] = []; + for (let i = 0; i < iterations; i += 1) { + const startedAt = now(); + lastResult = await withTimeout(execute(), timeoutMs); + samplesMs.push(round(now() - startedAt)); + } + + return { + result: { + name, + samplesMs, + stats: computeBenchmarkStats(samplesMs), + }, + lastResult, + }; + } catch (error) { + return { + failure: { + name, + error: normalizeErrorMessage(error), + }, + }; + } +} + +const formatDelta = (value: number): string => { + const signed = value > 0 ? `+${value}` : `${value}`; + return `${signed}%`; +}; + +const escapeTableCell = (value: string): string => + value.replace(/\|/g, "\\|").replace(/\r?\n/g, " "); + +export function renderBenchmarkMarkdown( + report: BenchmarkReport, + comparison: BenchmarkComparisonRow[] = [], +): string { + const lines: string[] = []; + lines.push("## SQL Benchmark Report"); + lines.push(""); + lines.push(`- Project: \`${report.projectId}\``); + lines.push(`- Collection: \`${report.collectionAddress}\``); + lines.push(`- Generated: \`${report.generatedAt}\``); + lines.push(`- Warmup: \`${report.warmup}\``); + lines.push(`- Iterations: \`${report.iterations}\``); + lines.push(""); + lines.push("| Operation | p50 (ms) | p95 (ms) | mean (ms) |"); + lines.push("| --- | ---: | ---: | ---: |"); + for (const operation of report.operations) { + lines.push( + `| ${operation.name} | ${operation.stats.p50Ms} | ${operation.stats.p95Ms} | ${operation.stats.meanMs} |`, + ); + } + + if (report.failures && report.failures.length > 0) { + lines.push(""); + lines.push("### Failed Operations"); + lines.push(""); + lines.push("| Operation | Error |"); + lines.push("| --- | --- |"); + for (const failure of report.failures) { + lines.push( + `| ${escapeTableCell(failure.name)} | ${escapeTableCell( + failure.error, + )} |`, + ); + } + } + + if (comparison.length > 0) { + lines.push(""); + lines.push("### Base vs Head"); + lines.push(""); + lines.push( + "| 
Operation | base p50 | head p50 | delta p50 | base p95 | head p95 | delta p95 |", + ); + lines.push("| --- | ---: | ---: | ---: | ---: | ---: | ---: |"); + for (const row of comparison) { + lines.push( + `| ${row.name} | ${row.baseP50Ms} | ${row.headP50Ms} | ${formatDelta( + row.deltaP50Pct, + )} | ${row.baseP95Ms} | ${row.headP95Ms} | ${formatDelta( + row.deltaP95Pct, + )} |`, + ); + } + } + + return lines.join("\n"); +} diff --git a/packages/arcade-ts/src/marketplace/client.dojo.ts b/packages/arcade-ts/src/marketplace/client.dojo.ts index f0610160..d0fb2900 100644 --- a/packages/arcade-ts/src/marketplace/client.dojo.ts +++ b/packages/arcade-ts/src/marketplace/client.dojo.ts @@ -28,9 +28,11 @@ import { defaultResolveContractImage, defaultResolveTokenImage, inferImageFromMetadata, + normalizeTokenIds, parseJsonSafe, } from "./utils"; import type { + CollectionTokenMetadataBatchOptions, CollectionListingsOptions, CollectionOrdersOptions, CollectionSummaryOptions, @@ -38,6 +40,7 @@ import type { MarketplaceClientConfig, MarketplaceFees, NormalizedCollection, + NormalizedToken, RoyaltyFee, RoyaltyFeeOptions, TokenDetails, @@ -310,6 +313,45 @@ export async function createDojoMarketplaceClient( return queryOrders(options); }; + const getCollectionTokenMetadataBatch = async ( + options: CollectionTokenMetadataBatchOptions, + ): Promise => { + const projectId = ensureProjectId(options.project, defaultProject); + const normalizedTokenIds = [ + ...new Set(normalizeTokenIds(options.tokenIds)), + ]; + if (normalizedTokenIds.length === 0) { + return []; + } + + const { page, error } = await fetchCollectionTokens({ + address: options.address, + project: projectId, + tokenIds: normalizedTokenIds, + limit: normalizedTokenIds.length, + includeMetadata: true, + fetchImages: options.fetchImages ?? false, + resolveTokenImage: resolveTokenImage ?? 
defaultResolveTokenImage, + defaultProjectId: defaultProject, + }); + if (error) { + throw error.error; + } + + const tokensById = new Map(); + for (const token of page?.tokens ?? []) { + const normalizedId = normalizeTokenIdForQuery(String(token.token_id)); + if (!normalizedId) continue; + if (!tokensById.has(normalizedId)) { + tokensById.set(normalizedId, token); + } + } + + return normalizedTokenIds + .map((tokenId) => tokensById.get(tokenId)) + .filter((token): token is NormalizedToken => Boolean(token)); + }; + const listCollectionListings = async ( options: CollectionListingsOptions, ): Promise => { @@ -513,6 +555,7 @@ export async function createDojoMarketplaceClient( resolveTokenImage: resolveTokenImage ?? defaultResolveTokenImage, defaultProjectId: defaultProject, }), + getCollectionTokenMetadataBatch, getCollectionOrders, listCollectionListings, getToken, diff --git a/packages/arcade-ts/src/marketplace/client.edge.test.ts b/packages/arcade-ts/src/marketplace/client.edge.test.ts index 09fa69a4..30b80539 100644 --- a/packages/arcade-ts/src/marketplace/client.edge.test.ts +++ b/packages/arcade-ts/src/marketplace/client.edge.test.ts @@ -36,6 +36,74 @@ describe("createEdgeMarketplaceClient", () => { ); }); + it("falls back to token sample lookup when contract metadata is missing", async () => { + mockedFetchToriisSql + .mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [ + { + contract_address: "0xabc", + contract_type: "ERC721", + metadata: null, + total_supply: "0x2", + token_id: null, + }, + ], + }, + ], + errors: [], + } as any) + .mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [ + { + token_id: "0x2", + metadata: JSON.stringify({ name: "Fallback metadata" }), + }, + ], + }, + ], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const collection = await client.getCollection({ + address: "0xabc", + fetchImages: false, + 
}); + + expect(collection).not.toBeNull(); + expect(collection?.tokenIdSample).toBe("0x2"); + expect(collection?.metadata).toMatchObject({ name: "Fallback metadata" }); + expect(mockedFetchToriisSql).toHaveBeenCalledTimes(2); + expect(mockedFetchToriisSql.mock.calls[1]?.[1]).toContain("FROM tokens"); + }); + + it("returns null instead of throwing when getCollection SQL query fails", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [], + errors: [new Error("HTTP error! status: 400")], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await expect( + client.getCollection({ + address: "0xabc", + fetchImages: false, + }), + ).resolves.toBeNull(); + }); + it("lists tokens through SQL transport", async () => { mockedFetchToriisSql.mockResolvedValueOnce({ data: [ @@ -70,4 +138,434 @@ describe("createEdgeMarketplaceClient", () => { expect.stringContaining("FROM tokens"), ); }); + + it("includes metadata projection by default when listing tokens", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(sql).toContain("SELECT contract_address, token_id, metadata"); + }); + + it("omits metadata projection when includeMetadata is false", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [{ contract_address: "0xabc", token_id: "0x1" }], + }, + ], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const result = await client.listCollectionTokens({ + address: "0xabc", + includeMetadata: false, + fetchImages: true, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).toContain( + "SELECT contract_address, token_id, name, symbol, decimals", + ); + expect(sql).not.toContain("metadata"); + expect(result.error).toBeNull(); + expect(result.page?.tokens[0]?.image).toContain("/torii/static/"); + }); + + it("normalizes tokenIds before building SQL IN clause", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + tokenIds: ["0x1"], + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).toContain("token_id IN ('1')"); + }); + + it("canonicalizes equivalent decimal and hex tokenIds and deduplicates them", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + tokenIds: ["ff", "0xff", "255"], + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(sql).toContain("'255'"); + expect(sql).not.toContain("'0xff'"); + expect(sql).not.toContain("'ff'"); + expect((sql.match(/'255'/g) ?? []).length).toBe(1); + }); + + it("chunks large tokenId filters instead of building one unbounded IN list", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + tokenIds: Array.from({ length: 450 }, (_, index) => String(index + 1)), + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + const inClauseCount = (sql.match(/token_id IN \(/g) ?? []).length; + expect(inClauseCount).toBeGreaterThan(1); + expect(sql).toContain(" OR token_id IN ("); + }); + + it("hydrates metadata for normalized token ids through batch API", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [ + { + contract_address: "0xabc", + token_id: "255", + metadata: JSON.stringify({ name: "Token 255" }), + }, + ], + }, + ], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const tokens = await client.getCollectionTokenMetadataBatch({ + address: "0xabc", + tokenIds: ["ff", "0xff", "255"], + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).toContain("FROM tokens"); + expect(sql).toContain("'255'"); + expect((sql.match(/'255'/g) ?? 
[]).length).toBe(1); + expect(tokens).toHaveLength(1); + expect(tokens[0]?.metadata?.name).toBe("Token 255"); + }); + + it("returns empty metadata batch and avoids SQL when token ids are invalid", async () => { + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const tokens = await client.getCollectionTokenMetadataBatch({ + address: "0xabc", + tokenIds: ["", " "], + fetchImages: false, + }); + + expect(tokens).toEqual([]); + expect(mockedFetchToriisSql).not.toHaveBeenCalled(); + }); + + it("chunks metadata hydration for large token id sets", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.getCollectionTokenMetadataBatch({ + address: "0xabc", + tokenIds: Array.from({ length: 450 }, (_, index) => String(index + 1)), + fetchImages: false, + }); + + expect(mockedFetchToriisSql.mock.calls.length).toBeGreaterThan(1); + }); + + it("returns null nextCursor when limit is invalid and no rows are returned", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const result = await client.listCollectionTokens({ + address: "0xabc", + limit: 0, + fetchImages: false, + }); + + expect(result.error).toBeNull(); + expect(result.page?.nextCursor).toBeNull(); + }); + + it("uses keyset pagination for token queries and emits keyset cursor", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [ + { + contract_address: "0xabc", + token_id: "2", + metadata: JSON.stringify({ name: "Token 2" }), + }, + ], + }, + ], + errors: [], + } as any); + + const client = await 
createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const result = await client.listCollectionTokens({ + address: "0xabc", + limit: 1, + fetchImages: false, + }); + + expect(result.error).toBeNull(); + expect(result.page?.nextCursor).toBe("keyset:2"); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).not.toContain("OFFSET"); + }); + + it("applies keyset cursor to token query predicate", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + cursor: "keyset:9", + limit: 25, + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).toContain("token_id > '9'"); + expect(sql).not.toContain("OFFSET"); + }); + + it("pushes attribute filters into SQL instead of filtering client-side", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.listCollectionTokens({ + address: "0xabc", + attributeFilters: { + rarity: new Set(["legendary", "epic"]), + class: new Set(["wizard"]), + }, + fetchImages: false, + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(sql).toContain("FROM token_attributes"); + expect(sql).toContain("HAVING COUNT(DISTINCT trait_name) = 2"); + expect(sql).toContain("trait_name = 'rarity'"); + expect(sql).toContain("trait_name = 'class'"); + }); + + it("applies default order limit when getCollectionOrders limit is omitted", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + await client.getCollectionOrders({ + collection: "0xabc", + }); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(sql).toContain("ORDER BY id DESC LIMIT 100"); + }); + + it("short-circuits invalid orderIds without issuing malformed SQL", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const orders = await client.getCollectionOrders({ + collection: "0xabc", + orderIds: [Number.NaN as unknown as number], + }); + + expect(orders).toEqual([]); + expect(mockedFetchToriisSql).not.toHaveBeenCalled(); + }); + + it("chunks ownership verification queries for large listing sets", async () => { + const orderRows = Array.from({ length: 450 }, (_, index) => ({ + id: index + 1, + category: 2, + status: 1, + expiration: 9999999999, + collection: "0xabc", + token_id: index + 1, + quantity: 1, + price: 1, + currency: "0x1", + owner: "0x123", + })); + + mockedFetchToriisSql.mockImplementation(async (_projects, sql) => { + if (sql.includes('FROM "ARCADE-Order"')) { + return { + data: [{ endpoint: "arcade-main", data: orderRows }], + errors: [], + } as any; + } + + if (sql.includes("FROM token_balances")) { + return { + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + } as any; + } + + return { + data: [{ endpoint: 
"arcade-main", data: [] }], + errors: [], + } as any; + }); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const listings = await client.listCollectionListings({ + collection: "0xabc", + limit: 450, + verifyOwnership: true, + projectId: "arcade-main", + }); + + expect(listings).toEqual([]); + + const ownershipQueries = mockedFetchToriisSql.mock.calls + .map((call) => call[1]) + .filter((sql) => sql.includes("FROM token_balances")); + + expect(ownershipQueries.length).toBeGreaterThan(1); + }); + + it("relies on SQL category and status filtering for verifyOwnership=false listings", async () => { + mockedFetchToriisSql.mockResolvedValueOnce({ + data: [ + { + endpoint: "arcade-main", + data: [ + { + id: 1, + category: 2, + status: 1, + expiration: 9999999999, + collection: "0xabc", + token_id: 1, + quantity: 1, + price: 1, + currency: "0x1", + owner: "0x123", + }, + { + id: 2, + category: 1, + status: 2, + expiration: 9999999999, + collection: "0xabc", + token_id: 2, + quantity: 1, + price: 1, + currency: "0x1", + owner: "0x123", + }, + ], + }, + ], + errors: [], + } as any); + + const client = await createEdgeMarketplaceClient({ + chainId: constants.StarknetChainId.SN_MAIN, + }); + + const listings = await client.listCollectionListings({ + collection: "0xabc", + verifyOwnership: false, + projectId: "arcade-main", + }); + + expect(listings).toHaveLength(2); + + const sql = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(sql).toContain("category = 2"); + expect(sql).toContain("status = 1"); + }); }); diff --git a/packages/arcade-ts/src/marketplace/client.edge.ts b/packages/arcade-ts/src/marketplace/client.edge.ts index 980286fa..72aa89f1 100644 --- a/packages/arcade-ts/src/marketplace/client.edge.ts +++ b/packages/arcade-ts/src/marketplace/client.edge.ts @@ -3,6 +3,7 @@ import { fetchToriisSql } from "../modules/torii-sql-fetcher"; import { CategoryType, StatusType } from "../classes"; import { OrderModel } from "../modules/marketplace/order"; import type { + CollectionTokenMetadataBatchOptions, CollectionListingsOptions, CollectionOrdersOptions, CollectionSummaryOptions, @@ -19,14 +20,17 @@ import type { TokenDetailsOptions, } from "./types"; import { + canonicalizeTokenId, defaultResolveContractImage, defaultResolveTokenImage, inferImageFromMetadata, + normalizeTokenIds, normalizeTokens, parseJsonSafe, } from "./utils"; const DEFAULT_LIMIT = 100; +const SQL_IN_CHUNK_SIZE = 200; const statusValueMap: Record = { [StatusType.None]: 0, @@ -61,11 +65,7 @@ const asBigInt = (value: unknown): bigint => { const normalizeTokenIdForQuery = (tokenId?: string): string | undefined => { if (!tokenId) return undefined; - try { - return BigInt(tokenId).toString(); - } catch (_error) { - return tokenId; - } + return canonicalizeTokenId(tokenId) ?? 
undefined; }; const ensureProjectId = ( @@ -90,6 +90,115 @@ const extractRows = (data: any): any[] => { const toSqlList = (values: string[]): string => values.map((value) => `'${escapeSqlValue(value)}'`).join(", "); +const toPositiveInt = (value: number, fallback: number): number => { + if (!Number.isFinite(value)) return fallback; + const intValue = Math.floor(value); + if (intValue <= 0) return fallback; + return intValue; +}; + +const chunkArray = (values: T[], size: number): T[][] => { + if (values.length === 0) return []; + if (size <= 0 || values.length <= size) return [values]; + + const chunks: T[][] = []; + for (let index = 0; index < values.length; index += size) { + chunks.push(values.slice(index, index + size)); + } + return chunks; +}; + +const buildChunkedInPredicate = ( + column: string, + values: string[], + chunkSize = SQL_IN_CHUNK_SIZE, +): string => { + const chunks = chunkArray(values, chunkSize); + if (chunks.length === 0) return "1 = 0"; + if (chunks.length === 1) return `${column} IN (${toSqlList(chunks[0])})`; + return `(${chunks + .map((chunk) => `${column} IN (${toSqlList(chunk)})`) + .join(" OR ")})`; +}; + +const buildTokenProjectionSql = (includeMetadata: boolean): string => + includeMetadata + ? 
"contract_address, token_id, metadata, name, symbol, decimals" + : "contract_address, token_id, name, symbol, decimals"; + +const KEYSET_CURSOR_PREFIX = "keyset:"; + +const parseTokenCursor = ( + cursor?: string | null | undefined, +): { offset?: number; keysetTokenId?: string } => { + if (!cursor) return {}; + if (cursor.startsWith(KEYSET_CURSOR_PREFIX)) { + const tokenId = cursor.slice(KEYSET_CURSOR_PREFIX.length); + if (!tokenId) return {}; + return { keysetTokenId: tokenId }; + } + + const numericCursor = Number.parseInt(cursor, 10); + if (Number.isFinite(numericCursor) && `${numericCursor}` === cursor.trim()) { + return { offset: Math.max(0, numericCursor) }; + } + + return { keysetTokenId: cursor }; +}; + +const encodeKeysetCursor = (tokenId: string): string => + `${KEYSET_CURSOR_PREFIX}${tokenId}`; + +const buildAttributeFilterSqlClause = ( + collectionAddress: string, + filters: FetchCollectionTokensOptions["attributeFilters"], +): string | null => { + if (!filters || Object.keys(filters).length === 0) return null; + + const traitClauses: string[] = []; + const distinctTraits = new Set(); + + for (const [trait, values] of Object.entries(filters)) { + if (values == null) continue; + const selectedValues = Array.isArray(values) + ? 
values + : Array.from(values as Iterable); + const normalizedValues = selectedValues + .map((value) => String(value)) + .filter((value) => value.length > 0); + if (normalizedValues.length === 0) continue; + + distinctTraits.add(trait); + + const traitName = escapeSqlValue(trait); + if (normalizedValues.length === 1) { + traitClauses.push( + `(trait_name = '${traitName}' AND trait_value = '${escapeSqlValue( + normalizedValues[0], + )}')`, + ); + continue; + } + + traitClauses.push( + `(trait_name = '${traitName}' AND trait_value IN (${toSqlList( + normalizedValues, + )}))`, + ); + } + + if (traitClauses.length === 0) return null; + + return `token_id IN ( + SELECT token_id + FROM token_attributes + WHERE token_id LIKE '${escapeSqlValue(collectionAddress)}:%' + AND (${traitClauses.join(" OR ")}) + GROUP BY token_id + HAVING COUNT(DISTINCT trait_name) = ${distinctTraits.size} +)`; +}; + async function querySql(projectId: string, sql: string): Promise { const result = await fetchToriisSql([projectId], sql); if (result.errors?.length) { @@ -103,39 +212,6 @@ async function querySql(projectId: string, sql: string): Promise { return rows; } -const tokenMatchesAttributeFilters = ( - token: { metadata?: any }, - filters: FetchCollectionTokensOptions["attributeFilters"], -): boolean => { - if (!filters || Object.keys(filters).length === 0) return true; - const metadata = parseJsonSafe(token.metadata, token.metadata); - const attributes = Array.isArray((metadata as any)?.attributes) - ? 
(metadata as any).attributes - : []; - if (!attributes.length) return false; - - const traitMap = new Map>(); - for (const attribute of attributes) { - const trait = attribute?.trait_type; - const value = attribute?.value; - if (trait == null || value == null) continue; - const traitName = String(trait); - if (!traitMap.has(traitName)) traitMap.set(traitName, new Set()); - traitMap.get(traitName)?.add(String(value)); - } - - for (const [trait, values] of Object.entries(filters)) { - if (values == null) continue; - const selected = Array.isArray(values) ? values : Array.from(values as any); - const available = traitMap.get(trait); - if (!available) return false; - const matched = selected.some((value) => available.has(String(value))); - if (!matched) return false; - } - - return true; -}; - function toOrderModel(row: any): OrderModel { const orderLike = { id: asNumber(row.id), @@ -165,7 +241,9 @@ async function verifyListingsOwnership( ): Promise { if (!listings.length) return listings; - const collection = addAddressPadding(getChecksumAddress(collectionAddress)); + const collection = addAddressPadding( + getChecksumAddress(collectionAddress), + ).toLowerCase(); const owners = [ ...new Set(listings.map((order) => getChecksumAddress(order.owner))), ]; @@ -175,25 +253,31 @@ async function verifyListingsOwnership( ...new Set(listings.map((order) => BigInt(order.tokenId).toString())), ]; if (tokenIds.length === 0) return []; + const ownership = new Set(); - const ownerList = toSqlList(owners.map((owner) => owner.toLowerCase())); - const tokenIdList = toSqlList(tokenIds); + const ownerChunks = chunkArray( + owners.map((owner) => owner.toLowerCase()), + SQL_IN_CHUNK_SIZE, + ); + const tokenIdChunks = chunkArray(tokenIds, SQL_IN_CHUNK_SIZE); - const sql = `SELECT account_address, token_id, balance + for (const ownerChunk of ownerChunks) { + for (const tokenIdChunk of tokenIdChunks) { + const sql = `SELECT account_address, token_id FROM token_balances -WHERE 
lower(contract_address) = lower('${escapeSqlValue(collection)}') - AND lower(account_address) IN (${ownerList}) - AND token_id IN (${tokenIdList})`; +WHERE contract_address = '${escapeSqlValue(collection)}' + AND account_address IN (${toSqlList(ownerChunk)}) + AND token_id IN (${toSqlList(tokenIdChunk)}) + AND balance != '0x0000000000000000000000000000000000000000000000000000000000000000'`; - const rows = await querySql(projectId, sql); - const ownership = new Set(); + const rows = await querySql(projectId, sql); - for (const row of rows) { - const balance = asBigInt(row.balance ?? 0); - if (balance <= 0n) continue; - const owner = getChecksumAddress(String(row.account_address)); - const tokenId = BigInt(String(row.token_id)).toString(); - ownership.add(`${owner}_${tokenId}`); + for (const row of rows) { + const owner = getChecksumAddress(String(row.account_address)); + const tokenId = BigInt(String(row.token_id)).toString(); + ownership.add(`${owner}_${tokenId}`); + } + } } return listings.filter((order) => { @@ -218,64 +302,86 @@ export async function createEdgeMarketplaceClient( ): Promise => { const { projectId: projectIdInput, address, fetchImages = true } = options; const projectId = ensureProjectId(projectIdInput, defaultProject); - const collection = addAddressPadding(getChecksumAddress(address)); + const collection = addAddressPadding( + getChecksumAddress(address), + ).toLowerCase(); - const rows = await querySql( - projectId, - `SELECT contract_address, contract_type, type, metadata, total_supply, token_id + try { + const rows = await querySql( + projectId, + `SELECT + contract_address, + contract_type, + type, + metadata, + total_supply, + token_id FROM token_contracts -WHERE lower(contract_address) = lower('${escapeSqlValue(collection)}') +WHERE contract_address = '${escapeSqlValue(collection)}' LIMIT 1`, - ); + ); - const contract = rows[0]; - if (!contract) return null; + const contract = rows[0]; + if (!contract) return null; - let tokenSample: any 
| undefined; - let metadataRaw = contract.metadata; - if (!metadataRaw) { - const tokenRows = await querySql( - projectId, - `SELECT token_id, metadata + let tokenSample: { token_id?: string; metadata?: unknown } | undefined; + const requiresTokenFallback = + contract.metadata == null || contract.token_id == null; + + if (requiresTokenFallback) { + try { + const tokenRows = await querySql( + projectId, + `SELECT token_id, metadata FROM tokens -WHERE lower(contract_address) = lower('${escapeSqlValue(collection)}') +WHERE contract_address = '${escapeSqlValue(collection)}' +ORDER BY token_id LIMIT 1`, - ); - tokenSample = tokenRows[0]; - if (tokenSample?.metadata) metadataRaw = tokenSample.metadata; - } + ); + tokenSample = tokenRows[0]; + } catch (_error) { + tokenSample = undefined; + } + } - const metadata = parseJsonSafe(metadataRaw, metadataRaw); - let image: string | undefined; + const metadata = parseJsonSafe( + contract.metadata ?? tokenSample?.metadata, + contract.metadata ?? tokenSample?.metadata ?? null, + ); + let image: string | undefined; + + if (fetchImages) { + const contractImageResolver = + resolveContractImage ?? defaultResolveContractImage; + const maybeImage = await contractImageResolver(contract as any, { + projectId, + }); + if (typeof maybeImage === "string" && maybeImage.length > 0) { + image = maybeImage; + } + if (!image) image = inferImageFromMetadata(metadata); + } - if (fetchImages) { - const contractImageResolver = - resolveContractImage ?? defaultResolveContractImage; - const maybeImage = await contractImageResolver(contract as any, { + return { projectId, - }); - if (typeof maybeImage === "string" && maybeImage.length > 0) { - image = maybeImage; - } - if (!image) image = inferImageFromMetadata(metadata); + address: getChecksumAddress( + String(contract.contract_address ?? collection), + ), + contractType: + String(contract.contract_type ?? contract.type ?? 
"ERC721") || + "ERC721", + metadata, + totalSupply: asBigInt(contract.total_supply ?? "0x0"), + tokenIdSample: + (contract.token_id as string | null | undefined) ?? + (tokenSample?.token_id as string | null | undefined) ?? + null, + image, + raw: contract as any, + }; + } catch (_error) { + return null; } - - return { - projectId, - address: getChecksumAddress( - String(contract.contract_address ?? collection), - ), - contractType: - String(contract.contract_type ?? contract.type ?? "ERC721") || "ERC721", - metadata, - totalSupply: asBigInt(contract.total_supply ?? "0x0"), - tokenIdSample: - (contract.token_id as string | null | undefined) ?? - (tokenSample?.token_id as string | null | undefined) ?? - null, - image, - raw: contract as any, - }; }; const listCollectionTokens = async ( @@ -288,31 +394,49 @@ LIMIT 1`, attributeFilters, tokenIds, limit = DEFAULT_LIMIT, + includeMetadata = true, fetchImages = false, } = options; const projectId = ensureProjectId(project, defaultProject); - const collection = addAddressPadding(getChecksumAddress(address)); - const offset = cursor ? 
Number.parseInt(cursor, 10) || 0 : 0; + const collection = addAddressPadding( + getChecksumAddress(address), + ).toLowerCase(); + const cursorState = parseTokenCursor(cursor); + const effectiveLimit = toPositiveInt(limit, DEFAULT_LIMIT); + const normalizedTokenIds = normalizeTokenIds(tokenIds); - const conditions = [ - `lower(contract_address) = lower('${escapeSqlValue(collection)}')`, - ]; + const conditions = [`contract_address = '${escapeSqlValue(collection)}'`]; + + if (normalizedTokenIds.length > 0) { + const values = [...new Set(normalizedTokenIds)]; + conditions.push(buildChunkedInPredicate("token_id", values)); + } - if (tokenIds && tokenIds.length > 0) { - const values = [ - ...new Set(tokenIds.map((value) => escapeSqlValue(value))), - ]; + if (cursorState.keysetTokenId) { conditions.push( - `token_id IN (${values.map((v) => `'${v}'`).join(", ")})`, + `token_id > '${escapeSqlValue(cursorState.keysetTokenId)}'`, ); } - const sql = `SELECT contract_address, token_id, metadata, name, symbol, decimals + const traitClause = buildAttributeFilterSqlClause( + collection, + attributeFilters, + ); + if (traitClause) { + conditions.push(traitClause); + } + + const projection = buildTokenProjectionSql(includeMetadata); + const sql = `SELECT ${projection} FROM tokens WHERE ${conditions.join(" AND ")} ORDER BY token_id -LIMIT ${Math.max(1, Math.floor(limit))} -OFFSET ${Math.max(0, offset)}`; +LIMIT ${effectiveLimit}${ + cursorState.offset != null + ? ` +OFFSET ${cursorState.offset}` + : "" + }`; try { const rows = await querySql(projectId, sql); @@ -321,15 +445,21 @@ OFFSET ${Math.max(0, offset)}`; resolveTokenImage: resolveTokenImage ?? defaultResolveTokenImage, }); - const filtered = normalized.filter((token) => - tokenMatchesAttributeFilters(token, attributeFilters), - ) as NormalizedToken[]; - - const nextCursor = - rows.length >= limit ? 
String(offset + rows.length) : null; + let nextCursor: string | null = null; + if (rows.length >= effectiveLimit) { + if (cursorState.offset != null) { + nextCursor = String(cursorState.offset + rows.length); + } else { + const lastRow = rows[rows.length - 1]; + const lastTokenId = lastRow?.token_id; + if (lastTokenId != null) { + nextCursor = encodeKeysetCursor(String(lastTokenId)); + } + } + } return { page: { - tokens: filtered, + tokens: normalized as NormalizedToken[], nextCursor, }, error: null, @@ -345,28 +475,92 @@ OFFSET ${Math.max(0, offset)}`; } }; + const getCollectionTokenMetadataBatch = async ( + options: CollectionTokenMetadataBatchOptions, + ): Promise => { + const { address, tokenIds, project, fetchImages = false } = options; + const projectId = ensureProjectId(project, defaultProject); + const collection = addAddressPadding( + getChecksumAddress(address), + ).toLowerCase(); + const normalizedTokenIds = [...new Set(normalizeTokenIds(tokenIds))]; + + if (normalizedTokenIds.length === 0) { + return []; + } + + const tokenIdChunks = chunkArray(normalizedTokenIds, SQL_IN_CHUNK_SIZE); + const hydratedRows: any[] = []; + + for (const tokenIdChunk of tokenIdChunks) { + if (tokenIdChunk.length === 0) continue; + + const sql = `SELECT ${buildTokenProjectionSql(true)} +FROM tokens +WHERE contract_address = '${escapeSqlValue(collection)}' + AND token_id IN (${toSqlList(tokenIdChunk)}) +ORDER BY token_id`; + + const rows = await querySql(projectId, sql); + hydratedRows.push(...rows); + } + + const normalizedTokens = await normalizeTokens( + hydratedRows as any[], + projectId, + { + fetchImages, + resolveTokenImage: resolveTokenImage ?? defaultResolveTokenImage, + }, + ); + + const tokensByCanonicalId = new Map(); + for (const token of normalizedTokens as NormalizedToken[]) { + const canonicalId = canonicalizeTokenId(String(token.token_id ?? 
"")); + if (!canonicalId) continue; + if (!tokensByCanonicalId.has(canonicalId)) { + tokensByCanonicalId.set(canonicalId, token); + } + } + + return normalizedTokenIds + .map((tokenId) => tokensByCanonicalId.get(tokenId)) + .filter((token): token is NormalizedToken => Boolean(token)); + }; + const getCollectionOrders = async ( options: CollectionOrdersOptions, + projectIdOverride?: string, ): Promise => { const collection = addAddressPadding( getChecksumAddress(options.collection), - ); + ).toLowerCase(); const tokenId = normalizeTokenIdForQuery(options.tokenId); const status = options.status != null ? statusValueMap[options.status] : undefined; const category = options.category != null ? categoryValueMap[options.category] : undefined; - const conditions = [ - `lower(collection) = lower('${escapeSqlValue(collection)}')`, - ]; + const normalizedOrderIds = options.orderIds?.length + ? [ + ...new Set( + options.orderIds + .map((id) => Number(id)) + .filter((id) => Number.isInteger(id) && id >= 0), + ), + ] + : []; + + if (options.orderIds?.length && normalizedOrderIds.length === 0) { + return []; + } + + const conditions = [`collection = '${escapeSqlValue(collection)}'`]; if (tokenId !== undefined) { conditions.push(`token_id = '${escapeSqlValue(tokenId)}'`); } - if (options.orderIds?.length) { - conditions.push( - `id IN (${options.orderIds.map((id) => Number(id)).join(", ")})`, - ); + if (normalizedOrderIds.length) { + conditions.push(`id IN (${normalizedOrderIds.join(", ")})`); } if (status !== undefined) { conditions.push(`status = ${status}`); @@ -378,38 +572,39 @@ OFFSET ${Math.max(0, offset)}`; conditions.push(`category = ${category}`); } + const effectiveLimit = toPositiveInt( + options.limit ?? DEFAULT_LIMIT, + DEFAULT_LIMIT, + ); const sql = `SELECT id, category, status, expiration, collection, token_id, quantity, price, currency, owner FROM "ARCADE-Order" WHERE ${conditions.join(" AND ")} -ORDER BY id DESC${options.limit ? 
` LIMIT ${Math.max(1, Math.floor(options.limit))}` : ""}`; +ORDER BY id DESC LIMIT ${effectiveLimit}`; - const rows = await querySql(defaultProject, sql); + const rows = await querySql(projectIdOverride ?? defaultProject, sql); return rows.map(toOrderModel).filter((order) => order.exists()); }; const listCollectionListings = async ( options: CollectionListingsOptions, ): Promise => { - const baseOrders = await getCollectionOrders({ - collection: options.collection, - tokenId: options.tokenId, - limit: options.limit, - category: CategoryType.Sell, - status: StatusType.Placed, - }); - - const filtered = baseOrders.filter( - (order) => - order.category.value === CategoryType.Sell && - order.status.value === StatusType.Placed, + const projectId = ensureProjectId(options.projectId, defaultProject); + const baseOrders = await getCollectionOrders( + { + collection: options.collection, + tokenId: options.tokenId, + limit: options.limit, + category: CategoryType.Sell, + status: StatusType.Placed, + }, + projectId, ); - if (options.verifyOwnership === false || filtered.length === 0) { - return filtered; + if (options.verifyOwnership === false || baseOrders.length === 0) { + return baseOrders; } - const projectId = ensureProjectId(options.projectId, defaultProject); - return verifyListingsOwnership(projectId, options.collection, filtered); + return verifyListingsOwnership(projectId, options.collection, baseOrders); }; const getToken = async ( @@ -438,11 +633,14 @@ ORDER BY id DESC${options.limit ? 
` LIMIT ${Math.max(1, Math.floor(options.limit const token = tokenPage.page?.tokens[0]; if (!token) return null; - const orders = await getCollectionOrders({ - collection, - tokenId, - limit: orderLimit, - }); + const orders = await getCollectionOrders( + { + collection, + tokenId, + limit: orderLimit, + }, + projectId, + ); const now = Date.now() / 1000; let listings = orders.filter( @@ -508,6 +706,7 @@ LIMIT 1`, return { getCollection, listCollectionTokens, + getCollectionTokenMetadataBatch, getCollectionOrders, listCollectionListings, getToken, diff --git a/packages/arcade-ts/src/marketplace/filters.test.ts b/packages/arcade-ts/src/marketplace/filters.test.ts index 263670c3..e91e5584 100644 --- a/packages/arcade-ts/src/marketplace/filters.test.ts +++ b/packages/arcade-ts/src/marketplace/filters.test.ts @@ -3,6 +3,8 @@ import { aggregateTraitMetadata, buildAvailableFilters, buildPrecomputedFilters, + fetchTraitNamesSummary, + fetchTraitValues, fetchCollectionTraitMetadata, filterTokensByMetadata, flattenActiveFilters, @@ -135,7 +137,9 @@ describe("marketplace filters helpers", () => { expect(mockedFetchToriisSql).toHaveBeenCalledWith( ["arcade-main", "arcade-alt"], - expect.stringContaining("SELECT trait_name, trait_value"), + expect.stringContaining( + "SELECT ta.trait_name, ta.trait_value, COUNT(*) AS count", + ), ); expect(result.pages).toHaveLength(2); @@ -195,4 +199,106 @@ describe("marketplace filters helpers", () => { expect(available.Background?.Blue).toBe(20); expect(available.Ring?.Gold).toBe(30); }); + + it("keeps OR trait filters grouped while applying collection scope", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + }); + + await fetchCollectionTraitMetadata({ + address: "0x123", + traits: [ + { name: "Rarity", value: "Legendary" }, + { name: "Background", value: "Gold" }, + ], + projects: ["arcade-main"], + }); + + const query = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(query).toContain("collection_tokens AS ("); + expect(query).toContain("FROM tokens"); + expect(query).toContain( + "WHERE contract_address = '0x0000000000000000000000000000000000000000000000000000000000000123'", + ); + expect(query).toContain( + "((trait_name = 'Rarity' AND trait_value = 'Legendary') OR (trait_name = 'Background' AND trait_value = 'Gold'))", + ); + expect(query).not.toContain("token_id LIKE"); + }); + + it("uses unique trait count when multiple values are selected for the same trait", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + }); + + await fetchTraitValues({ + address: "0x123", + traitName: "Background", + otherTraitFilters: [ + { name: "Rarity", value: "Legendary" }, + { name: "Rarity", value: "Epic" }, + ], + projects: ["arcade-main"], + }); + + const query = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(query).toContain("HAVING COUNT(DISTINCT trait_name) = 1"); + }); + + it("uses exact trait comparisons instead of LIKE wildcards", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + }); + + await fetchTraitValues({ + address: "0x123", + traitName: "Background", + otherTraitFilters: [{ name: "Rarity", value: "Legendary" }], + projects: ["arcade-main"], + }); + + const query = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(query).toContain("trait_name = 'Rarity'"); + expect(query).toContain("trait_value = 'Legendary'"); + expect(query).not.toContain("trait_name LIKE"); + expect(query).not.toContain("trait_value LIKE"); + }); + + it("scopes trait name summary queries using collection token joins", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + }); + + await fetchTraitNamesSummary({ + address: "0x123", + projects: ["arcade-main"], + }); + + const query = mockedFetchToriisSql.mock.calls[0]?.[1] ?? 
""; + expect(query).toContain("collection_tokens AS ("); + expect(query).toContain("JOIN collection_tokens ct"); + expect(query).not.toContain("token_id LIKE"); + }); + + it("scopes trait value queries without filters using collection token joins", async () => { + mockedFetchToriisSql.mockResolvedValue({ + data: [{ endpoint: "arcade-main", data: [] }], + errors: [], + }); + + await fetchTraitValues({ + address: "0x123", + traitName: "Background", + projects: ["arcade-main"], + }); + + const query = mockedFetchToriisSql.mock.calls[0]?.[1] ?? ""; + expect(query).toContain("collection_tokens AS ("); + expect(query).toContain("JOIN collection_tokens ct"); + expect(query).not.toContain("token_id LIKE"); + }); }); diff --git a/packages/arcade-ts/src/marketplace/filters.ts b/packages/arcade-ts/src/marketplace/filters.ts index 03f5f2f8..73871644 100644 --- a/packages/arcade-ts/src/marketplace/filters.ts +++ b/packages/arcade-ts/src/marketplace/filters.ts @@ -112,34 +112,57 @@ const buildTraitWhereClause = (traits: TraitSelection[]): string => { return "1 = 1"; } - return traits - .map(({ name, value }) => { - const traitName = escapeSqlValue(name); - const traitValue = escapeSqlValue(value); - return `(trait_name LIKE '${traitName}' AND trait_value LIKE '${traitValue}')`; - }) - .join(" OR "); + const conditions = traits.map(({ name, value }) => { + const traitName = escapeSqlValue(name); + const traitValue = escapeSqlValue(value); + return `(trait_name = '${traitName}' AND trait_value = '${traitValue}')`; + }); + + return `(${conditions.join(" OR ")})`; }; -const buildTraitNamesSummaryQuery = (address: string): string => { +const countDistinctTraitNames = (traits: TraitSelection[]): number => { + return new Set(traits.map((trait) => trait.name)).size; +}; + +const buildCollectionTokensCte = (address: string): string => { const paddedAddress = addAddressPadding(address); - return `SELECT - trait_name, - COUNT(DISTINCT trait_value) as value_count, - SUM(cnt) as total_count 
- FROM ( - SELECT trait_name, trait_value, COUNT(*) as cnt - FROM token_attributes - WHERE token_id IN ( - SELECT token_id - FROM token_attributes - WHERE token_id LIKE '${paddedAddress}:%' - GROUP BY token_id - ) - GROUP BY trait_name, trait_value - ) - GROUP BY trait_name - ORDER BY trait_name;`; + return `collection_tokens AS ( + SELECT token_id + FROM tokens + WHERE contract_address = '${escapeSqlValue(paddedAddress)}' +)`; +}; + +const buildFilteredTokenIdsQuery = (filters: TraitSelection[]): string => { + if (filters.length === 0) { + return `SELECT ct.token_id +FROM collection_tokens ct`; + } + + const whereClause = buildTraitWhereClause(filters); + const distinctTraitCount = countDistinctTraitNames(filters); + return `SELECT ta.token_id +FROM token_attributes ta +JOIN collection_tokens ct + ON ta.token_id = ct.token_id +WHERE ${whereClause} +GROUP BY ta.token_id +HAVING COUNT(DISTINCT trait_name) = ${distinctTraitCount}`; +}; + +const buildTraitNamesSummaryQuery = (address: string): string => { + const collectionTokensCte = buildCollectionTokensCte(address); + return `WITH ${collectionTokensCte} +SELECT + ta.trait_name, + COUNT(DISTINCT ta.trait_value) AS value_count, + COUNT(*) AS total_count +FROM token_attributes ta +JOIN collection_tokens ct + ON ta.token_id = ct.token_id +GROUP BY ta.trait_name +ORDER BY ta.trait_name`; }; const buildTraitValuesQuery = ({ @@ -151,31 +174,31 @@ const buildTraitValuesQuery = ({ traitName: string; otherTraitFilters?: TraitSelection[]; }): string => { - const paddedAddress = addAddressPadding(address); + const collectionTokensCte = buildCollectionTokensCte(address); const escapedTraitName = escapeSqlValue(traitName); if (otherTraitFilters.length === 0) { - return `SELECT trait_value, COUNT(*) as count -FROM token_attributes -WHERE token_id LIKE '${paddedAddress}:%' - AND trait_name = '${escapedTraitName}' -GROUP BY trait_value + return `WITH ${collectionTokensCte} +SELECT ta.trait_value, COUNT(*) as count +FROM 
token_attributes ta +JOIN collection_tokens ct + ON ta.token_id = ct.token_id +WHERE ta.trait_name = '${escapedTraitName}' +GROUP BY ta.trait_value ORDER BY count DESC`; } - const whereClause = buildTraitWhereClause(otherTraitFilters); - return `SELECT trait_value, COUNT(*) as count -FROM token_attributes -WHERE trait_name = '${escapedTraitName}' - AND token_id IN ( - SELECT token_id - FROM token_attributes - WHERE ${whereClause} - AND token_id LIKE '${paddedAddress}:%' - GROUP BY token_id - HAVING COUNT(DISTINCT trait_name) = ${otherTraitFilters.length} - ) -GROUP BY trait_value + const filteredTokenIds = buildFilteredTokenIdsQuery(otherTraitFilters); + return `WITH ${collectionTokensCte}, +filtered_tokens AS ( + ${filteredTokenIds} +) +SELECT ta.trait_value, COUNT(*) AS count +FROM token_attributes ta +JOIN filtered_tokens ft + ON ta.token_id = ft.token_id +WHERE ta.trait_name = '${escapedTraitName}' +GROUP BY ta.trait_value ORDER BY count DESC`; }; @@ -188,7 +211,7 @@ const buildExpandedTraitsMetadataQuery = ({ traitNames: string[]; otherTraitFilters?: TraitSelection[]; }): string => { - const paddedAddress = addAddressPadding(address); + const collectionTokensCte = buildCollectionTokensCte(address); if (traitNames.length === 0) { return "SELECT trait_name, trait_value, 0 as count WHERE 1 = 0"; @@ -199,28 +222,28 @@ const buildExpandedTraitsMetadataQuery = ({ .join(", "); if (otherTraitFilters.length === 0) { - return `SELECT trait_name, trait_value, COUNT(*) as count -FROM token_attributes -WHERE token_id LIKE '${paddedAddress}:%' - AND trait_name IN (${traitNamesCondition}) -GROUP BY trait_name, trait_value -ORDER BY trait_name, count DESC`; + return `WITH ${collectionTokensCte} +SELECT ta.trait_name, ta.trait_value, COUNT(*) as count +FROM token_attributes ta +JOIN collection_tokens ct + ON ta.token_id = ct.token_id +WHERE ta.trait_name IN (${traitNamesCondition}) +GROUP BY ta.trait_name, ta.trait_value +ORDER BY ta.trait_name, count DESC`; } - const 
whereClause = buildTraitWhereClause(otherTraitFilters); - return `SELECT trait_name, trait_value, COUNT(*) as count -FROM token_attributes -WHERE trait_name IN (${traitNamesCondition}) - AND token_id IN ( - SELECT token_id - FROM token_attributes - WHERE ${whereClause} - AND token_id LIKE '${paddedAddress}:%' - GROUP BY token_id - HAVING COUNT(DISTINCT trait_name) = ${otherTraitFilters.length} - ) -GROUP BY trait_name, trait_value -ORDER BY trait_name, count DESC`; + const filteredTokenIds = buildFilteredTokenIdsQuery(otherTraitFilters); + return `WITH ${collectionTokensCte}, +filtered_tokens AS ( + ${filteredTokenIds} +) +SELECT ta.trait_name, ta.trait_value, COUNT(*) AS count +FROM token_attributes ta +JOIN filtered_tokens ft + ON ta.token_id = ft.token_id +WHERE ta.trait_name IN (${traitNamesCondition}) +GROUP BY ta.trait_name, ta.trait_value +ORDER BY ta.trait_name, count DESC`; }; const buildTraitMetadataQuery = ({ @@ -230,24 +253,18 @@ const buildTraitMetadataQuery = ({ address: string; traits: TraitSelection[]; }): string => { - const paddedAddress = addAddressPadding(address); - const whereClause = buildTraitWhereClause(traits); - const havingClause = traits.length - ? 
`HAVING COUNT(DISTINCT trait_name) = ${traits.length}` - : ""; - - return `SELECT trait_name, trait_value, COUNT(*) AS count -FROM token_attributes -WHERE token_id IN ( - SELECT token_id - FROM token_attributes - WHERE ${whereClause} - AND token_id LIKE '${paddedAddress}:%' - GROUP BY token_id - ${havingClause} + const collectionTokensCte = buildCollectionTokensCte(address); + const filteredTokenIds = buildFilteredTokenIdsQuery(traits); + return `WITH ${collectionTokensCte}, +filtered_tokens AS ( + ${filteredTokenIds} ) -GROUP BY trait_name, trait_value -ORDER BY trait_name, count DESC`; +SELECT ta.trait_name, ta.trait_value, COUNT(*) AS count +FROM token_attributes ta +JOIN filtered_tokens ft + ON ta.token_id = ft.token_id +GROUP BY ta.trait_name, ta.trait_value +ORDER BY ta.trait_name, count DESC`; }; const normalizeMetadataRow = (row: any): TraitMetadataRow | null => { diff --git a/packages/arcade-ts/src/marketplace/index.ts b/packages/arcade-ts/src/marketplace/index.ts index a8a14fba..0a149d09 100644 --- a/packages/arcade-ts/src/marketplace/index.ts +++ b/packages/arcade-ts/src/marketplace/index.ts @@ -4,3 +4,4 @@ export * from "./client"; export * from "./react"; export * from "./filters"; export * from "./runtime"; +export * from "./benchmark"; diff --git a/packages/arcade-ts/src/marketplace/react.test.tsx b/packages/arcade-ts/src/marketplace/react.test.tsx index 849d86d2..d465f464 100644 --- a/packages/arcade-ts/src/marketplace/react.test.tsx +++ b/packages/arcade-ts/src/marketplace/react.test.tsx @@ -36,6 +36,7 @@ describe("useMarketplaceCollectionTokens", () => { const createClient = (): MarketplaceClient => ({ getCollection: vi.fn(), listCollectionTokens: vi.fn().mockResolvedValue(mockResult), + getCollectionTokenMetadataBatch: vi.fn().mockResolvedValue([]), getCollectionOrders: vi.fn(), listCollectionListings: vi.fn(), getToken: vi.fn(), diff --git a/packages/arcade-ts/src/marketplace/sql-benchmark-helpers.test.ts 
b/packages/arcade-ts/src/marketplace/sql-benchmark-helpers.test.ts new file mode 100644 index 00000000..ff227b33 --- /dev/null +++ b/packages/arcade-ts/src/marketplace/sql-benchmark-helpers.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it } from "vitest"; + +describe("sql benchmark helper resolution", () => { + it("provides fallback benchmark helpers when dist module is missing them", async () => { + const helpersUrl = new URL( + "../../scripts/sql-benchmark-helpers.mjs", + import.meta.url, + ); + const { resolveBenchmarkHelpers } = await import(helpersUrl.href); + + const resolved = resolveBenchmarkHelpers({ + createMarketplaceClient: async () => ({}), + }); + + expect(resolved.createMarketplaceClient).toBeTypeOf("function"); + expect(resolved.runBenchmarkOperation).toBeTypeOf("function"); + expect(resolved.compareBenchmarkReports).toBeTypeOf("function"); + expect(resolved.evaluateBenchmarkRegressions).toBeTypeOf("function"); + expect(resolved.renderBenchmarkMarkdown).toBeTypeOf("function"); + + const outcome = await resolved.runBenchmarkOperation({ + name: "op", + warmup: 0, + iterations: 1, + execute: async () => ({ ok: true }), + now: (() => { + let n = 0; + return () => { + n += 5; + return n; + }; + })(), + }); + + expect(outcome.result?.name).toBe("op"); + expect(outcome.result?.stats.count).toBe(1); + }); +}); diff --git a/packages/arcade-ts/src/marketplace/tokens.test.ts b/packages/arcade-ts/src/marketplace/tokens.test.ts index 10adb65d..79b7da46 100644 --- a/packages/arcade-ts/src/marketplace/tokens.test.ts +++ b/packages/arcade-ts/src/marketplace/tokens.test.ts @@ -97,6 +97,67 @@ describe("fetchCollectionTokens", () => { expect(result.error?.error).toBeInstanceOf(Error); expect(result.error?.error.message).toBe("network error"); }); + + it("normalizes equivalent decimal and hex token ids to a single Torii filter id", async () => { + mockFetchToriis.mockResolvedValueOnce({ + data: [ + { + items: [], + next_cursor: null, + }, + ], + } as any); + + 
await fetchCollectionTokens({ + address: + "0x04f51290f2b0e16524084c27890711c7a955eb276cffec185d6f24f2a620b15f", + project: "projectA", + tokenIds: ["255", "0xff", "ff"], + fetchImages: false, + }); + + const callArgs = mockFetchToriis.mock.calls[0][1]; + const clientFn = callArgs.client; + expect(typeof clientFn).toBe("function"); + + const getTokens = vi.fn().mockResolvedValue({ + items: [], + next_cursor: null, + }); + await (clientFn as (params: any) => Promise)({ + client: { getTokens }, + } as any); + + expect(getTokens).toHaveBeenCalledWith( + expect.objectContaining({ + token_ids: [ + "00000000000000000000000000000000000000000000000000000000000000ff", + ], + }), + ); + }); + + it("omits metadata values when includeMetadata is false", async () => { + mockFetchToriis.mockResolvedValueOnce({ + data: [ + { + items: [sampleToken], + next_cursor: null, + }, + ], + } as any); + + const result = await fetchCollectionTokens({ + address: + "0x04f51290f2b0e16524084c27890711c7a955eb276cffec185d6f24f2a620b15f", + project: "projectA", + includeMetadata: false, + fetchImages: false, + }); + + expect(result.error).toBeNull(); + expect(result.page?.tokens[0]?.metadata).toBeUndefined(); + }); }); const sampleBalance = { diff --git a/packages/arcade-ts/src/marketplace/tokens.ts b/packages/arcade-ts/src/marketplace/tokens.ts index e96a2ccf..f1efed41 100644 --- a/packages/arcade-ts/src/marketplace/tokens.ts +++ b/packages/arcade-ts/src/marketplace/tokens.ts @@ -19,6 +19,7 @@ import type { TokenBalancesPage, } from "./types"; import { + canonicalizeTokenId, DEFAULT_PROJECT_ID, normalizeAttributeFilters, normalizeTokens, @@ -31,10 +32,29 @@ const DEFAULT_LIMIT = 100; const normalizeTokenIdsForQuery = (tokenIds?: string[]): string[] => { if (!tokenIds || tokenIds.length === 0) return []; - return tokenIds - .map(addAddressPadding) - .map((id) => (id.startsWith("0x") ? 
id.slice(2) : id)) - .filter((id) => id.length > 0); + + const normalized = tokenIds + .map((tokenId) => canonicalizeTokenId(tokenId)) + .flatMap((tokenId) => { + if (!tokenId) return []; + + try { + const hexTokenId = `0x${BigInt(tokenId).toString(16)}`; + return [addAddressPadding(hexTokenId).slice(2)]; + } catch (_error) { + try { + const padded = addAddressPadding(tokenId); + return [padded.startsWith("0x") ? padded.slice(2) : padded]; + } catch (_paddingError) { + const fallback = tokenId.startsWith("0x") + ? tokenId.slice(2) + : tokenId; + return fallback.length > 0 ? [fallback] : []; + } + } + }); + + return Array.from(new Set(normalized)); }; export async function fetchCollectionTokens( @@ -47,6 +67,7 @@ export async function fetchCollectionTokens( attributeFilters, tokenIds, limit = DEFAULT_LIMIT, + includeMetadata = true, fetchImages = false, resolveTokenImage, } = options; @@ -103,8 +124,15 @@ export async function fetchCollectionTokens( resolveTokenImage, }); + const pageTokens = includeMetadata + ? 
enriched + : enriched.map((token) => ({ + ...token, + metadata: undefined, + })); + const page: CollectionTokensPage = { - tokens: enriched, + tokens: pageTokens, nextCursor, }; diff --git a/packages/arcade-ts/src/marketplace/types.ts b/packages/arcade-ts/src/marketplace/types.ts index 01fea37f..e6ff053c 100644 --- a/packages/arcade-ts/src/marketplace/types.ts +++ b/packages/arcade-ts/src/marketplace/types.ts @@ -50,6 +50,7 @@ export interface FetchCollectionTokensOptions { attributeFilters?: AttributeFilterInput; tokenIds?: string[]; limit?: number; + includeMetadata?: boolean; fetchImages?: boolean; resolveTokenImage?: ResolveTokenImage; defaultProjectId?: string; @@ -60,6 +61,13 @@ export interface FetchCollectionTokensResult { error: CollectionTokensError | null; } +export interface CollectionTokenMetadataBatchOptions { + address: string; + tokenIds: string[]; + project?: string; + fetchImages?: boolean; +} + export interface NormalizedCollection { projectId: string; address: string; @@ -144,6 +152,9 @@ export interface MarketplaceClient { listCollectionTokens( options: FetchCollectionTokensOptions, ): Promise; + getCollectionTokenMetadataBatch( + options: CollectionTokenMetadataBatchOptions, + ): Promise; getCollectionOrders(options: CollectionOrdersOptions): Promise; listCollectionListings( options: CollectionListingsOptions, diff --git a/packages/arcade-ts/src/marketplace/utils.test.ts b/packages/arcade-ts/src/marketplace/utils.test.ts new file mode 100644 index 00000000..35c71a13 --- /dev/null +++ b/packages/arcade-ts/src/marketplace/utils.test.ts @@ -0,0 +1,46 @@ +import { describe, expect, it, vi } from "vitest"; + +const { mockedGetChecksumAddress } = vi.hoisted(() => ({ + mockedGetChecksumAddress: vi.fn((address: string) => `ck:${address}`), +})); + +vi.mock("starknet", async () => { + const actual = await vi.importActual("starknet"); + return { + ...actual, + getChecksumAddress: mockedGetChecksumAddress, + }; +}); + +import { normalizeTokens } from 
"./utils"; + +describe("marketplace utils", () => { + it("memoizes checksum resolution for repeated contract addresses", async () => { + mockedGetChecksumAddress.mockClear(); + + await normalizeTokens( + [ + { + contract_address: "0xabc", + token_id: "1", + metadata: "{}", + name: "", + symbol: "", + decimals: 0, + }, + { + contract_address: "0xabc", + token_id: "2", + metadata: "{}", + name: "", + symbol: "", + decimals: 0, + }, + ] as any, + "arcade-main", + { fetchImages: false }, + ); + + expect(mockedGetChecksumAddress).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/arcade-ts/src/marketplace/utils.ts b/packages/arcade-ts/src/marketplace/utils.ts index 50397209..3df4eae2 100644 --- a/packages/arcade-ts/src/marketplace/utils.ts +++ b/packages/arcade-ts/src/marketplace/utils.ts @@ -23,6 +23,11 @@ const IMAGE_CANDIDATE_KEYS = [ "animation_url", ] as const; +const DECIMAL_TOKEN_ID_PATTERN = /^[0-9]+$/; +const HEX_PREFIXED_TOKEN_ID_PATTERN = /^0x[0-9a-f]+$/i; +const BARE_HEX_TOKEN_ID_PATTERN = /^[0-9a-f]+$/i; +const HAS_HEX_ALPHA_PATTERN = /[a-f]/i; + export function ensureArray( value: AttributeFilterInputValue, ): Array { @@ -108,16 +113,36 @@ export function resolveProjects( return Array.from(new Set(projects.filter(Boolean))); } +export function canonicalizeTokenId(tokenId: string): string | null { + const normalized = tokenId.trim(); + if (!normalized) return null; + + try { + if (HEX_PREFIXED_TOKEN_ID_PATTERN.test(normalized)) { + return BigInt(normalized).toString(); + } + + if (DECIMAL_TOKEN_ID_PATTERN.test(normalized)) { + return BigInt(normalized).toString(); + } + + if ( + BARE_HEX_TOKEN_ID_PATTERN.test(normalized) && + HAS_HEX_ALPHA_PATTERN.test(normalized) + ) { + return BigInt(`0x${normalized}`).toString(); + } + + return BigInt(normalized).toString(); + } catch (_error) { + return normalized; + } +} + export function normalizeTokenIds(tokenIds?: string[]): string[] { if (!tokenIds || tokenIds.length === 0) return []; return tokenIds - 
.map((id) => { - try { - return BigInt(id).toString(); - } catch (_error) { - return id; - } - }) + .map((id) => canonicalizeTokenId(id)) .filter((id): id is string => typeof id === "string" && id.length > 0); } @@ -131,10 +156,15 @@ export async function normalizeTokens( ): Promise { const { fetchImages, resolveTokenImage } = options; const resolver = resolveTokenImage ?? defaultResolveTokenImage; + const checksumByAddress = new Map(); const resolved = await Promise.all( tokens.map(async (token) => { - const checksumAddress = getChecksumAddress(token.contract_address); + let checksumAddress = checksumByAddress.get(token.contract_address); + if (!checksumAddress) { + checksumAddress = getChecksumAddress(token.contract_address); + checksumByAddress.set(token.contract_address, checksumAddress); + } const metadata = parseJsonSafe(token.metadata, token.metadata); let image: string | undefined;