Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion params/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ import (
const (
VersionMajor = 5 // Major version component of the current release
VersionMinor = 9 // Minor version component of the current release
VersionPatch = 7 // Patch version component of the current release
VersionPatch = 8 // Patch version component of the current release
VersionMeta = "mainnet" // Version metadata to append to the version string
)

Expand Down
82 changes: 81 additions & 1 deletion rollup/fees/rollup_fee.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,34 @@ func estimateTxCompressionRatio(data []byte, blockNumber uint64, blockTime uint6
return ratio, nil
}

// calculateTxCompressedSize calculates the size of `data` after compression using da-codec.
// We constrain compressed_size so that it cannot exceed the original size:
//
// compressed_size(tx) = min(size(zstd(rlp(tx))), size(rlp(tx)))
//
// This provides an upper bound on the rollup fee for a given transaction, regardless
// what compression algorithm the sequencer/prover uses.
func calculateTxCompressedSize(data []byte, blockNumber uint64, blockTime uint64, config *params.ChainConfig) (*big.Int, error) {
// Compressed size of empty data is 0.
// In practice, the rlp-encoded transaction is always non-empty.
if len(data) == 0 {
return common.Big0, nil
}

// Compress data using da-codec
compressed, err := encoding.CompressScrollBatchBytes(data, blockNumber, blockTime, config)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just as a reminder: we need to watch out in case we change the zstd compression level here for batch submission

if err != nil {
log.Error("Transaction compression failed", "error", err, "data size", len(data), "data", common.Bytes2Hex(data), "blockNumber", blockNumber, "blockTime", blockTime, "galileoTime", config.GalileoTime)
return nil, fmt.Errorf("transaction compression failed: %w", err)
}

if len(compressed) < len(data) {
return new(big.Int).SetUint64(uint64(len(compressed))), nil
} else {
return new(big.Int).SetUint64(uint64(len(data))), nil
}
}

// calculatePenalty computes the penalty multiplier based on compression ratio
// penalty(tx) = compression_ratio(tx) >= penalty_threshold ? 1 * PRECISION : penalty_factor
func calculatePenalty(compressionRatio, penaltyThreshold, penaltyFactor *big.Int) *big.Int {
Expand Down Expand Up @@ -290,6 +318,43 @@ func calculateEncodedL1DataFeeFeynman(
return l1DataFee
}

// calculateEncodedL1DataFeeGalileo computes the rollup fee for an RLP-encoded tx, post Galileo
//
// Post Galileo rollup fee formula:
// rollupFee(tx) = feePerByte * compressedSize(tx) * (1 + penalty(tx)) / PRECISION
//
// Where:
// feePerByte = (execScalar * l1BaseFee + blobScalar * l1BlobBaseFee)
// compressedSize(tx) = min(len(zstd(rlp(tx))), len(rlp(tx)))
// penalty(tx) = compressedSize(tx) / penaltyFactor
func calculateEncodedL1DataFeeGalileo(
	l1BaseFee *big.Int,
	l1BlobBaseFee *big.Int,
	execScalar *big.Int,
	blobScalar *big.Int,
	penaltyFactor *big.Int,
	compressedSize *big.Int,
) *big.Int {
	// feePerByte = execScalar*l1BaseFee + blobScalar*l1BlobBaseFee
	feePerByte := new(big.Int).Add(
		new(big.Int).Mul(execScalar, l1BaseFee),
		new(big.Int).Mul(blobScalar, l1BlobBaseFee),
	)

	// base = feePerByte * compressedSize
	base := new(big.Int).Mul(feePerByte, compressedSize)

	// penalty = (base * compressedSize) / penaltyFactor
	// Multiply before dividing to preserve precision.
	penalty := new(big.Int).Mul(base, compressedSize)
	penalty.Div(penalty, penaltyFactor)

	// fee = (base + penalty) / PRECISION
	// (execScalar and blobScalar are scaled by PRECISION)
	fee := new(big.Int).Add(base, penalty)
	return fee.Div(fee, rcfg.Precision)
}

// calculateL1GasUsed computes the L1 gas used based on the calldata and
// constant sized overhead. The overhead can be decreased as the cost of the
// batch submission goes down via contract optimizations. This will not overflow
Expand Down Expand Up @@ -341,7 +406,7 @@ func CalculateL1DataFee(tx *types.Transaction, state StateDB, config *params.Cha
l1DataFee = calculateEncodedL1DataFee(raw, gpoState.overhead, gpoState.l1BaseFee, gpoState.scalar)
} else if !config.IsFeynman(blockTime) {
l1DataFee = calculateEncodedL1DataFeeCurie(raw, gpoState.l1BaseFee, gpoState.l1BlobBaseFee, gpoState.commitScalar, gpoState.blobScalar)
} else {
} else if !config.IsGalileo(blockTime) {
// Calculate compression ratio for Feynman
// Note: We compute the transaction ratio on tx.data, not on the full encoded transaction.
compressionRatio, err := estimateTxCompressionRatio(tx.Data(), blockNumber.Uint64(), blockTime, config)
Expand All @@ -360,6 +425,21 @@ func CalculateL1DataFee(tx *types.Transaction, state StateDB, config *params.Cha
gpoState.penaltyFactor,
compressionRatio,
)
} else {
// Note: In Galileo, we take the compressed size of the full RLP-encoded transaction.
compressedSize, err := calculateTxCompressedSize(raw, blockNumber.Uint64(), blockTime, config)
if err != nil {
return nil, fmt.Errorf("failed to calculate compressed size: tx hash=%s: %w", tx.Hash().Hex(), err)
}

l1DataFee = calculateEncodedL1DataFeeGalileo(
gpoState.l1BaseFee,
gpoState.l1BlobBaseFee,
gpoState.commitScalar, // now represents execScalar
gpoState.blobScalar,
gpoState.penaltyFactor, // in Galileo, penaltyFactor is repurposed as a coefficient of the blob utilization penalty
compressedSize,
)
}

// ensure l1DataFee fits into uint64 for circuit compatibility
Expand Down
Loading
Loading