@@ -87,8 +87,9 @@ type blobTxMeta struct {
     hash    common.Hash   // Transaction hash to maintain the lookup table
     vhashes []common.Hash // Blob versioned hashes to maintain the lookup table
 
-    id   uint64 // Storage ID in the pool's persistent store
-    size uint32 // Byte size in the pool's persistent store
+    id          uint64 // Storage ID in the pool's persistent store
+    storageSize uint32 // Byte size in the pool's persistent store
+    size        uint64 // RLP-encoded size of transaction including the attached blob
 
     nonce      uint64       // Needed to prioritize inclusion order within an account
     costCap    *uint256.Int // Needed to validate cumulative balance sufficiency
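The renaming above is the heart of the change: storageSize keeps the meaning the old size field had (bytes occupied by the encoded transaction in the pool's persistent store), while the new size field records the RLP-encoded length of the transaction together with its attached blobs, which is what tx.Size() reports. A minimal, self-contained sketch of the distinction, using made-up numbers:

package main

import "fmt"

// Illustrative only: the two size notions now tracked per pooled blob transaction.
func main() {
    var (
        storageSize uint32 = 140_000 // bytes the encoded tx occupies in the persistent store (hypothetical)
        size        uint64 = 131_325 // RLP length of the tx including its blob sidecar (hypothetical)
    )
    // The pool's disk-usage counter (p.stored) keeps summing storageSize,
    // while the new GetMetadata accessor reports size to callers.
    fmt.Println("stored on disk:", storageSize, "bytes; reported RLP size:", size, "bytes")
}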
@@ -108,19 +109,20 @@ type blobTxMeta struct {
 
 // newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
 // and assembles a helper struct to track in memory.
-func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
+func newBlobTxMeta(id uint64, size uint64, storageSize uint32, tx *types.Transaction) *blobTxMeta {
     meta := &blobTxMeta{
-        hash:       tx.Hash(),
-        vhashes:    tx.BlobHashes(),
-        id:         id,
-        size:       size,
-        nonce:      tx.Nonce(),
-        costCap:    uint256.MustFromBig(tx.Cost()),
-        execTipCap: uint256.MustFromBig(tx.GasTipCap()),
-        execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
-        blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
-        execGas:    tx.Gas(),
-        blobGas:    tx.BlobGas(),
+        hash:        tx.Hash(),
+        vhashes:     tx.BlobHashes(),
+        id:          id,
+        storageSize: storageSize,
+        size:        size,
+        nonce:       tx.Nonce(),
+        costCap:     uint256.MustFromBig(tx.Cost()),
+        execTipCap:  uint256.MustFromBig(tx.GasTipCap()),
+        execFeeCap:  uint256.MustFromBig(tx.GasFeeCap()),
+        blobFeeCap:  uint256.MustFromBig(tx.BlobGasFeeCap()),
+        execGas:     tx.Gas(),
+        blobGas:     tx.BlobGas(),
     }
     meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
     meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
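With the widened constructor, callers must now supply both sizes explicitly. The three call sites updated in the hunks below (parseTransaction, reinject and add) all follow the same pattern; a sketch, where buildMeta is a hypothetical wrapper and p, id and tx are assumed to be a *BlobPool, a store identifier and a pooled blob transaction:

// buildMeta is not part of the diff; it only spells out the argument order:
// tx.Size() supplies the RLP-encoded length, p.store.Size(id) the byte count
// occupied in the persistent store.
func buildMeta(p *BlobPool, id uint64, tx *types.Transaction) *blobTxMeta {
    return newBlobTxMeta(id, tx.Size(), p.store.Size(id), tx)
}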
@@ -480,7 +482,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
         return errors.New("missing blob sidecar")
     }
 
-    meta := newBlobTxMeta(id, size, tx)
+    meta := newBlobTxMeta(id, tx.Size(), size, tx)
     if p.lookup.exists(meta.hash) {
         // This path is only possible after a crash, where deleted items are not
         // removed via the normal shutdown-startup procedure and thus may get
@@ -507,7 +509,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
     p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
 
     p.lookup.track(meta)
-    p.stored += uint64(meta.size)
+    p.stored += uint64(meta.storageSize)
     return nil
 }
 
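Every p.stored adjustment in the hunks that follow is the same mechanical substitution: p.stored measures bytes held in the persistent store, so it keeps being maintained with storageSize rather than with the new RLP size. A hypothetical invariant check illustrating what the substitution preserves:

// storedInvariant is not part of the pool; it recomputes what p.stored is
// expected to equal after these changes: the sum of storageSize over all
// transactions currently indexed by the pool.
func storedInvariant(p *BlobPool) uint64 {
    var total uint64
    for _, txs := range p.index {
        for _, meta := range txs {
            total += uint64(meta.storageSize)
        }
    }
    return total // expected to match p.stored
}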
@@ -539,7 +541,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             ids = append(ids, txs[i].id)
             nonces = append(nonces, txs[i].nonce)
 
-            p.stored -= uint64(txs[i].size)
+            p.stored -= uint64(txs[i].storageSize)
             p.lookup.untrack(txs[i])
 
             // Included transactions blobs need to be moved to the limbo
@@ -580,7 +582,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
         nonces = append(nonces, txs[0].nonce)
 
         p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
-        p.stored -= uint64(txs[0].size)
+        p.stored -= uint64(txs[0].storageSize)
         p.lookup.untrack(txs[0])
 
         // Included transactions blobs need to be moved to the limbo
@@ -636,7 +638,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             dropRepeatedMeter.Mark(1)
 
             p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
-            p.stored -= uint64(txs[i].size)
+            p.stored -= uint64(txs[i].storageSize)
             p.lookup.untrack(txs[i])
 
             if err := p.store.Delete(id); err != nil {
@@ -658,7 +660,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             nonces = append(nonces, txs[j].nonce)
 
             p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
-            p.stored -= uint64(txs[j].size)
+            p.stored -= uint64(txs[j].storageSize)
             p.lookup.untrack(txs[j])
         }
         txs = txs[:i]
@@ -696,7 +698,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             nonces = append(nonces, last.nonce)
 
             p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
-            p.stored -= uint64(last.size)
+            p.stored -= uint64(last.storageSize)
             p.lookup.untrack(last)
         }
         if len(txs) == 0 {
@@ -736,7 +738,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             nonces = append(nonces, last.nonce)
 
             p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
-            p.stored -= uint64(last.size)
+            p.stored -= uint64(last.storageSize)
             p.lookup.untrack(last)
         }
         p.index[addr] = txs
@@ -1002,7 +1004,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
     }
 
     // Update the indices and metrics
-    meta := newBlobTxMeta(id, p.store.Size(id), tx)
+    meta := newBlobTxMeta(id, tx.Size(), p.store.Size(id), tx)
     if _, ok := p.index[addr]; !ok {
         if err := p.reserve(addr, true); err != nil {
             log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
@@ -1016,7 +1018,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
         p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
     }
     p.lookup.track(meta)
-    p.stored += uint64(meta.size)
+    p.stored += uint64(meta.storageSize)
     return nil
 }
 
@@ -1041,7 +1043,7 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
                 nonces = []uint64{tx.nonce}
             )
             p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
-            p.stored -= uint64(tx.size)
+            p.stored -= uint64(tx.storageSize)
             p.lookup.untrack(tx)
             txs[i] = nil
 
@@ -1051,7 +1053,7 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
                 nonces = append(nonces, tx.nonce)
 
                 p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
-                p.stored -= uint64(tx.size)
+                p.stored -= uint64(tx.storageSize)
                 p.lookup.untrack(tx)
                 txs[i+1+j] = nil
             }
@@ -1236,6 +1238,25 @@ func (p *BlobPool) GetRLP(hash common.Hash) []byte {
     return p.getRLP(hash)
 }
 
+// GetMetadata returns the transaction type and size for the transaction with
+// the given hash.
+//
+// The size refers to the length of the RLP encoding of a blob transaction,
+// including the attached blobs.
+func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
+    p.lock.RLock()
+    defer p.lock.RUnlock()
+
+    size, ok := p.lookup.sizeOfTx(hash)
+    if !ok {
+        return nil
+    }
+    return &txpool.TxMetadata{
+        Type: types.BlobTxType,
+        Size: size,
+    }
+}
+
 // GetBlobs returns a number of blobs and proofs for the given versioned hashes.
 // This is a utility method for the engine API, enabling consensus clients to
 // retrieve blobs from the pools directly instead of the network.
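A usage sketch for the new accessor; describeTx is a hypothetical helper and assumes the fmt and common packages are imported alongside a live *BlobPool:

// describeTx prints the metadata of a pooled blob transaction, returning
// false when the hash is not tracked by the pool.
func describeTx(pool *BlobPool, txHash common.Hash) bool {
    md := pool.GetMetadata(txHash)
    if md == nil {
        return false
    }
    fmt.Printf("type=%d rlp size=%d bytes\n", md.Type, md.Size)
    return true
}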
@@ -1375,7 +1396,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
     if err != nil {
         return err
     }
-    meta := newBlobTxMeta(id, p.store.Size(id), tx)
+    meta := newBlobTxMeta(id, tx.Size(), p.store.Size(id), tx)
 
     var (
         next = p.state.GetNonce(from)
@@ -1403,7 +1424,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 
         p.lookup.untrack(prev)
         p.lookup.track(meta)
-        p.stored += uint64(meta.size) - uint64(prev.size)
+        p.stored += uint64(meta.storageSize) - uint64(prev.storageSize)
     } else {
         // Transaction extends previously scheduled ones
         p.index[from] = append(p.index[from], meta)
@@ -1413,7 +1434,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
         }
         p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
         p.lookup.track(meta)
-        p.stored += uint64(meta.size)
+        p.stored += uint64(meta.storageSize)
     }
     // Recompute the rolling eviction fields. In case of a replacement, this will
     // recompute all subsequent fields. In case of an append, this will only do
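The add path has two accounting cases, both visible above: replacing an already indexed nonce adjusts p.stored by the difference between the new and old storage sizes, while appending a fresh nonce grows it by the full storage size of the new item. A hypothetical helper spelling out the arithmetic:

// storedDelta is not part of the pool; it mirrors the two p.stored updates in
// add. For a replacement the unsigned subtraction wraps when the new item is
// smaller, exactly like the pool's own p.stored += new - prev adjustment, and
// adding the wrapped value yields the correct (smaller) total.
func storedDelta(newMeta, prevMeta *blobTxMeta) uint64 {
    if prevMeta != nil { // replacement of an already indexed nonce
        return uint64(newMeta.storageSize) - uint64(prevMeta.storageSize)
    }
    return uint64(newMeta.storageSize) // append of a fresh nonce
}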
@@ -1500,7 +1521,7 @@ func (p *BlobPool) drop() {
         p.index[from] = txs
         p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
     }
-    p.stored -= uint64(drop.size)
+    p.stored -= uint64(drop.storageSize)
     p.lookup.untrack(drop)
 
     // Remove the transaction from the pool's eviction heap: