From f373a6238e5ad560db702df2520fdfd05b422b5d Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 9 Sep 2025 13:58:41 +0200 Subject: [PATCH 01/56] Add DA proof support to daprovider interface This is part of a series of changes for the Custom DA project. Summary: - Added a new daprovider.Validator interface with methods for generating proofs for Custom DA systems. - Added a reference implementation of a Custom DA provider. - Added a daprovider factory that supports anytrust and referenceda modes. In a follow-up PR the nitro node startup sequence will be modified to use this. Currently only the separate daprovider server uses this. - Replaced the AnyTrust-specific (aka das) provider server with a unified provider server that works with anytrust or referenceda modes. - Extended the DA Client with new RPC methods for generating proofs. Notes: The separate provider server executable is a thin RPC server wrapper around the anytrust and referenceda implementations. The idea is that people wanting to integrate their own DA system can use this as a guide for how to implement their own RPC service that lives outside the nitro codebase; we won't be including support for any additional DA implementations in provider server executable that we distribute. For legacy AnyTrust deployments we will most likely continue to have nitro spawn the daprovider server in-process to avoid needing to run an extra service, but by channeling everything through the JSON-RPC interface it reduces surface area of what we have to support. The Reference DA (referenceda) implementation is a minimal working example of how one could implement a daprovider, including support for validating the certificate against trusted signers. It uses an in-memory data storage backend. In various places there is commented out code related to the osp contract bindings that haven't yet been committed to a nitro-contracts branch that we want to use in nitro master. 
This PR shouldn't change any existing functionality or behavior, except the daprovider executable (which isn't used in production) has some new configuration options: ``` --mode must be "anytrust" or "referenceda" --provider-server.* server config eg address, port, etc --anytrust.* was previously called das-server --referenceda.* referenceda specific options ``` As much as possible we will try to rename references to "das" to "anytrust". When we launched Anytrust, we only had one offchain data availability mode so we just called it "das" at the time. This PR doesn't include new test code, but testing was done with the end-to-end block validator and challenge system tests on the custom-da branch. --- cmd/daprovider/daprovider.go | 155 +++++++++--- daprovider/daclient/daclient.go | 43 ++++ daprovider/factory/factory.go | 238 ++++++++++++++++++ daprovider/reader.go | 17 +- daprovider/referenceda/certificate.go | 113 +++++++++ daprovider/referenceda/config.go | 41 +++ daprovider/referenceda/reference_reader.go | 118 +++++++++ daprovider/referenceda/reference_validator.go | 120 +++++++++ daprovider/referenceda/reference_writer.go | 63 +++++ daprovider/referenceda/storage.go | 54 ++++ daprovider/server/provider_server.go | 193 ++++++++++++++ daprovider/util.go | 12 +- daprovider/validator.go | 26 ++ daprovider/writer.go | 2 +- 14 files changed, 1159 insertions(+), 36 deletions(-) create mode 100644 daprovider/factory/factory.go create mode 100644 daprovider/referenceda/certificate.go create mode 100644 daprovider/referenceda/config.go create mode 100644 daprovider/referenceda/reference_reader.go create mode 100644 daprovider/referenceda/reference_validator.go create mode 100644 daprovider/referenceda/reference_writer.go create mode 100644 daprovider/referenceda/storage.go create mode 100644 daprovider/server/provider_server.go create mode 100644 daprovider/validator.go diff --git a/cmd/daprovider/daprovider.go b/cmd/daprovider/daprovider.go index 5661edb125..92bfe44843
100644 --- a/cmd/daprovider/daprovider.go +++ b/cmd/daprovider/daprovider.go @@ -12,25 +12,35 @@ import ( koanfjson "github.com/knadh/koanf/parsers/json" flag "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" + "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das" - "github.com/offchainlabs/nitro/daprovider/das/dasserver" + "github.com/offchainlabs/nitro/daprovider/factory" + "github.com/offchainlabs/nitro/daprovider/referenceda" + dapserver "github.com/offchainlabs/nitro/daprovider/server" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) type Config struct { - DASServer dasserver.ServerConfig `koanf:"das-server"` + Mode factory.DAProviderMode `koanf:"mode"` + ProviderServer dapserver.ServerConfig `koanf:"provider-server"` WithDataSigner bool `koanf:"with-data-signer"` DataSignerWallet genericconf.WalletConfig `koanf:"data-signer-wallet"` + // Mode-specific configs + Anytrust das.DataAvailabilityConfig `koanf:"anytrust"` + ReferenceDA referenceda.Config `koanf:"referenceda"` + Conf genericconf.ConfConfig `koanf:"conf"` LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` @@ -42,9 +52,12 @@ type Config struct { } var DefaultConfig = Config{ - DASServer: dasserver.DefaultServerConfig, + Mode: "", // Must be explicitly set + ProviderServer: dapserver.DefaultServerConfig, WithDataSigner: false, DataSignerWallet: arbnode.DefaultBatchPosterL1WalletConfig, + Anytrust: das.DefaultDataAvailabilityConfig, + ReferenceDA: referenceda.DefaultConfig, Conf: genericconf.ConfConfigDefault, LogLevel: 
"INFO", LogType: "plaintext", @@ -61,6 +74,7 @@ func printSampleUsage(progname string) { func parseDAProvider(args []string) (*Config, error) { f := flag.NewFlagSet("daprovider", flag.ContinueOnError) + f.String("mode", string(DefaultConfig.Mode), "DA provider mode (anytrust or referenceda) - REQUIRED") f.Bool("with-data-signer", DefaultConfig.WithDataSigner, "set to enable data signing when processing store requests. If enabled requires data-signer-wallet config") genericconf.WalletConfigAddOptions("data-signer-wallet", f, DefaultConfig.DataSignerWallet.Pathname) @@ -73,7 +87,12 @@ func parseDAProvider(args []string) (*Config, error) { f.String("log-level", DefaultConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", DefaultConfig.LogType, "log type (plaintext or json)") - dasserver.ServerConfigAddOptions("das-server", f) + dapserver.ServerConfigAddOptions("provider-server", f) + + // Add mode-specific options + das.DataAvailabilityConfigAddDaserverOptions("anytrust", f) + referenceda.ConfigAddOptions("referenceda", f) + genericconf.ConfConfigAddOptions("conf", f) k, err := confighelpers.BeginCommonParse(f, args) @@ -81,7 +100,7 @@ func parseDAProvider(args []string) (*Config, error) { return nil, err } - if err = das.FixKeysetCLIParsing("das-server.data-availability.rpc-aggregator.backends", k); err != nil { + if err = das.FixKeysetCLIParsing("anytrust.rpc-aggregator.backends", k); err != nil { return nil, err } @@ -92,7 +111,7 @@ func parseDAProvider(args []string) (*Config, error) { if config.Conf.Dump { err = confighelpers.DumpConfig(k, map[string]interface{}{ - "das-server.data-availability.key.priv-key": "", + "anytrust.key.priv-key": "", }) if err != nil { return nil, fmt.Errorf("error removing extra parameters before dump: %w", err) @@ -124,6 +143,12 @@ func startup() error { if err != nil { confighelpers.PrintErrorAndExit(err, printSampleUsage) } + + // Validate mode + if config.Mode == "" { + return 
errors.New("--mode must be explicitly specified (anytrust or referenceda)") + } + logLevel, err := genericconf.ToSlogLevel(config.LogLevel) if err != nil { confighelpers.PrintErrorAndExit(err, printSampleUsage) @@ -154,50 +179,113 @@ func startup() error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if !config.DASServer.DataAvailability.Enable { - return errors.New("--das-server.data-availability.enable is a required to start a das-server") - } + // Mode-specific validation and setup + var l1Client *ethclient.Client + var l1Reader *headerreader.HeaderReader + var seqInboxAddr common.Address + var dataSigner signature.DataSignerFunc - if config.DASServer.DataAvailability.ParentChainNodeURL == "" || config.DASServer.DataAvailability.ParentChainNodeURL == "none" { - return errors.New("--das-server.data-availability.parent-chain-node-url is a required to start a das-server") - } + if config.Mode == factory.ModeAnyTrust { + if !config.Anytrust.Enable { + return errors.New("--anytrust.enable is required to start an AnyTrust provider server") + } + + if config.Anytrust.ParentChainNodeURL == "" || config.Anytrust.ParentChainNodeURL == "none" { + return errors.New("--anytrust.parent-chain-node-url is required to start an AnyTrust provider server") + } + + if config.Anytrust.SequencerInboxAddress == "" || config.Anytrust.SequencerInboxAddress == "none" { + return errors.New("--anytrust.sequencer-inbox-address must be set to a valid L1 contract address") + } + + l1Client, err = das.GetL1Client(ctx, config.Anytrust.ParentChainConnectionAttempts, config.Anytrust.ParentChainNodeURL) + if err != nil { + return err + } + + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) + if err != nil { + return err + } - if config.DASServer.DataAvailability.SequencerInboxAddress == "" || 
config.DASServer.DataAvailability.SequencerInboxAddress == "none" { - return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address") + seqInboxAddrPtr, err := das.OptionalAddressFromString(config.Anytrust.SequencerInboxAddress) + if err != nil { + return err + } + if seqInboxAddrPtr == nil { + return errors.New("must provide --anytrust.sequencer-inbox-address set to a valid contract address") + } + seqInboxAddr = *seqInboxAddrPtr + + if config.WithDataSigner && config.ProviderServer.EnableDAWriter { + l1ChainId, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("couldn't read L1 chainid: %w", err) + } + if _, dataSigner, err = util.OpenWallet("data-signer", &config.DataSignerWallet, l1ChainId); err != nil { + return err + } + } + } else if config.Mode == factory.ModeReferenceDA { + if !config.ReferenceDA.Enable { + return errors.New("--referenceda.enable is required to start a ReferenceDA provider server") + } } - l1Client, err := das.GetL1Client(ctx, config.DASServer.DataAvailability.ParentChainConnectionAttempts, config.DASServer.DataAvailability.ParentChainNodeURL) + // Create DA provider factory based on mode + providerFactory, err := factory.NewDAProviderFactory( + config.Mode, + &config.Anytrust, + &config.ReferenceDA, + dataSigner, + l1Client, + l1Reader, + seqInboxAddr, + config.ProviderServer.EnableDAWriter, + ) if err != nil { return err } - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) - if err != nil { + if err := providerFactory.ValidateConfig(); err != nil { return err } - seqInboxAddr, err := das.OptionalAddressFromString(config.DASServer.DataAvailability.SequencerInboxAddress) + // Create reader/writer/validator using factory + var cleanupFuncs []func() + + reader, readerCleanup, err := providerFactory.CreateReader(ctx) if err != nil { return err } - if 
seqInboxAddr == nil { - return errors.New("must provide --das-server.data-availability.sequencer-inbox-address set to a valid contract address or 'none'") + if readerCleanup != nil { + cleanupFuncs = append(cleanupFuncs, readerCleanup) } - var dataSigner signature.DataSignerFunc - if config.WithDataSigner && config.DASServer.EnableDAWriter { - l1ChainId, err := l1Client.ChainID(ctx) + var writer daprovider.Writer + if config.ProviderServer.EnableDAWriter { + var writerCleanup func() + writer, writerCleanup, err = providerFactory.CreateWriter(ctx) if err != nil { - return fmt.Errorf("couldn't read L1 chainid: %w", err) - } - if _, dataSigner, err = util.OpenWallet("data-signer", &config.DataSignerWallet, l1ChainId); err != nil { return err } + if writerCleanup != nil { + cleanupFuncs = append(cleanupFuncs, writerCleanup) + } + } + + // Create validator (may be nil for AnyTrust mode) + validator, validatorCleanup, err := providerFactory.CreateValidator(ctx) + if err != nil { + return err + } + if validatorCleanup != nil { + cleanupFuncs = append(cleanupFuncs, validatorCleanup) } - log.Info("Starting json rpc server", "addr", config.DASServer.Addr, "port", config.DASServer.Port) - dasServer, closeFn, err := dasserver.NewServer(ctx, &config.DASServer, dataSigner, l1Client, l1Reader, *seqInboxAddr) + log.Info("Starting json rpc server", "mode", config.Mode, "addr", config.ProviderServer.Addr, "port", config.ProviderServer.Port) + providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator) if err != nil { return err } @@ -208,12 +296,15 @@ func startup() error { <-sigint - if err = dasServer.Shutdown(ctx); err != nil { + if err = providerServer.Shutdown(ctx); err != nil { return err } - if closeFn != nil { - closeFn() + + // Call all cleanup functions + for _, cleanup := range cleanupFuncs { + cleanup() } + if l1Reader != nil && l1Reader.Started() { l1Reader.StopAndWait() } diff --git a/daprovider/daclient/daclient.go 
b/daprovider/daclient/daclient.go index 47f8b342c1..adf473c9b3 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -1,3 +1,6 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + package daclient import ( @@ -10,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/util/rpcclient" ) @@ -101,3 +105,42 @@ func (c *Client) Store( } return storeResult.SerializedDACert, nil } + +// GenerateReadPreimageProofResult is the result struct that data availability providers +// should use to respond with a proof for a specific preimage +type GenerateReadPreimageProofResult struct { + Proof hexutil.Bytes `json:"proof,omitempty"` +} + +// GenerateReadPreimageProof generates a proof for a specific preimage at a given offset +// This method calls the external DA provider's RPC endpoint to generate the proof +func (c *Client) GenerateReadPreimageProof( + ctx context.Context, + preimageType arbutil.PreimageType, + certHash common.Hash, + offset uint64, + certificate []byte, +) ([]byte, error) { + var generateProofResult GenerateReadPreimageProofResult + if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", hexutil.Uint(preimageType), certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { + return nil, fmt.Errorf("error returned from daprovider_generateProof rpc method, err: %w", err) + } + return generateProofResult.Proof, nil +} + +// GenerateCertificateValidityProofResult is the result struct that data availability providers should use to respond with validity proof +type GenerateCertificateValidityProofResult struct { + Proof hexutil.Bytes `json:"proof,omitempty"` +} + +func (c *Client) GenerateCertificateValidityProof( + ctx 
context.Context, + preimageType arbutil.PreimageType, + certificate []byte, +) ([]byte, error) { + var generateCertificateValidityProofResult GenerateCertificateValidityProofResult + if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Uint(preimageType), hexutil.Bytes(certificate)); err != nil { + return nil, fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err) + } + return generateCertificateValidityProofResult.Proof, nil +} diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go new file mode 100644 index 0000000000..e639167b97 --- /dev/null +++ b/daprovider/factory/factory.go @@ -0,0 +1,238 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package factory + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/das" + "github.com/offchainlabs/nitro/daprovider/das/dasutil" + "github.com/offchainlabs/nitro/daprovider/referenceda" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/signature" +) + +type DAProviderMode string + +const ( + ModeAnyTrust DAProviderMode = "anytrust" + ModeReferenceDA DAProviderMode = "referenceda" +) + +type DAProviderFactory interface { + CreateReader(ctx context.Context) (daprovider.Reader, func(), error) + CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) + CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) + ValidateConfig() error +} + +type AnyTrustFactory struct { + config *das.DataAvailabilityConfig + dataSigner signature.DataSignerFunc + l1Client *ethclient.Client + l1Reader 
*headerreader.HeaderReader + seqInboxAddr common.Address + enableWriter bool +} + +type ReferenceDAFactory struct { + config *referenceda.Config + enableWriter bool + dataSigner signature.DataSignerFunc + l1Client *ethclient.Client +} + +func NewDAProviderFactory( + mode DAProviderMode, + anytrust *das.DataAvailabilityConfig, + referencedaCfg *referenceda.Config, + dataSigner signature.DataSignerFunc, + l1Client *ethclient.Client, + l1Reader *headerreader.HeaderReader, + seqInboxAddr common.Address, + enableWriter bool, +) (DAProviderFactory, error) { + switch mode { + case ModeAnyTrust: + return &AnyTrustFactory{ + config: anytrust, + dataSigner: dataSigner, + l1Client: l1Client, + l1Reader: l1Reader, + seqInboxAddr: seqInboxAddr, + enableWriter: enableWriter, + }, nil + case ModeReferenceDA: + factory := &ReferenceDAFactory{ + config: referencedaCfg, + enableWriter: enableWriter, + dataSigner: dataSigner, + l1Client: l1Client, + } + return factory, nil + default: + return nil, fmt.Errorf("unsupported DA provider mode: %s", mode) + } +} + +// AnyTrust Factory Implementation +func (f *AnyTrustFactory) ValidateConfig() error { + if !f.config.Enable { + return errors.New("anytrust data availability must be enabled") + } + + if f.enableWriter { + if !f.config.RPCAggregator.Enable || !f.config.RestAggregator.Enable { + return errors.New("rpc-aggregator.enable and rest-aggregator.enable must be set when running writer mode") + } + } else { + if f.config.RPCAggregator.Enable { + return errors.New("rpc-aggregator is only for writer mode") + } + if !f.config.RestAggregator.Enable { + return errors.New("rest-aggregator.enable must be set for reader mode") + } + } + + return nil +} + +func (f *AnyTrustFactory) CreateReader(ctx context.Context) (daprovider.Reader, func(), error) { + if f.enableWriter { + _, daReader, keysetFetcher, lifecycleManager, err := das.CreateDAReaderAndWriter( + ctx, f.config, f.dataSigner, f.l1Client, f.seqInboxAddr) + if err != nil { + return nil, 
nil, err + } + + daReader = das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) + if f.config.PanicOnError { + daReader = das.NewReaderPanicWrapper(daReader) + } + + reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) + } + } + return reader, cleanupFn, nil + } else { + daReader, keysetFetcher, lifecycleManager, err := das.CreateDAReader( + ctx, f.config, f.l1Reader, &f.seqInboxAddr) + if err != nil { + return nil, nil, err + } + + daReader = das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) + if f.config.PanicOnError { + daReader = das.NewReaderPanicWrapper(daReader) + } + + reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) + } + } + return reader, cleanupFn, nil + } +} + +func (f *AnyTrustFactory) CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) { + if !f.enableWriter { + return nil, nil, nil + } + + daWriter, _, _, lifecycleManager, err := das.CreateDAReaderAndWriter( + ctx, f.config, f.dataSigner, f.l1Client, f.seqInboxAddr) + if err != nil { + return nil, nil, err + } + + if f.config.PanicOnError { + daWriter = das.NewWriterPanicWrapper(daWriter) + } + + writer := dasutil.NewWriterForDAS(daWriter) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) + } + } + return writer, cleanupFn, nil +} + +func (f *AnyTrustFactory) CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) { + // AnyTrust doesn't use the Validator interface + return nil, nil, nil +} + +// ReferenceDA Factory Implementation +func (f *ReferenceDAFactory) ValidateConfig() error { + if !f.config.Enable { + return errors.New("referenceda must be enabled") + } + return nil +} + +func (f *ReferenceDAFactory) CreateReader(ctx context.Context) (daprovider.Reader, func(), error) { + if 
f.config.ValidatorContract == "" { + return nil, nil, errors.New("validator-contract address not configured for reference DA reader") + } + validatorAddr := common.HexToAddress(f.config.ValidatorContract) + reader := referenceda.NewReader(f.l1Client, validatorAddr) + return reader, nil, nil +} + +func (f *ReferenceDAFactory) CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) { + if !f.enableWriter { + return nil, nil, nil + } + + if f.dataSigner == nil { + // Try to create signer from config + var signer signature.DataSignerFunc + if f.config.SigningKey.PrivateKey != "" { + privKey, err := crypto.HexToECDSA(f.config.SigningKey.PrivateKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid private key: %w", err) + } + signer = signature.DataSignerFromPrivateKey(privKey) + } else if f.config.SigningKey.KeyFile != "" { + keyData, err := os.ReadFile(f.config.SigningKey.KeyFile) + if err != nil { + return nil, nil, fmt.Errorf("failed to read key file: %w", err) + } + privKey, err := crypto.HexToECDSA(strings.TrimSpace(string(keyData))) + if err != nil { + return nil, nil, fmt.Errorf("invalid private key in file: %w", err) + } + signer = signature.DataSignerFromPrivateKey(privKey) + } else { + return nil, nil, errors.New("no signing key configured for reference DA writer") + } + f.dataSigner = signer + } + + writer := referenceda.NewWriter(f.dataSigner) + return writer, nil, nil +} + +func (f *ReferenceDAFactory) CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) { + if f.config.ValidatorContract == "" { + return nil, nil, errors.New("validator-contract address not configured for reference DA validator") + } + validatorAddr := common.HexToAddress(f.config.ValidatorContract) + return referenceda.NewValidator(f.l1Client, validatorAddr), nil, nil +} diff --git a/daprovider/reader.go b/daprovider/reader.go index 5cfe6f9718..112da093c3 100644 --- a/daprovider/reader.go +++ b/daprovider/reader.go @@ -1,4 +1,4 @@ -// Copyright 
2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package daprovider @@ -6,6 +6,7 @@ package daprovider import ( "context" "fmt" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -14,6 +15,20 @@ import ( "github.com/offchainlabs/nitro/util/blobs" ) +// CertificateValidationError represents an error in certificate validation +type CertificateValidationError struct { + Reason string +} + +func (e *CertificateValidationError) Error() string { + return e.Reason +} + +// IsCertificateValidationError checks if an error is a certificate validation error +func IsCertificateValidationError(err error) bool { + return err != nil && strings.Contains(err.Error(), "certificate validation failed") +} + type Reader interface { // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider IsValidHeaderByte(ctx context.Context, headerByte byte) bool diff --git a/daprovider/referenceda/certificate.go b/daprovider/referenceda/certificate.go new file mode 100644 index 0000000000..3d65e7f5ab --- /dev/null +++ b/daprovider/referenceda/certificate.go @@ -0,0 +1,113 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package referenceda + +import ( + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/signature" +) + +// Certificate represents a ReferenceDA certificate with signature +type Certificate struct { + Header byte + DataHash [32]byte + V uint8 + R [32]byte + S [32]byte +} + +// NewCertificate creates a certificate from data and signs it +func NewCertificate(data []byte, signer signature.DataSignerFunc) (*Certificate, error) { + dataHash := sha256.Sum256(data) + + sig, err := signer(dataHash[:]) + if err != nil { + return nil, fmt.Errorf("failed to sign data hash: %w", err) + } + + cert := &Certificate{ + Header: daprovider.DACertificateMessageHeaderFlag, + DataHash: dataHash, + V: sig[64] + 27, + } + copy(cert.R[:], sig[0:32]) + copy(cert.S[:], sig[32:64]) + + return cert, nil +} + +// Serialize converts certificate to bytes (98 bytes total) +func (c *Certificate) Serialize() []byte { + result := make([]byte, 98) + result[0] = c.Header + copy(result[1:33], c.DataHash[:]) + result[33] = c.V + copy(result[34:66], c.R[:]) + copy(result[66:98], c.S[:]) + return result +} + +// Deserialize creates a certificate from bytes +func Deserialize(data []byte) (*Certificate, error) { + if len(data) != 98 { + return nil, fmt.Errorf("invalid certificate length: expected 98, got %d", len(data)) + } + + cert := &Certificate{ + Header: data[0], + V: data[33], + } + copy(cert.DataHash[:], data[1:33]) + copy(cert.R[:], data[34:66]) + copy(cert.S[:], data[66:98]) + + if cert.Header != daprovider.DACertificateMessageHeaderFlag { + return nil, fmt.Errorf("invalid certificate header: %x", cert.Header) + } + + return cert, nil +} + +// RecoverSigner recovers the signer address from the certificate +func (c *Certificate) RecoverSigner() (common.Address, 
error) { + sig := make([]byte, 65) + copy(sig[0:32], c.R[:]) + copy(sig[32:64], c.S[:]) + sig[64] = c.V - 27 + + pubKey, err := crypto.SigToPub(c.DataHash[:], sig) + if err != nil { + return common.Address{}, fmt.Errorf("failed to recover signer: %w", err) + } + + return crypto.PubkeyToAddress(*pubKey), nil +} + +// ValidateWithContract checks if the certificate is signed by a trusted signer using the contract +// TODO: Uncomment the following once we have merged customda contracts changes. +/* +func (c *Certificate) ValidateWithContract(validator *ospgen.ReferenceDAProofValidator, opts *bind.CallOpts) error { + signer, err := c.RecoverSigner() + if err != nil { + return err + } + + isTrusted, err := validator.TrustedSigners(opts, signer) + if err != nil { + return fmt.Errorf("failed to check trusted signer: %w", err) + } + + if !isTrusted { + return fmt.Errorf("certificate signed by untrusted signer: %s", signer.Hex()) + } + + return nil + } +*/ diff --git a/daprovider/referenceda/config.go b/daprovider/referenceda/config.go new file mode 100644 index 0000000000..6f0ad5c76d --- /dev/null +++ b/daprovider/referenceda/config.go @@ -0,0 +1,41 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + flag "github.com/spf13/pflag" +) + +type Config struct { + Enable bool `koanf:"enable"` + SigningKey SigningKeyConfig `koanf:"signing-key"` + ValidatorContract string `koanf:"validator-contract"` +} + +type SigningKeyConfig struct { + PrivateKey string `koanf:"private-key"` + KeyFile string `koanf:"key-file"` +} + +var DefaultSigningKeyConfig = SigningKeyConfig{ + PrivateKey: "", + KeyFile: "", +} + +var DefaultConfig = Config{ + Enable: false, + SigningKey: DefaultSigningKeyConfig, + ValidatorContract: "", +} + +func SigningKeyConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".private-key", DefaultSigningKeyConfig.PrivateKey, "hex-encoded private key for signing certificates") + f.String(prefix+".key-file", DefaultSigningKeyConfig.KeyFile, "path to file containing private key") +} + +func ConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultConfig.Enable, "enable reference DA provider implementation") + SigningKeyConfigAddOptions(prefix+".signing-key", f) + f.String(prefix+".validator-contract", DefaultConfig.ValidatorContract, "address of the ReferenceDAProofValidator contract") +} diff --git a/daprovider/referenceda/reference_reader.go b/daprovider/referenceda/reference_reader.go new file mode 100644 index 0000000000..e788eec2ac --- /dev/null +++ b/daprovider/referenceda/reference_reader.go @@ -0,0 +1,118 @@ +// Copyright 2021-2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + "context" + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +// Reader implements the daprovider.Reader interface for ReferenceDA +type Reader struct { + storage *InMemoryStorage + l1Client *ethclient.Client + validatorAddr common.Address +} + +func NewReader(l1Client *ethclient.Client, validatorAddr common.Address) *Reader { + return &Reader{ + storage: GetInMemoryStorage(), + l1Client: l1Client, + validatorAddr: validatorAddr, + } +} + +// IsValidHeaderByte returns true if the header byte indicates a CustomDA message +func (r *Reader) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { + return daprovider.IsDACertificateMessageHeaderByte(headerByte) +} + +// RecoverPayloadFromBatch fetches the batch data from the ReferenceDA storage +func (r *Reader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages daprovider.PreimagesMap, + validateSeqMsg bool, +) ([]byte, daprovider.PreimagesMap, error) { + if len(sequencerMsg) <= 40 { + return nil, nil, fmt.Errorf("sequencer message too small") + } + + // Skip the 40-byte L1 header and get the certificate + certBytes := sequencerMsg[40:] + + // Deserialize certificate + cert, err := Deserialize(certBytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to deserialize certificate: %w", err) + } + + // Validate certificate if requested + // TODO: Uncomment the following once we have merged customda contracts changes. 
+ /* + if validateSeqMsg { + // Create contract binding + validator, err := ospgen.NewReferenceDAProofValidator(r.validatorAddr, r.l1Client) + if err != nil { + return nil, nil, fmt.Errorf("failed to create validator binding: %w", err) + } + + // Validate using contract + callOpts := &bind.CallOpts{Context: ctx} + err = cert.ValidateWithContract(validator, callOpts) + if err != nil { + return nil, nil, fmt.Errorf("certificate validation failed: %w", err) + } + } + */ + + log.Debug("ReferenceDA reader extracting hash", + "certificateLen", len(certBytes), + "sha256Hash", common.Hash(cert.DataHash).Hex(), + "certificateHex", fmt.Sprintf("0x%x", certBytes)) + + // Retrieve the data from storage using the hash + payload, err := r.storage.GetByHash(ctx, cert.DataHash) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve data from storage: %w", err) + } + if payload == nil { + return nil, nil, fmt.Errorf("data not found in storage for hash %s", common.Hash(cert.DataHash).Hex()) + } + + // Verify data matches certificate hash (SHA256) + actualHash := sha256.Sum256(payload) + if actualHash != cert.DataHash { + return nil, nil, fmt.Errorf("data hash mismatch: expected %s, got %s", common.Hash(cert.DataHash).Hex(), common.Hash(actualHash).Hex()) + } + + // Record preimages if needed + if preimages != nil { + preimageRecorder := daprovider.RecordPreimagesTo(preimages) + + // Record the mapping from certificate hash to actual payload data + // This is what the replay binary expects: keccak256(certificate) -> payload + certHash := crypto.Keccak256Hash(certBytes) + preimageRecorder(certHash, payload, arbutil.DACertificatePreimageType) + } + + log.Debug("ReferenceDA batch recovery completed", + "batchNum", batchNum, + "blockHash", batchBlockHash, + "sha256", common.Hash(cert.DataHash).Hex(), + "payloadSize", len(payload)) + + return payload, preimages, nil +} diff --git a/daprovider/referenceda/reference_validator.go b/daprovider/referenceda/reference_validator.go 
new file mode 100644 index 0000000000..579cc29b2d --- /dev/null +++ b/daprovider/referenceda/reference_validator.go @@ -0,0 +1,120 @@ +// Copyright 2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + "context" + "encoding/binary" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + + "github.com/offchainlabs/nitro/arbutil" +) + +type Validator struct { + storage *InMemoryStorage + l1Client *ethclient.Client + validatorAddr common.Address +} + +func NewValidator(l1Client *ethclient.Client, validatorAddr common.Address) *Validator { + return &Validator{ + storage: GetInMemoryStorage(), + l1Client: l1Client, + validatorAddr: validatorAddr, + } +} + +// GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA +// The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate] +// So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData] +func (v *Validator) GenerateReadPreimageProof(ctx context.Context, preimageType arbutil.PreimageType, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { + if preimageType != arbutil.DACertificatePreimageType { + return nil, fmt.Errorf("unsupported preimage type: %v", preimageType) + } + + // Deserialize certificate to extract data hash + cert, err := Deserialize(certificate) + if err != nil { + return nil, fmt.Errorf("failed to deserialize certificate: %w", err) + } + + // Extract data hash (SHA256) from certificate + dataHash := cert.DataHash + + // Get preimage from storage using SHA256 hash + preimage, err := v.storage.GetByHash(ctx, dataHash) + if err != nil { + return nil, fmt.Errorf("failed to get preimage: %w", err) + } + if preimage == nil { + return nil, fmt.Errorf("preimage not found for hash %x", dataHash) + } + + // Build custom proof data: [Version(1), PreimageSize(8), 
PreimageData] + // The certificate is NOT included here as it's already in the standardized header + proof := make([]byte, 1+8+len(preimage)) + proof[0] = 1 // Version + binary.BigEndian.PutUint64(proof[1:9], uint64(len(preimage))) + copy(proof[9:], preimage) + + return proof, nil +} + +// GenerateCertificateValidityProof creates a certificate validity proof for ReferenceDA +// The ReferenceDA implementation returns a two-byte proof with: +// - claimedValid (1 byte): 1 if valid, 0 if invalid +// - version (1 byte): 0x01 for version 1 +// +// This validates the certificate signature against trusted signers from the contract. +// Invalid certificates (wrong format, untrusted signer) return claimedValid=0. +// Only transient errors (like RPC failures) return an error. +func (v *Validator) GenerateCertificateValidityProof(ctx context.Context, preimageType arbutil.PreimageType, certificate []byte) ([]byte, error) { + + // Try to deserialize certificate + cert, err := Deserialize(certificate) + if err != nil { + // Certificate is malformed (wrong length, etc.) + // We return invalid proof rather than error for validation failures + return []byte{0, 0x01}, nil //nolint:nilerr // Invalid certificate, version 1 + } + + // Check if signer is trusted using contract + signer, err := cert.RecoverSigner() + if err != nil { + // Invalid signature - can't recover signer + // We return invalid proof rather than error for validation failures + return []byte{0, 0x01}, nil //nolint:nilerr // Invalid certificate, version 1 + } + + // TODO: Remove/uncomment the following once we have merged customda contracts changes. + // For now we will always just say the cert is untrusted. 
+ _ = signer + isTrusted := false + /* + // Create contract binding + validator, err := ospgen.NewReferenceDAProofValidator(v.validatorAddr, v.l1Client) + if err != nil { + // This is a transient error - can't connect to contract + return nil, fmt.Errorf("failed to create validator binding: %w", err) + } + + // Query contract to check if signer is trusted + isTrusted, err = validator.TrustedSigners(&bind.CallOpts{Context: ctx}, signer) + if err != nil { + // This is a transient error - RPC call failed + return nil, fmt.Errorf("failed to check trusted signer: %w", err) + } + */ + + if !isTrusted { + // Signer is not trusted + return []byte{0, 0x01}, nil // Invalid certificate, version 1 + } + + // Certificate is valid (signed by trusted signer) + return []byte{1, 0x01}, nil // Valid certificate, version 1 +} diff --git a/daprovider/referenceda/reference_writer.go b/daprovider/referenceda/reference_writer.go new file mode 100644 index 0000000000..5730aaebb5 --- /dev/null +++ b/daprovider/referenceda/reference_writer.go @@ -0,0 +1,63 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/offchainlabs/nitro/util/signature" +) + +// Writer implements the daprovider.Writer interface for ReferenceDA +type Writer struct { + storage *InMemoryStorage + signer signature.DataSignerFunc +} + +// NewWriter creates a new ReferenceDA writer +func NewWriter(signer signature.DataSignerFunc) *Writer { + return &Writer{ + storage: GetInMemoryStorage(), + signer: signer, + } +} + +func (w *Writer) Store( + ctx context.Context, + message []byte, + timeout uint64, + disableFallbackStoreDataOnChain bool, +) ([]byte, error) { + if w.signer == nil { + return nil, fmt.Errorf("no signer configured") + } + + // Create and sign certificate + cert, err := NewCertificate(message, w.signer) + if err != nil { + return nil, err + } + + // Store the message in the singleton storage + err = w.storage.Store(ctx, message) + if err != nil { + return nil, err + } + + // Serialize certificate for on-chain storage + certificate := cert.Serialize() + hashKey := common.BytesToHash(cert.DataHash[:]) + + log.Debug("ReferenceDA batch stored with signature", + "sha256", hashKey.Hex(), + "certificateSize", len(certificate), + "batchSize", len(message), + ) + + return certificate, nil +} diff --git a/daprovider/referenceda/storage.go b/daprovider/referenceda/storage.go new file mode 100644 index 0000000000..8daa4ade43 --- /dev/null +++ b/daprovider/referenceda/storage.go @@ -0,0 +1,54 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + "context" + "crypto/sha256" + "sync" + + "github.com/ethereum/go-ethereum/common" +) + +// InMemoryStorage implements PreimageStorage interface for in-memory storage +type InMemoryStorage struct { + mu sync.RWMutex + preimages map[common.Hash][]byte +} + +var ( + // singleton instance of InMemoryStorage + storageInstance *InMemoryStorage + storageOnce sync.Once +) + +// GetInMemoryStorage returns the singleton instance of InMemoryStorage +func GetInMemoryStorage() *InMemoryStorage { + storageOnce.Do(func() { + storageInstance = &InMemoryStorage{ + preimages: make(map[common.Hash][]byte), + } + }) + return storageInstance +} + +func (s *InMemoryStorage) Store(ctx context.Context, data []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + hash := sha256.Sum256(data) + s.preimages[common.BytesToHash(hash[:])] = data + return nil +} + +func (s *InMemoryStorage) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + data, exists := s.preimages[hash] + if !exists { + return nil, nil + } + return data, nil +} diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go new file mode 100644 index 0000000000..cf2a7bf423 --- /dev/null +++ b/daprovider/server/provider_server.go @@ -0,0 +1,193 @@ +// Copyright 2024-2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package dapserver + +import ( + "context" + "errors" + "fmt" + "hash/crc32" + "net" + "net/http" + "os" + "strings" + + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/daclient" +) + +type Server struct { + reader daprovider.Reader + writer daprovider.Writer + validator daprovider.Validator +} + +type ServerConfig struct { + Addr string `koanf:"addr"` + Port uint64 `koanf:"port"` + JWTSecret string `koanf:"jwtsecret"` + EnableDAWriter bool `koanf:"enable-da-writer"` + ServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"server-timeouts"` + RPCServerBodyLimit int `koanf:"rpc-server-body-limit"` +} + +var DefaultServerConfig = ServerConfig{ + Addr: "localhost", + Port: 9880, + JWTSecret: "", + EnableDAWriter: false, + ServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, + RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault, +} + +func ServerConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".addr", DefaultServerConfig.Addr, "JSON rpc server listening interface") + f.Uint64(prefix+".port", DefaultServerConfig.Port, "JSON rpc server listening port") + f.String(prefix+".jwtsecret", DefaultServerConfig.JWTSecret, "path to file with jwtsecret for validation") + f.Bool(prefix+".enable-da-writer", DefaultServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface") + f.Int(prefix+".rpc-server-body-limit", DefaultServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB 
limit") + genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f) +} + +func fetchJWTSecret(fileName string) ([]byte, error) { + if data, err := os.ReadFile(fileName); err == nil { + jwtSecret := common.FromHex(strings.TrimSpace(string(data))) + if len(jwtSecret) == 32 { + log.Info("Loaded JWT secret file", "path", fileName, "crc32", fmt.Sprintf("%#x", crc32.ChecksumIEEE(jwtSecret))) + return jwtSecret, nil + } + log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret)) + return nil, errors.New("invalid JWT secret") + } + return nil, errors.New("JWT secret file not found") +} + +// NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components +func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator) (*http.Server, error) { + listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port)) + if err != nil { + return nil, err + } + + rpcServer := rpc.NewServer() + if config.RPCServerBodyLimit > 0 { + rpcServer.SetHTTPBodyLimit(config.RPCServerBodyLimit) + } + + server := &Server{ + reader: reader, + writer: writer, + validator: validator, + } + if err = rpcServer.RegisterName("daprovider", server); err != nil { + return nil, err + } + + addr, ok := listener.Addr().(*net.TCPAddr) + if !ok { + return nil, errors.New("failed getting provider server address from listener") + } + + var handler http.Handler + if config.JWTSecret != "" { + jwt, err := fetchJWTSecret(config.JWTSecret) + if err != nil { + return nil, fmt.Errorf("failed creating new provider server: %w", err) + } + handler = node.NewHTTPHandlerStack(rpcServer, nil, nil, jwt) + } else { + handler = rpcServer + } + + srv := &http.Server{ + Addr: "http://" + addr.String(), + Handler: handler, + ReadTimeout: config.ServerTimeouts.ReadTimeout, + ReadHeaderTimeout: config.ServerTimeouts.ReadHeaderTimeout, + WriteTimeout: 
config.ServerTimeouts.WriteTimeout, + IdleTimeout: config.ServerTimeouts.IdleTimeout, + } + go func() { + if err := srv.Serve(listener); err != nil && + !errors.Is(err, http.ErrServerClosed) { + log.Error("provider server's Serve method returned a non http.ErrServerClosed error", "err", err) + } + }() + + go func() { + <-ctx.Done() + _ = srv.Shutdown(context.Background()) + }() + + return srv, nil +} + +func (s *Server) IsValidHeaderByte(ctx context.Context, headerByte byte) (*daclient.IsValidHeaderByteResult, error) { + return &daclient.IsValidHeaderByteResult{IsValid: s.reader.IsValidHeaderByte(ctx, headerByte)}, nil +} + +func (s *Server) RecoverPayloadFromBatch( + ctx context.Context, + batchNum hexutil.Uint64, + batchBlockHash common.Hash, + sequencerMsg hexutil.Bytes, + preimages daprovider.PreimagesMap, + validateSeqMsg bool, +) (*daclient.RecoverPayloadFromBatchResult, error) { + payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg) + if err != nil { + return nil, err + } + return &daclient.RecoverPayloadFromBatchResult{ + Payload: payload, + Preimages: preimages, + }, nil +} + +func (s *Server) Store( + ctx context.Context, + message hexutil.Bytes, + timeout hexutil.Uint64, + disableFallbackStoreDataOnChain bool, +) (*daclient.StoreResult, error) { + serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout), disableFallbackStoreDataOnChain) + if err != nil { + return nil, err + } + return &daclient.StoreResult{SerializedDACert: serializedDACert}, nil +} + +func (s *Server) GenerateReadPreimageProof(ctx context.Context, preimageType hexutil.Uint, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*daclient.GenerateReadPreimageProofResult, error) { + if s.validator == nil { + return nil, errors.New("validator not available") + } + // #nosec G115 + proof, err := s.validator.GenerateReadPreimageProof(ctx, 
arbutil.PreimageType(uint8(preimageType)), certHash, uint64(offset), certificate) + if err != nil { + return nil, err + } + return &daclient.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(proof)}, nil +} + +func (s *Server) GenerateCertificateValidityProof(ctx context.Context, preimageType hexutil.Uint, certificate hexutil.Bytes) (*daclient.GenerateCertificateValidityProofResult, error) { + if s.validator == nil { + return nil, errors.New("validator not available") + } + // #nosec G115 + proof, err := s.validator.GenerateCertificateValidityProof(ctx, arbutil.PreimageType(uint8(preimageType)), certificate) + if err != nil { + return nil, err + } + return &daclient.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(proof)}, nil +} diff --git a/daprovider/util.go b/daprovider/util.go index e0454cc6b0..be7c85f1de 100644 --- a/daprovider/util.go +++ b/daprovider/util.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package daprovider @@ -75,8 +75,12 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 // BrotliMessageHeaderByte indicates that the message is brotli-compressed. const BrotliMessageHeaderByte byte = 0 +// DACertificateMessageHeaderFlag indicates that this message uses a custom data availability system. +// Anytrust uses the legacy TreeDASMessageHeaderFlag instead despite also having a certificate. 
+const DACertificateMessageHeaderFlag byte = 0x01 + // KnownHeaderBits is all header bits with known meaning to this nitro version -const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte +const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte | DACertificateMessageHeaderFlag var DefaultDASRetentionPeriod time.Duration = time.Hour * 24 * 15 @@ -105,6 +109,10 @@ func IsBlobHashesHeaderByte(header byte) bool { return hasBits(header, BlobHashesHeaderFlag) } +func IsDACertificateMessageHeaderByte(header byte) bool { + return header == DACertificateMessageHeaderFlag +} + func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } diff --git a/daprovider/validator.go b/daprovider/validator.go new file mode 100644 index 0000000000..92d39aa12a --- /dev/null +++ b/daprovider/validator.go @@ -0,0 +1,26 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package daprovider + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + "github.com/offchainlabs/nitro/arbutil" +) + +// Validator defines the interface for custom data availability systems. +// This interface is used to validate and generate proofs for DACertificate preimages. +type Validator interface { + // GenerateReadPreimageProof generates a proof for a specific preimage at a given offset. + // The proof format depends on the implementation and must be compatible with the Solidity + // IDACertificateValidator contract. + // certHash is the keccak256 hash of the certificate. 
+ GenerateReadPreimageProof(ctx context.Context, preimageType arbutil.PreimageType, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) + + // GenerateCertificateValidityProof returns a proof of whether the certificate + // is valid according to the DA system's rules. + GenerateCertificateValidityProof(ctx context.Context, preimageType arbutil.PreimageType, certificate []byte) ([]byte, error) +} diff --git a/daprovider/writer.go b/daprovider/writer.go index f49351e9b5..0008f19979 100644 --- a/daprovider/writer.go +++ b/daprovider/writer.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package daprovider From 9f415d7b3f92e9a2ae26669f132d2e7953398bf0 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 12 Sep 2025 17:41:17 +0200 Subject: [PATCH 02/56] Reduce code duplication in factory, more cert validation --- daprovider/factory/factory.go | 52 +++++++++++---------------- daprovider/referenceda/certificate.go | 4 +++ 2 files changed, 25 insertions(+), 31 deletions(-) diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index e639167b97..e110009505 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -108,45 +108,35 @@ func (f *AnyTrustFactory) ValidateConfig() error { } func (f *AnyTrustFactory) CreateReader(ctx context.Context) (daprovider.Reader, func(), error) { + var daReader das.DataAvailabilityServiceReader + var keysetFetcher *das.KeysetFetcher + var lifecycleManager *das.LifecycleManager + var err error + if f.enableWriter { - _, daReader, keysetFetcher, lifecycleManager, err := das.CreateDAReaderAndWriter( + _, daReader, keysetFetcher, lifecycleManager, err = das.CreateDAReaderAndWriter( ctx, f.config, f.dataSigner, f.l1Client, f.seqInboxAddr) - if err != nil { - return nil, nil, err - } - - daReader = 
das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) - if f.config.PanicOnError { - daReader = das.NewReaderPanicWrapper(daReader) - } - - reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) - cleanupFn := func() { - if lifecycleManager != nil { - lifecycleManager.StopAndWaitUntil(0) - } - } - return reader, cleanupFn, nil } else { - daReader, keysetFetcher, lifecycleManager, err := das.CreateDAReader( + daReader, keysetFetcher, lifecycleManager, err = das.CreateDAReader( ctx, f.config, f.l1Reader, &f.seqInboxAddr) - if err != nil { - return nil, nil, err - } + } - daReader = das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) - if f.config.PanicOnError { - daReader = das.NewReaderPanicWrapper(daReader) - } + if err != nil { + return nil, nil, err + } - reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) - cleanupFn := func() { - if lifecycleManager != nil { - lifecycleManager.StopAndWaitUntil(0) - } + daReader = das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) + if f.config.PanicOnError { + daReader = das.NewReaderPanicWrapper(daReader) + } + + reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) } - return reader, cleanupFn, nil } + return reader, cleanupFn, nil } func (f *AnyTrustFactory) CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) { diff --git a/daprovider/referenceda/certificate.go b/daprovider/referenceda/certificate.go index 3d65e7f5ab..91d05babf3 100644 --- a/daprovider/referenceda/certificate.go +++ b/daprovider/referenceda/certificate.go @@ -77,6 +77,10 @@ func Deserialize(data []byte) (*Certificate, error) { // RecoverSigner recovers the signer address from the certificate func (c *Certificate) RecoverSigner() (common.Address, error) { + if c.V < 27 { + return common.Address{}, fmt.Errorf("invalid signature V value: %d (must be >= 27)", c.V) + } + sig := make([]byte, 65) 
copy(sig[0:32], c.R[:]) copy(sig[32:64], c.S[:]) From 3a79a1827b0bf7ebf06df3e88c5877856bd2463a Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 12 Sep 2025 18:07:56 +0200 Subject: [PATCH 03/56] Fix for interface change on master --- daprovider/factory/factory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index e110009505..776698570a 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -108,7 +108,7 @@ func (f *AnyTrustFactory) ValidateConfig() error { } func (f *AnyTrustFactory) CreateReader(ctx context.Context) (daprovider.Reader, func(), error) { - var daReader das.DataAvailabilityServiceReader + var daReader dasutil.DASReader var keysetFetcher *das.KeysetFetcher var lifecycleManager *das.LifecycleManager var err error From c1f5d1d4e00a5b7a4c5dd8545b95f786b66853d9 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Sep 2025 14:34:32 +0600 Subject: [PATCH 04/56] Remove DACert flag from KnownHeaderBits for now We will add DACertificateMessageHeaderFlag back to KnownHeaderBits in a future PR; removing it for now avoids changing the replay binary unnecessarily when merging in this PR. 
--- daprovider/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daprovider/util.go b/daprovider/util.go index be7c85f1de..44aadec5a3 100644 --- a/daprovider/util.go +++ b/daprovider/util.go @@ -80,7 +80,7 @@ const BrotliMessageHeaderByte byte = 0 const DACertificateMessageHeaderFlag byte = 0x01 // KnownHeaderBits is all header bits with known meaning to this nitro version -const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte | DACertificateMessageHeaderFlag +const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte var DefaultDASRetentionPeriod time.Duration = time.Hour * 24 * 15 From 08e9cec1ac23874152fd00d03fbc4b9b174523f8 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Sep 2025 15:41:53 +0600 Subject: [PATCH 05/56] Remove preimageType from daprovider.Validator --- daprovider/daclient/daclient.go | 7 ++----- daprovider/referenceda/reference_validator.go | 11 ++--------- daprovider/server/provider_server.go | 9 ++++----- daprovider/validator.go | 8 +++----- 4 files changed, 11 insertions(+), 24 deletions(-) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index adf473c9b3..896196ea9d 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/util/rpcclient" ) @@ -116,13 +115,12 @@ type GenerateReadPreimageProofResult struct { // This method calls the external DA provider's RPC endpoint to generate the proof func (c *Client) GenerateReadPreimageProof( ctx context.Context, - preimageType 
arbutil.PreimageType, certHash common.Hash, offset uint64, certificate []byte, ) ([]byte, error) { var generateProofResult GenerateReadPreimageProofResult - if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", hexutil.Uint(preimageType), certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { + if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { return nil, fmt.Errorf("error returned from daprovider_generateProof rpc method, err: %w", err) } return generateProofResult.Proof, nil @@ -135,11 +133,10 @@ type GenerateCertificateValidityProofResult struct { func (c *Client) GenerateCertificateValidityProof( ctx context.Context, - preimageType arbutil.PreimageType, certificate []byte, ) ([]byte, error) { var generateCertificateValidityProofResult GenerateCertificateValidityProofResult - if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Uint(preimageType), hexutil.Bytes(certificate)); err != nil { + if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Bytes(certificate)); err != nil { return nil, fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err) } return generateCertificateValidityProofResult.Proof, nil diff --git a/daprovider/referenceda/reference_validator.go b/daprovider/referenceda/reference_validator.go index 579cc29b2d..06484b3f00 100644 --- a/daprovider/referenceda/reference_validator.go +++ b/daprovider/referenceda/reference_validator.go @@ -10,8 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" - - "github.com/offchainlabs/nitro/arbutil" ) type Validator struct { @@ -31,11 +29,7 @@ func NewValidator(l1Client *ethclient.Client, validatorAddr 
common.Address) *Val // GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA // The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate] // So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData] -func (v *Validator) GenerateReadPreimageProof(ctx context.Context, preimageType arbutil.PreimageType, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { - if preimageType != arbutil.DACertificatePreimageType { - return nil, fmt.Errorf("unsupported preimage type: %v", preimageType) - } - +func (v *Validator) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { // Deserialize certificate to extract data hash cert, err := Deserialize(certificate) if err != nil { @@ -72,8 +66,7 @@ func (v *Validator) GenerateReadPreimageProof(ctx context.Context, preimageType // This validates the certificate signature against trusted signers from the contract. // Invalid certificates (wrong format, untrusted signer) return claimedValid=0. // Only transient errors (like RPC failures) return an error. 
-func (v *Validator) GenerateCertificateValidityProof(ctx context.Context, preimageType arbutil.PreimageType, certificate []byte) ([]byte, error) { - +func (v *Validator) GenerateCertificateValidityProof(ctx context.Context, certificate []byte) ([]byte, error) { // Try to deserialize certificate cert, err := Deserialize(certificate) if err != nil { diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index cf2a7bf423..f6cd456d43 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" @@ -168,24 +167,24 @@ func (s *Server) Store( return &daclient.StoreResult{SerializedDACert: serializedDACert}, nil } -func (s *Server) GenerateReadPreimageProof(ctx context.Context, preimageType hexutil.Uint, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*daclient.GenerateReadPreimageProofResult, error) { +func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*daclient.GenerateReadPreimageProofResult, error) { if s.validator == nil { return nil, errors.New("validator not available") } // #nosec G115 - proof, err := s.validator.GenerateReadPreimageProof(ctx, arbutil.PreimageType(uint8(preimageType)), certHash, uint64(offset), certificate) + proof, err := s.validator.GenerateReadPreimageProof(ctx, certHash, uint64(offset), certificate) if err != nil { return nil, err } return &daclient.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(proof)}, nil } -func (s *Server) GenerateCertificateValidityProof(ctx context.Context, preimageType hexutil.Uint, certificate hexutil.Bytes) 
(*daclient.GenerateCertificateValidityProofResult, error) { +func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certificate hexutil.Bytes) (*daclient.GenerateCertificateValidityProofResult, error) { if s.validator == nil { return nil, errors.New("validator not available") } // #nosec G115 - proof, err := s.validator.GenerateCertificateValidityProof(ctx, arbutil.PreimageType(uint8(preimageType)), certificate) + proof, err := s.validator.GenerateCertificateValidityProof(ctx, certificate) if err != nil { return nil, err } diff --git a/daprovider/validator.go b/daprovider/validator.go index 92d39aa12a..d97ec87458 100644 --- a/daprovider/validator.go +++ b/daprovider/validator.go @@ -7,20 +7,18 @@ import ( "context" "github.com/ethereum/go-ethereum/common" - - "github.com/offchainlabs/nitro/arbutil" ) // Validator defines the interface for custom data availability systems. -// This interface is used to validate and generate proofs for DACertificate preimages. +// This interface is used to generate proofs for DACertificate certificates and preimages. type Validator interface { // GenerateReadPreimageProof generates a proof for a specific preimage at a given offset. // The proof format depends on the implementation and must be compatible with the Solidity // IDACertificateValidator contract. // certHash is the keccak256 hash of the certificate. - GenerateReadPreimageProof(ctx context.Context, preimageType arbutil.PreimageType, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) + GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) // GenerateCertificateValidityProof returns a proof of whether the certificate // is valid according to the DA system's rules. 
- GenerateCertificateValidityProof(ctx context.Context, preimageType arbutil.PreimageType, certificate []byte) ([]byte, error) + GenerateCertificateValidityProof(ctx context.Context, certificate []byte) ([]byte, error) } From 6fb4296ac34f3ff46f6f240806ca7b0557f6ef8f Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Sep 2025 18:31:00 +0600 Subject: [PATCH 06/56] Remove IsValidHeaderByte from RPC API daprovider.Readers are now registered with their respective header bytes at initialization. For external DA providers where the RPC client is used this becomes a single call at init to daprovider_getSupportedHeaderBytes. Where previously in the inbox and block validator components we had loops over each provider, checking whether each provider handles that type of message, we now just check if we have a provider registered for that message. This means fewer RPC calls and makes the interface that providers need to implement a bit simpler, with room for taking it out completely for CustomDA if we can detect from other config that the external provider is expected to be a CustomDA provider.
--- arbnode/batch_poster.go | 31 ++++++-- arbnode/inbox_tracker.go | 4 +- .../extraction/message_extraction_function.go | 4 +- .../message_extraction_function_test.go | 6 +- arbnode/mel/extraction/types.go | 2 +- arbnode/mel/runner/mel.go | 4 +- arbnode/mel/runner/mel_test.go | 2 +- arbnode/node.go | 26 +++++-- arbstate/inbox.go | 55 +++++++------- cmd/daprovider/daprovider.go | 3 +- cmd/replay/main.go | 12 +++- daprovider/daclient/daclient.go | 18 +++-- daprovider/das/dasserver/dasserver.go | 7 +- daprovider/das/dasutil/dasutil.go | 4 -- daprovider/factory/factory.go | 9 +++ daprovider/reader.go | 7 -- daprovider/referenceda/reference_reader.go | 5 -- daprovider/registry.go | 72 +++++++++++++++++++ daprovider/server/provider_server.go | 22 +++--- staker/stateless_block_validator.go | 48 ++++++------- 20 files changed, 225 insertions(+), 116 deletions(-) create mode 100644 daprovider/registry.go diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index cfc72bfece..2e2c9cd440 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -107,7 +107,7 @@ type BatchPoster struct { gasRefunderAddr common.Address building *buildingBatch dapWriter daprovider.Writer - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -330,7 +330,7 @@ type BatchPosterOpts struct { TransactOpts *bind.TransactOpts DAPWriter daprovider.Writer ParentChainID *big.Int - DAPReaders []daprovider.Reader + DAPReaders *daprovider.ReaderRegistry } func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { @@ -1793,9 +1793,32 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error) } if config.CheckBatchCorrectness { - dapReaders := b.dapReaders + // Create a new registry for checking batch correctness + // We need to copy existing readers and potentially add a simulated blob reader + dapReaders 
:= daprovider.NewReaderRegistry() + + // Copy all existing readers from the batch poster's registry + // These readers can fetch data that was already posted to + // external DA systems (eg AnyTrust) before this batch transaction + if b.dapReaders != nil { + for _, headerByte := range b.dapReaders.SupportedHeaderBytes() { + if reader, found := b.dapReaders.GetByHeaderByte(headerByte); found { + if err := dapReaders.Register(headerByte, reader); err != nil { + return false, fmt.Errorf("failed to register reader for header byte 0x%02x: %w", headerByte, err) + } + } + } + } + + // For EIP-4844 blob transactions, the blobs are created locally and will be + // included with the L1 transaction itself (as blob sidecars). Since these blobs + // don't exist on L1 yet, we need a simulated reader that can "read" from the + // local kzgBlobs we just created. This is different from other DA systems where + // data is posted externally first and only a reference is included in the L1 tx. if b.building.use4844 { - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&simulatedBlobReader{kzgBlobs})) + if err := dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&simulatedBlobReader{kzgBlobs})); err != nil { + return false, fmt.Errorf("failed to register simulated blob reader: %w", err) + } } seqMsg := binary.BigEndian.AppendUint64([]byte{}, l1BoundMinTimestamp) seqMsg = binary.BigEndian.AppendUint64(seqMsg, l1BoundMaxTimestamp) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 991d443805..7aa56b7db2 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -39,14 +39,14 @@ type InboxTracker struct { txStreamer *TransactionStreamer mutex sync.Mutex validator *staker.BlockValidator - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry snapSyncConfig SnapSyncConfig batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer 
*TransactionStreamer, dapReaders []daprovider.Reader, snapSyncConfig SnapSyncConfig) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders *daprovider.ReaderRegistry, snapSyncConfig SnapSyncConfig) (*InboxTracker, error) { tracker := &InboxTracker{ db: db, txStreamer: txStreamer, diff --git a/arbnode/mel/extraction/message_extraction_function.go b/arbnode/mel/extraction/message_extraction_function.go index fbd36cc131..c2261f48a2 100644 --- a/arbnode/mel/extraction/message_extraction_function.go +++ b/arbnode/mel/extraction/message_extraction_function.go @@ -51,7 +51,7 @@ func ExtractMessages( ctx context.Context, inputState *mel.State, parentChainHeader *types.Header, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, delayedMsgDatabase DelayedMessageDatabase, receiptFetcher ReceiptFetcher, txsFetcher TransactionsFetcher, @@ -81,7 +81,7 @@ func extractMessagesImpl( ctx context.Context, inputState *mel.State, parentChainHeader *types.Header, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, delayedMsgDatabase DelayedMessageDatabase, txsFetcher TransactionsFetcher, receiptFetcher ReceiptFetcher, diff --git a/arbnode/mel/extraction/message_extraction_function_test.go b/arbnode/mel/extraction/message_extraction_function_test.go index 97b82b4210..2404b947b8 100644 --- a/arbnode/mel/extraction/message_extraction_function_test.go +++ b/arbnode/mel/extraction/message_extraction_function_test.go @@ -31,7 +31,7 @@ func TestExtractMessages(t *testing.T) { lookupDelayedMsgs func(context.Context, *mel.State, *types.Header, ReceiptFetcher, TransactionsFetcher) ([]*mel.DelayedInboxMessage, error) serializer func(context.Context, *mel.SequencerInboxBatch, *types.Transaction, uint, ReceiptFetcher) ([]byte, error) parseReport func(io.Reader) (*big.Int, common.Address, common.Hash, uint64, *big.Int, uint64, error) - parseSequencerMsg func(context.Context, uint64, 
common.Hash, []byte, []daprovider.Reader, daprovider.KeysetValidationMode) (*arbstate.SequencerMessage, error) + parseSequencerMsg func(context.Context, uint64, common.Hash, []byte, *daprovider.ReaderRegistry, daprovider.KeysetValidationMode) (*arbstate.SequencerMessage, error) extractBatchMessages func(context.Context, *mel.State, *arbstate.SequencerMessage, DelayedMessageDatabase) ([]*arbostypes.MessageWithMetadata, error) expectedError string expectedMsgCount uint64 @@ -319,7 +319,7 @@ func successfulParseSequencerMsg( batchNum uint64, batchBlockHash common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) { return nil, nil @@ -330,7 +330,7 @@ func failingParseSequencerMsg( batchNum uint64, batchBlockHash common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) { return nil, errors.New("failed to parse sequencer message") diff --git a/arbnode/mel/extraction/types.go b/arbnode/mel/extraction/types.go index 1b1de95931..a628e1b9e5 100644 --- a/arbnode/mel/extraction/types.go +++ b/arbnode/mel/extraction/types.go @@ -62,7 +62,7 @@ type sequencerMessageParserFunc func( batchNum uint64, batchBlockHash common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) diff --git a/arbnode/mel/runner/mel.go b/arbnode/mel/runner/mel.go index 9e7134c053..ddbed50c79 100644 --- a/arbnode/mel/runner/mel.go +++ b/arbnode/mel/runner/mel.go @@ -40,7 +40,7 @@ type MessageExtractor struct { addrs *chaininfo.RollupAddresses melDB *Database msgConsumer mel.MessageConsumer - dataProviders []daprovider.Reader + dataProviders *daprovider.ReaderRegistry startParentChainBlockHash common.Hash fsm 
*fsm.Fsm[action, FSMState] retryInterval time.Duration @@ -54,7 +54,7 @@ func NewMessageExtractor( rollupAddrs *chaininfo.RollupAddresses, melDB *Database, msgConsumer mel.MessageConsumer, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, startParentChainBlockHash common.Hash, retryInterval time.Duration, ) (*MessageExtractor, error) { diff --git a/arbnode/mel/runner/mel_test.go b/arbnode/mel/runner/mel_test.go index 89b19cf502..8823576a06 100644 --- a/arbnode/mel/runner/mel_test.go +++ b/arbnode/mel/runner/mel_test.go @@ -42,7 +42,7 @@ func TestMessageExtractor(t *testing.T) { &chaininfo.RollupAddresses{}, melDb, messageConsumer, - []daprovider.Reader{}, + daprovider.NewReaderRegistry(), common.Hash{}, 0, ) diff --git a/arbnode/node.go b/arbnode/node.go index 78e1c46ada..872b95ed8b 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -560,7 +560,7 @@ func getDAS( dataSigner signature.DataSignerFunc, l1client *ethclient.Client, stack *node.Node, -) (daprovider.Writer, func(), []daprovider.Reader, error) { +) (daprovider.Writer, func(), *daprovider.ReaderRegistry, error) { if config.DAProvider.Enable && config.DataAvailability.Enable { return nil, nil, nil, errors.New("da-provider and data-availability cannot be enabled together") } @@ -620,13 +620,25 @@ func getDAS( if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daClient == nil { return nil, nil, nil, errors.New("data availability service required but unconfigured") } - var dapReaders []daprovider.Reader + + dapReaders := daprovider.NewReaderRegistry() if daClient != nil { - dapReaders = append(dapReaders, daClient) + headerBytes, err := daClient.GetSupportedHeaderBytes(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get supported header bytes from DA client: %w", err) + } + if err := dapReaders.RegisterAll(headerBytes, daClient); err != nil { + return nil, nil, nil, fmt.Errorf("failed to register DA client: %w", 
err) + } } if blobReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + if err := dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(blobReader)); err != nil { + return nil, nil, nil, fmt.Errorf("failed to register blob reader: %w", err) + } } + // AnyTrust now always uses the daClient, which is already registered, + // so we don't need to register it separately here. + if withDAWriter { return daClient, dasServerCloseFn, dapReaders, nil } @@ -637,7 +649,7 @@ func getInboxTrackerAndReader( ctx context.Context, arbDb ethdb.Database, txStreamer *TransactionStreamer, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, config *Config, configFetcher ConfigFetcher, l1client *ethclient.Client, @@ -849,7 +861,7 @@ func getStatelessBlockValidator( txStreamer *TransactionStreamer, exec execution.ExecutionRecorder, arbDb ethdb.Database, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, stack *node.Node, latestWasmModuleRoot common.Hash, ) (*staker.StatelessBlockValidator, error) { @@ -899,7 +911,7 @@ func getBatchPoster( syncMonitor *SyncMonitor, deployInfo *chaininfo.RollupAddresses, parentChainID *big.Int, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, stakerAddr common.Address, ) (*BatchPoster, error) { var batchPoster *BatchPoster diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 022b99e419..07e6a16750 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -51,7 +51,7 @@ const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*SequencerMessage, error) { +func ParseSequencerMessage(ctx context.Context, batchNum uint64, 
batchBlockHash common.Hash, data []byte, dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode) (*SequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -76,40 +76,37 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Stage 1: Extract the payload from any data availability header. // It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. - // We try to extract payload from the first occurring valid DA reader in the dapReaders list - if len(payload) > 0 { - foundDA := false - var err error - for _, dapReader := range dapReaders { - if dapReader != nil && dapReader.IsValidHeaderByte(ctx, payload[0]) { - payload, _, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate) - if err != nil { - // Matches the way keyset validation was done inside DAS readers i.e logging the error - // But other daproviders might just want to return the error - if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(payload[0]) { - if keysetValidationMode == daprovider.KeysetPanicIfInvalid { - panic(err.Error()) - } else { - log.Error(err.Error()) - } + // Use the registry to find the appropriate reader for the header byte + if len(payload) > 0 && dapReaders != nil { + if dapReader, found := dapReaders.GetByHeaderByte(payload[0]); found { + var err error + payload, _, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate) + if err != nil { + // Matches the way keyset validation was done inside DAS readers i.e logging the error + // But other daproviders might just want to return the error + if strings.Contains(err.Error(), 
daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(payload[0]) { + if keysetValidationMode == daprovider.KeysetPanicIfInvalid { + panic(err.Error()) } else { - return nil, err + log.Error(err.Error()) } + } else { + return nil, err } - if payload == nil { - return parsedMsg, nil - } - foundDA = true - break } - } - - if !foundDA { + if payload == nil { + return parsedMsg, nil + } + } else { + // No reader found for this header byte - check if it's a known type if daprovider.IsDASMessageHeaderByte(payload[0]) { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") + return nil, fmt.Errorf("no DAS reader configured for DAS message (header byte 0x%02x)", payload[0]) } else if daprovider.IsBlobHashesHeaderByte(payload[0]) { return nil, daprovider.ErrNoBlobReader + } else if daprovider.IsDACertificateMessageHeaderByte(payload[0]) { + return nil, fmt.Errorf("no DACertificate reader configured for certificate message (header byte 0x%02x)", payload[0]) } + // Otherwise it's not a DA message, continue processing } } @@ -167,7 +164,7 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry cachedSequencerMessage *SequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -177,7 +174,7 @@ type inboxMultiplexer struct { keysetValidationMode daprovider.KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: 
delayedMessagesRead, diff --git a/cmd/daprovider/daprovider.go b/cmd/daprovider/daprovider.go index f1b8248ea0..c98bc248bb 100644 --- a/cmd/daprovider/daprovider.go +++ b/cmd/daprovider/daprovider.go @@ -285,7 +285,8 @@ func startup() error { } log.Info("Starting json rpc server", "mode", config.Mode, "addr", config.ProviderServer.Addr, "port", config.ProviderServer.Port) - providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator) + headerBytes := providerFactory.GetSupportedHeaderBytes() + providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator, headerBytes) if err != nil { return err } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 15a56e8858..8b29489b16 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -240,11 +240,17 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = daprovider.KeysetDontValidate } - var dapReaders []daprovider.Reader + dapReaders := daprovider.NewReaderRegistry() if dasReader != nil { - dapReaders = append(dapReaders, dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher)) + err = dapReaders.SetupDASReader(dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher)) + if err != nil { + panic(fmt.Sprintf("Failed to register DAS reader: %v", err)) + } + } + err = dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) + if err != nil { + panic(fmt.Sprintf("Failed to register blob reader: %v", err)) } - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 896196ea9d..9c18c00d76 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ 
-11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/util/rpcclient" @@ -52,18 +51,17 @@ func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher) (*Clie return client, nil } -// IsValidHeaderByteResult is the result struct that data availability providers should use to respond if the given headerByte corresponds to their DA service -type IsValidHeaderByteResult struct { - IsValid bool `json:"is-valid,omitempty"` +// SupportedHeaderBytesResult is the result struct that data availability providers should use to respond with their supported header bytes +type SupportedHeaderBytesResult struct { + HeaderBytes hexutil.Bytes `json:"headerBytes,omitempty"` } -func (c *Client) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - var isValidHeaderByteResult IsValidHeaderByteResult - if err := c.CallContext(ctx, &isValidHeaderByteResult, "daprovider_isValidHeaderByte", headerByte); err != nil { - log.Error("Error returned from daprovider_isValidHeaderByte rpc method, defaulting to result as false", "err", err) - return false +func (c *Client) GetSupportedHeaderBytes(ctx context.Context) ([]byte, error) { + var result SupportedHeaderBytesResult + if err := c.CallContext(ctx, &result, "daprovider_getSupportedHeaderBytes"); err != nil { + return nil, fmt.Errorf("error returned from daprovider_getSupportedHeaderBytes rpc method: %w", err) } - return isValidHeaderByteResult.IsValid + return result.HeaderBytes, nil } // RecoverPayloadFromBatchResult is the result struct that data availability providers should use to respond with underlying payload and updated preimages map to a RecoverPayloadFromBatch fetch request diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/das/dasserver/dasserver.go index e38e6c4bda..b0d592905a 100644 --- 
a/daprovider/das/dasserver/dasserver.go +++ b/daprovider/das/dasserver/dasserver.go @@ -167,8 +167,11 @@ func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.D }, nil } -func (s *Server) IsValidHeaderByte(ctx context.Context, headerByte byte) (*daclient.IsValidHeaderByteResult, error) { - return &daclient.IsValidHeaderByteResult{IsValid: s.reader.IsValidHeaderByte(ctx, headerByte)}, nil +func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*daclient.SupportedHeaderBytesResult, error) { + // DAS supports the DAS message header byte + return &daclient.SupportedHeaderBytesResult{ + HeaderBytes: []byte{daprovider.DASMessageHeaderFlag}, + }, nil } func (s *Server) RecoverPayloadFromBatch( diff --git a/daprovider/das/dasutil/dasutil.go b/daprovider/das/dasutil/dasutil.go index 7c24508768..10a6baa6e0 100644 --- a/daprovider/das/dasutil/dasutil.go +++ b/daprovider/das/dasutil/dasutil.go @@ -52,10 +52,6 @@ type readerForDAS struct { keysetFetcher DASKeysetFetcher } -func (d *readerForDAS) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return daprovider.IsDASMessageHeaderByte(headerByte) -} - func (d *readerForDAS) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index 776698570a..42d2c80dc2 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -34,6 +34,7 @@ type DAProviderFactory interface { CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) ValidateConfig() error + GetSupportedHeaderBytes() []byte } type AnyTrustFactory struct { @@ -86,6 +87,10 @@ func NewDAProviderFactory( } // AnyTrust Factory Implementation +func (f *AnyTrustFactory) GetSupportedHeaderBytes() []byte { + return []byte{daprovider.DASMessageHeaderFlag} +} + func (f *AnyTrustFactory) ValidateConfig() error { if !f.config.Enable { return 
errors.New("anytrust data availability must be enabled") @@ -169,6 +174,10 @@ func (f *AnyTrustFactory) CreateValidator(ctx context.Context) (daprovider.Valid } // ReferenceDA Factory Implementation +func (f *ReferenceDAFactory) GetSupportedHeaderBytes() []byte { + return []byte{daprovider.DACertificateMessageHeaderFlag} +} + func (f *ReferenceDAFactory) ValidateConfig() error { if !f.config.Enable { return errors.New("referenceda must be enabled") diff --git a/daprovider/reader.go b/daprovider/reader.go index 112da093c3..5f6ea8dc39 100644 --- a/daprovider/reader.go +++ b/daprovider/reader.go @@ -30,9 +30,6 @@ func IsCertificateValidationError(err error) bool { } type Reader interface { - // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider - IsValidHeaderByte(ctx context.Context, headerByte byte) bool - // RecoverPayloadFromBatch fetches the underlying payload and a map of preimages from the DA provider given the batch header information RecoverPayloadFromBatch( ctx context.Context, @@ -54,10 +51,6 @@ type readerForBlobReader struct { blobReader BlobReader } -func (b *readerForBlobReader) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return IsBlobHashesHeaderByte(headerByte) -} - func (b *readerForBlobReader) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, diff --git a/daprovider/referenceda/reference_reader.go b/daprovider/referenceda/reference_reader.go index e788eec2ac..ab4ed970b9 100644 --- a/daprovider/referenceda/reference_reader.go +++ b/daprovider/referenceda/reference_reader.go @@ -32,11 +32,6 @@ func NewReader(l1Client *ethclient.Client, validatorAddr common.Address) *Reader } } -// IsValidHeaderByte returns true if the header byte indicates a CustomDA message -func (r *Reader) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return daprovider.IsDACertificateMessageHeaderByte(headerByte) -} - // RecoverPayloadFromBatch fetches the batch data from the 
ReferenceDA storage func (r *Reader) RecoverPayloadFromBatch( ctx context.Context, diff --git a/daprovider/registry.go b/daprovider/registry.go new file mode 100644 index 0000000000..97957d79f4 --- /dev/null +++ b/daprovider/registry.go @@ -0,0 +1,72 @@ +// Copyright 2021-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package daprovider + +import ( + "fmt" +) + +// ReaderRegistry maintains a mapping of header bytes to their corresponding readers +type ReaderRegistry struct { + readers map[byte]Reader +} + +// NewReaderRegistry creates a new reader registry +func NewReaderRegistry() *ReaderRegistry { + return &ReaderRegistry{ + readers: make(map[byte]Reader), + } +} + +// Register associates a header byte with a reader +func (r *ReaderRegistry) Register(headerByte byte, reader Reader) error { + if reader == nil { + return fmt.Errorf("cannot register nil reader") + } + if existing, exists := r.readers[headerByte]; exists && existing != reader { + return fmt.Errorf("header byte 0x%02x already registered", headerByte) + } + r.readers[headerByte] = reader + return nil +} + +// RegisterAll associates multiple header bytes with a reader +func (r *ReaderRegistry) RegisterAll(headerBytes []byte, reader Reader) error { + for _, headerByte := range headerBytes { + if err := r.Register(headerByte, reader); err != nil { + return err + } + } + return nil +} + +// GetByHeaderByte returns the reader associated with the given header byte +func (r *ReaderRegistry) GetByHeaderByte(headerByte byte) (Reader, bool) { + reader, exists := r.readers[headerByte] + return reader, exists +} + +// SupportedHeaderBytes returns all registered header bytes +func (r *ReaderRegistry) SupportedHeaderBytes() []byte { + bytes := make([]byte, 0, len(r.readers)) + for b := range r.readers { + bytes = append(bytes, b) + } + return bytes +} + +// SetupDASReader registers a DAS reader for the DAS header byte +func (r 
*ReaderRegistry) SetupDASReader(reader Reader) error { + return r.Register(DASMessageHeaderFlag, reader) +} + +// SetupBlobReader registers a blob reader for the blob header byte +func (r *ReaderRegistry) SetupBlobReader(reader Reader) error { + return r.Register(BlobHashesHeaderFlag, reader) +} + +// SetupDACertificateReader registers a DA certificate reader for the certificate header byte +func (r *ReaderRegistry) SetupDACertificateReader(reader Reader) error { + return r.Register(DACertificateMessageHeaderFlag, reader) +} diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index f6cd456d43..578c75e90c 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -27,9 +27,10 @@ import ( ) type Server struct { - reader daprovider.Reader - writer daprovider.Writer - validator daprovider.Validator + reader daprovider.Reader + writer daprovider.Writer + validator daprovider.Validator + headerBytes []byte // Supported header bytes for this provider } type ServerConfig struct { @@ -73,7 +74,7 @@ func fetchJWTSecret(fileName string) ([]byte, error) { } // NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components -func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator) (*http.Server, error) { +func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator, headerBytes []byte) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port)) if err != nil { return nil, err @@ -85,9 +86,10 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader } server := &Server{ - reader: reader, - writer: writer, - validator: validator, + reader: reader, + writer: writer, + validator: validator, + headerBytes: 
headerBytes, } if err = rpcServer.RegisterName("daprovider", server); err != nil { return nil, err @@ -132,8 +134,10 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader return srv, nil } -func (s *Server) IsValidHeaderByte(ctx context.Context, headerByte byte) (*daclient.IsValidHeaderByteResult, error) { - return &daclient.IsValidHeaderByteResult{IsValid: s.reader.IsValidHeaderByte(ctx, headerByte)}, nil +func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*daclient.SupportedHeaderBytesResult, error) { + return &daclient.SupportedHeaderBytesResult{ + HeaderBytes: s.headerBytes, + }, nil } func (s *Server) RecoverPayloadFromBatch( diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 083971af5c..7413cae860 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -44,7 +44,7 @@ type StatelessBlockValidator struct { inboxTracker InboxTrackerInterface streamer TransactionStreamerInterface db ethdb.Database - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry stack *node.Node latestWasmModuleRoot common.Hash } @@ -237,7 +237,7 @@ func NewStatelessBlockValidator( streamer TransactionStreamerInterface, recorder execution.ExecutionRecorder, arbdb ethdb.Database, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, config func() *BlockValidatorConfig, stack *node.Node, latestWasmModuleRoot common.Hash, @@ -324,31 +324,31 @@ func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum ui return false, nil, err } preimages := make(daprovider.PreimagesMap) - if len(postedData) > 40 { - foundDA := false - for _, dapReader := range v.dapReaders { - if dapReader != nil && dapReader.IsValidHeaderByte(ctx, postedData[40]) { - var err error - var preimagesRecorded daprovider.PreimagesMap - _, preimagesRecorded, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, postedData, preimages, true) 
- if err != nil { - // Matches the way keyset validation was done inside DAS readers i.e logging the error - // But other daproviders might just want to return the error - if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(postedData[40]) { - log.Error(err.Error()) - } else { - return false, nil, err - } + if len(postedData) > 40 && v.dapReaders != nil { + headerByte := postedData[40] + if dapReader, found := v.dapReaders.GetByHeaderByte(headerByte); found { + var err error + var preimagesRecorded daprovider.PreimagesMap + _, preimagesRecorded, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, postedData, preimages, true) + if err != nil { + // Matches the way keyset validation was done inside DAS readers i.e logging the error + // But other daproviders might just want to return the error + if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(headerByte) { + log.Error(err.Error()) } else { - preimages = preimagesRecorded + return false, nil, err } - foundDA = true - break + } else { + preimages = preimagesRecorded } - } - if !foundDA { - if daprovider.IsDASMessageHeaderByte(postedData[40]) { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else { + // No reader found for this header byte - check if it's a known type + if daprovider.IsDASMessageHeaderByte(headerByte) { + log.Error("No DAS Reader configured for DAS message", "headerByte", fmt.Sprintf("0x%02x", headerByte)) + } else if daprovider.IsBlobHashesHeaderByte(headerByte) { + log.Error("No Blob Reader configured for blob message", "headerByte", fmt.Sprintf("0x%02x", headerByte)) + } else if daprovider.IsDACertificateMessageHeaderByte(headerByte) { + log.Error("No DACertificate Reader configured for certificate message", "headerByte", fmt.Sprintf("0x%02x", headerByte)) } } } From 931d0c99637cf40228d21c843d86f3759242c71a Mon Sep 17 
00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 12:54:12 +0600 Subject: [PATCH 07/56] Move daprovider api types to own go pkg --- daprovider/daclient/daclient.go | 38 +++++---------------------- daprovider/das/dasserver/dasserver.go | 14 +++++----- daprovider/server/provider_server.go | 22 ++++++++-------- daprovider/server_api/types.go | 37 ++++++++++++++++++++++++++ 4 files changed, 61 insertions(+), 50 deletions(-) create mode 100644 daprovider/server_api/types.go diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 9c18c00d76..68278517a6 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/server_api" "github.com/offchainlabs/nitro/util/rpcclient" ) @@ -51,25 +52,14 @@ func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher) (*Clie return client, nil } -// SupportedHeaderBytesResult is the result struct that data availability providers should use to respond with their supported header bytes -type SupportedHeaderBytesResult struct { - HeaderBytes hexutil.Bytes `json:"headerBytes,omitempty"` -} - func (c *Client) GetSupportedHeaderBytes(ctx context.Context) ([]byte, error) { - var result SupportedHeaderBytesResult + var result server_api.SupportedHeaderBytesResult if err := c.CallContext(ctx, &result, "daprovider_getSupportedHeaderBytes"); err != nil { return nil, fmt.Errorf("error returned from daprovider_getSupportedHeaderBytes rpc method: %w", err) } return result.HeaderBytes, nil } -// RecoverPayloadFromBatchResult is the result struct that data availability providers should use to respond with underlying payload and updated preimages map to a RecoverPayloadFromBatch fetch request -type RecoverPayloadFromBatchResult struct { - Payload hexutil.Bytes `json:"payload,omitempty"` - Preimages daprovider.PreimagesMap 
`json:"preimages,omitempty"` -} - func (c *Client) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, @@ -78,37 +68,26 @@ func (c *Client) RecoverPayloadFromBatch( preimages daprovider.PreimagesMap, validateSeqMsg bool, ) ([]byte, daprovider.PreimagesMap, error) { - var recoverPayloadFromBatchResult RecoverPayloadFromBatchResult + var recoverPayloadFromBatchResult server_api.RecoverPayloadFromBatchResult if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), preimages, validateSeqMsg); err != nil { return nil, nil, fmt.Errorf("error returned from daprovider_recoverPayloadFromBatch rpc method, err: %w", err) } return recoverPayloadFromBatchResult.Payload, recoverPayloadFromBatchResult.Preimages, nil } -// StoreResult is the result struct that data availability providers should use to respond with a commitment to a Store request for posting batch data to their DA service -type StoreResult struct { - SerializedDACert hexutil.Bytes `json:"serialized-da-cert,omitempty"` -} - func (c *Client) Store( ctx context.Context, message []byte, timeout uint64, disableFallbackStoreDataOnChain bool, ) ([]byte, error) { - var storeResult StoreResult + var storeResult server_api.StoreResult if err := c.CallContext(ctx, &storeResult, "daprovider_store", hexutil.Bytes(message), hexutil.Uint64(timeout), disableFallbackStoreDataOnChain); err != nil { return nil, fmt.Errorf("error returned from daprovider_store rpc method, err: %w", err) } return storeResult.SerializedDACert, nil } -// GenerateReadPreimageProofResult is the result struct that data availability providers -// should use to respond with a proof for a specific preimage -type GenerateReadPreimageProofResult struct { - Proof hexutil.Bytes `json:"proof,omitempty"` -} - // GenerateReadPreimageProof generates a proof for a specific preimage at a given offset // This method calls the external DA provider's 
RPC endpoint to generate the proof func (c *Client) GenerateReadPreimageProof( @@ -117,23 +96,18 @@ func (c *Client) GenerateReadPreimageProof( offset uint64, certificate []byte, ) ([]byte, error) { - var generateProofResult GenerateReadPreimageProofResult + var generateProofResult server_api.GenerateReadPreimageProofResult if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { return nil, fmt.Errorf("error returned from daprovider_generateProof rpc method, err: %w", err) } return generateProofResult.Proof, nil } -// GenerateCertificateValidityProofResult is the result struct that data availability providers should use to respond with validity proof -type GenerateCertificateValidityProofResult struct { - Proof hexutil.Bytes `json:"proof,omitempty"` -} - func (c *Client) GenerateCertificateValidityProof( ctx context.Context, certificate []byte, ) ([]byte, error) { - var generateCertificateValidityProofResult GenerateCertificateValidityProofResult + var generateCertificateValidityProofResult server_api.GenerateCertificateValidityProofResult if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Bytes(certificate)); err != nil { return nil, fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err) } diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/das/dasserver/dasserver.go index b0d592905a..e752a27d7e 100644 --- a/daprovider/das/dasserver/dasserver.go +++ b/daprovider/das/dasserver/dasserver.go @@ -22,9 +22,9 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" - "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/das" "github.com/offchainlabs/nitro/daprovider/das/dasutil" + "github.com/offchainlabs/nitro/daprovider/server_api" 
"github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) @@ -167,9 +167,9 @@ func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.D }, nil } -func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*daclient.SupportedHeaderBytesResult, error) { +func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*server_api.SupportedHeaderBytesResult, error) { // DAS supports the DAS message header byte - return &daclient.SupportedHeaderBytesResult{ + return &server_api.SupportedHeaderBytesResult{ HeaderBytes: []byte{daprovider.DASMessageHeaderFlag}, }, nil } @@ -181,12 +181,12 @@ func (s *Server) RecoverPayloadFromBatch( sequencerMsg hexutil.Bytes, preimages daprovider.PreimagesMap, validateSeqMsg bool, -) (*daclient.RecoverPayloadFromBatchResult, error) { +) (*server_api.RecoverPayloadFromBatchResult, error) { payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg) if err != nil { return nil, err } - return &daclient.RecoverPayloadFromBatchResult{ + return &server_api.RecoverPayloadFromBatchResult{ Payload: payload, Preimages: preimages, }, nil @@ -197,10 +197,10 @@ func (s *Server) Store( message hexutil.Bytes, timeout hexutil.Uint64, disableFallbackStoreDataOnChain bool, -) (*daclient.StoreResult, error) { +) (*server_api.StoreResult, error) { serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout), disableFallbackStoreDataOnChain) if err != nil { return nil, err } - return &daclient.StoreResult{SerializedDACert: serializedDACert}, nil + return &server_api.StoreResult{SerializedDACert: serializedDACert}, nil } diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 578c75e90c..75c997876c 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -23,7 +23,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" 
"github.com/offchainlabs/nitro/daprovider" - "github.com/offchainlabs/nitro/daprovider/daclient" + "github.com/offchainlabs/nitro/daprovider/server_api" ) type Server struct { @@ -134,8 +134,8 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader return srv, nil } -func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*daclient.SupportedHeaderBytesResult, error) { - return &daclient.SupportedHeaderBytesResult{ +func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*server_api.SupportedHeaderBytesResult, error) { + return &server_api.SupportedHeaderBytesResult{ HeaderBytes: s.headerBytes, }, nil } @@ -147,12 +147,12 @@ func (s *Server) RecoverPayloadFromBatch( sequencerMsg hexutil.Bytes, preimages daprovider.PreimagesMap, validateSeqMsg bool, -) (*daclient.RecoverPayloadFromBatchResult, error) { +) (*server_api.RecoverPayloadFromBatchResult, error) { payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg) if err != nil { return nil, err } - return &daclient.RecoverPayloadFromBatchResult{ + return &server_api.RecoverPayloadFromBatchResult{ Payload: payload, Preimages: preimages, }, nil @@ -163,15 +163,15 @@ func (s *Server) Store( message hexutil.Bytes, timeout hexutil.Uint64, disableFallbackStoreDataOnChain bool, -) (*daclient.StoreResult, error) { +) (*server_api.StoreResult, error) { serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout), disableFallbackStoreDataOnChain) if err != nil { return nil, err } - return &daclient.StoreResult{SerializedDACert: serializedDACert}, nil + return &server_api.StoreResult{SerializedDACert: serializedDACert}, nil } -func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*daclient.GenerateReadPreimageProofResult, error) { +func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, 
offset hexutil.Uint64, certificate hexutil.Bytes) (*server_api.GenerateReadPreimageProofResult, error) { if s.validator == nil { return nil, errors.New("validator not available") } @@ -180,10 +180,10 @@ func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common. if err != nil { return nil, err } - return &daclient.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(proof)}, nil + return &server_api.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(proof)}, nil } -func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certificate hexutil.Bytes) (*daclient.GenerateCertificateValidityProofResult, error) { +func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certificate hexutil.Bytes) (*server_api.GenerateCertificateValidityProofResult, error) { if s.validator == nil { return nil, errors.New("validator not available") } @@ -192,5 +192,5 @@ func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certifica if err != nil { return nil, err } - return &daclient.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(proof)}, nil + return &server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(proof)}, nil } diff --git a/daprovider/server_api/types.go b/daprovider/server_api/types.go new file mode 100644 index 0000000000..868877d2e5 --- /dev/null +++ b/daprovider/server_api/types.go @@ -0,0 +1,37 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package server_api + +import ( + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/offchainlabs/nitro/daprovider" +) + +// SupportedHeaderBytesResult is the result struct that data availability providers should use to respond with their supported header bytes +type SupportedHeaderBytesResult struct { + HeaderBytes hexutil.Bytes `json:"headerBytes,omitempty"` +} + +// RecoverPayloadFromBatchResult is the result struct that data availability providers should use to respond with underlying payload and updated preimages map to a RecoverPayloadFromBatch fetch request +type RecoverPayloadFromBatchResult struct { + Payload hexutil.Bytes `json:"payload,omitempty"` + Preimages daprovider.PreimagesMap `json:"preimages,omitempty"` +} + +// StoreResult is the result struct that data availability providers should use to respond with a commitment to a Store request for posting batch data to their DA service +type StoreResult struct { + SerializedDACert hexutil.Bytes `json:"serialized-da-cert,omitempty"` +} + +// GenerateReadPreimageProofResult is the result struct that data availability providers +// should use to respond with a proof for a specific preimage +type GenerateReadPreimageProofResult struct { + Proof hexutil.Bytes `json:"proof,omitempty"` +} + +// GenerateCertificateValidityProofResult is the result struct that data availability providers should use to respond with validity proof +type GenerateCertificateValidityProofResult struct { + Proof hexutil.Bytes `json:"proof,omitempty"` +} From 1f5b050f0324399ac442bb987b60e86231282595 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 15:51:34 +0600 Subject: [PATCH 08/56] Use Promises with daprovider.Reader iface --- arbstate/inbox.go | 6 +- daprovider/daclient/daclient.go | 45 ++++++++-- daprovider/das/dasserver/dasserver.go | 34 +++++-- daprovider/das/dasutil/dasutil.go | 100 
++++++++++++++++++--- daprovider/factory/factory.go | 3 +- daprovider/reader.go | 91 ++++++++++++++++--- daprovider/referenceda/reference_reader.go | 82 +++++++++++++---- daprovider/server/provider_server.go | 34 +++++-- staker/stateless_block_validator.go | 7 +- 9 files changed, 333 insertions(+), 69 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 07e6a16750..accb226936 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -79,8 +79,8 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Use the registry to find the appropriate reader for the header byte if len(payload) > 0 && dapReaders != nil { if dapReader, found := dapReaders.GetByHeaderByte(payload[0]); found { - var err error - payload, _, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate) + promise := dapReader.RecoverPayload(batchNum, batchBlockHash, data, keysetValidationMode != daprovider.KeysetDontValidate) + result, err := promise.Await(ctx) if err != nil { // Matches the way keyset validation was done inside DAS readers i.e logging the error // But other daproviders might just want to return the error @@ -93,6 +93,8 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } else { return nil, err } + } else { + payload = result.Payload } if payload == nil { return parsedMsg, nil diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 68278517a6..97697aa23f 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -14,6 +14,7 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/server_api" + "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" ) @@ -60,19 +61,45 @@ func (c *Client) GetSupportedHeaderBytes(ctx context.Context) ([]byte, error) { return result.HeaderBytes, nil } -func (c *Client) 
RecoverPayloadFromBatch( - ctx context.Context, +// RecoverPayload fetches the underlying payload from the DA provider +func (c *Client) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, validateSeqMsg bool, -) ([]byte, daprovider.PreimagesMap, error) { - var recoverPayloadFromBatchResult server_api.RecoverPayloadFromBatchResult - if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), preimages, validateSeqMsg); err != nil { - return nil, nil, fmt.Errorf("error returned from daprovider_recoverPayloadFromBatch rpc method, err: %w", err) - } - return recoverPayloadFromBatchResult.Payload, recoverPayloadFromBatchResult.Preimages, nil +) containers.PromiseInterface[daprovider.PayloadResult] { + promise := containers.NewPromise[daprovider.PayloadResult](nil) + go func() { + ctx := context.Background() + var recoverPayloadFromBatchResult server_api.RecoverPayloadFromBatchResult + if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), nil, validateSeqMsg); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayloadFromBatch rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.PayloadResult{Payload: recoverPayloadFromBatchResult.Payload}) + } + }() + return &promise +} + +// CollectPreimages collects preimages from the DA provider +func (c *Client) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise := containers.NewPromise[daprovider.PreimagesResult](nil) + go func() { + ctx := context.Background() + preimages := make(daprovider.PreimagesMap) + var recoverPayloadFromBatchResult 
server_api.RecoverPayloadFromBatchResult + if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), preimages, validateSeqMsg); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayloadFromBatch rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.PreimagesResult{Preimages: recoverPayloadFromBatchResult.Preimages}) + } + }() + return &promise } func (c *Client) Store( diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/das/dasserver/dasserver.go index e752a27d7e..986bb32d79 100644 --- a/daprovider/das/dasserver/dasserver.go +++ b/daprovider/das/dasserver/dasserver.go @@ -182,14 +182,34 @@ func (s *Server) RecoverPayloadFromBatch( preimages daprovider.PreimagesMap, validateSeqMsg bool, ) (*server_api.RecoverPayloadFromBatchResult, error) { - payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg) - if err != nil { - return nil, err + // If preimages are requested, use CollectPreimages, otherwise RecoverPayload + if preimages != nil { + promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err + } + // We still need to get the payload, so call RecoverPayload too + payloadPromise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + payloadResult, err := payloadPromise.Await(ctx) + if err != nil { + return nil, err + } + return &server_api.RecoverPayloadFromBatchResult{ + Payload: payloadResult.Payload, + Preimages: result.Preimages, + }, nil + } else { + promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err + } + return 
&server_api.RecoverPayloadFromBatchResult{ + Payload: result.Payload, + Preimages: nil, + }, nil } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: payload, - Preimages: preimages, - }, nil } func (s *Server) Store( diff --git a/daprovider/das/dasutil/dasutil.go b/daprovider/das/dasutil/dasutil.go index 10a6baa6e0..8812f3cfad 100644 --- a/daprovider/das/dasutil/dasutil.go +++ b/daprovider/das/dasutil/dasutil.go @@ -20,6 +20,7 @@ import ( "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das/dastree" + "github.com/offchainlabs/nitro/util/containers" ) type DASReader interface { @@ -52,15 +53,56 @@ type readerForDAS struct { keysetFetcher DASKeysetFetcher } -func (d *readerForDAS) RecoverPayloadFromBatch( +// recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages +func (d *readerForDAS) recoverInternal( ctx context.Context, batchNum uint64, - batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, validateSeqMsg bool, + needPayload bool, + needPreimages bool, ) ([]byte, daprovider.PreimagesMap, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, preimages, validateSeqMsg) + return recoverPayloadFromDasBatchInternal(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, validateSeqMsg, needPayload, needPreimages) +} + +// RecoverPayload fetches the underlying payload from the DA provider +func (d *readerForDAS) RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[daprovider.PayloadResult] { + promise := containers.NewPromise[daprovider.PayloadResult](nil) + go func() { + ctx := context.Background() + payload, _, err := d.recoverInternal(ctx, batchNum, sequencerMsg, validateSeqMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + 
promise.Produce(daprovider.PayloadResult{Payload: payload}) + } + }() + return &promise +} + +// CollectPreimages collects preimages from the DA provider +func (d *readerForDAS) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise := containers.NewPromise[daprovider.PreimagesResult](nil) + go func() { + ctx := context.Background() + _, preimages, err := d.recoverInternal(ctx, batchNum, sequencerMsg, validateSeqMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PreimagesResult{Preimages: preimages}) + } + }() + return &promise } // NewWriterForDAS is generally meant to be only used by nitro. @@ -95,6 +137,7 @@ var ( const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week +// RecoverPayloadFromDasBatch is deprecated, use recoverPayloadFromDasBatchInternal func RecoverPayloadFromDasBatch( ctx context.Context, batchNum uint64, @@ -104,8 +147,41 @@ func RecoverPayloadFromDasBatch( preimages daprovider.PreimagesMap, validateSeqMsg bool, ) ([]byte, daprovider.PreimagesMap, error) { + needPreimages := preimages != nil + payload, recoveredPreimages, err := recoverPayloadFromDasBatchInternal(ctx, batchNum, sequencerMsg, dasReader, keysetFetcher, validateSeqMsg, true, needPreimages) + if err != nil { + return nil, nil, err + } + // If preimages were passed in, copy recovered preimages into the provided map + if preimages != nil && recoveredPreimages != nil { + for piType, piMap := range recoveredPreimages { + if preimages[piType] == nil { + preimages[piType] = make(map[common.Hash][]byte) + } + for hash, preimage := range piMap { + preimages[piType][hash] = preimage + } + } + return payload, preimages, nil + } + return payload, recoveredPreimages, nil +} + +// recoverPayloadFromDasBatchInternal is the shared implementation +func recoverPayloadFromDasBatchInternal( + ctx 
context.Context, + batchNum uint64, + sequencerMsg []byte, + dasReader DASReader, + keysetFetcher DASKeysetFetcher, + validateSeqMsg bool, + needPayload bool, + needPreimages bool, +) ([]byte, daprovider.PreimagesMap, error) { + var preimages daprovider.PreimagesMap var preimageRecorder daprovider.PreimageRecorder - if preimages != nil { + if needPreimages { + preimages = make(daprovider.PreimagesMap) preimageRecorder = daprovider.RecordPreimagesTo(preimages) } cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) @@ -174,13 +250,17 @@ func RecoverPayloadFromDasBatch( } dataHash := cert.DataHash - payload, err := getByHash(ctx, dataHash) - if err != nil { - log.Error("Couldn't fetch DAS batch contents", "err", err) - return nil, nil, err + var payload []byte + // We need to fetch the payload if either we need to return it or need to record preimages + if needPayload || needPreimages { + payload, err = getByHash(ctx, dataHash) + if err != nil { + log.Error("Couldn't fetch DAS batch contents", "err", err) + return nil, nil, err + } } - if preimageRecorder != nil { + if preimageRecorder != nil && payload != nil { if version == 0 { treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index 42d2c80dc2..f4159fdee1 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -190,7 +190,8 @@ func (f *ReferenceDAFactory) CreateReader(ctx context.Context) (daprovider.Reade return nil, nil, errors.New("validator-contract address not configured for reference DA reader") } validatorAddr := common.HexToAddress(f.config.ValidatorContract) - reader := referenceda.NewReader(f.l1Client, validatorAddr) + storage := referenceda.GetInMemoryStorage() + reader := referenceda.NewReader(storage, f.l1Client, validatorAddr) return reader, nil, nil } diff --git a/daprovider/reader.go b/daprovider/reader.go index 
5f6ea8dc39..9df85b09d2 100644 --- a/daprovider/reader.go +++ b/daprovider/reader.go @@ -13,6 +13,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" + "github.com/offchainlabs/nitro/util/containers" ) // CertificateValidationError represents an error in certificate validation @@ -29,16 +30,32 @@ func IsCertificateValidationError(err error) bool { return err != nil && strings.Contains(err.Error(), "certificate validation failed") } +// PayloadResult contains the recovered payload data +type PayloadResult struct { + Payload []byte +} + +// PreimagesResult contains the collected preimages +type PreimagesResult struct { + Preimages PreimagesMap +} + type Reader interface { - // RecoverPayloadFromBatch fetches the underlying payload and a map of preimages from the DA provider given the batch header information - RecoverPayloadFromBatch( - ctx context.Context, + // RecoverPayload fetches the underlying payload from the DA provider given the batch header information + RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, + ) containers.PromiseInterface[PayloadResult] + + // CollectPreimages collects preimages from the DA provider given the batch header information + CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages PreimagesMap, validateSeqMsg bool, - ) ([]byte, PreimagesMap, error) + ) containers.PromiseInterface[PreimagesResult] } // NewReaderForBlobReader is generally meant to be only used by nitro. 
@@ -51,13 +68,15 @@ type readerForBlobReader struct { blobReader BlobReader } -func (b *readerForBlobReader) RecoverPayloadFromBatch( +// recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages +func (b *readerForBlobReader) recoverInternal( ctx context.Context, batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages PreimagesMap, validateSeqMsg bool, + needPayload bool, + needPreimages bool, ) ([]byte, PreimagesMap, error) { blobHashes := sequencerMsg[41:] if len(blobHashes)%len(common.Hash{}) != 0 { @@ -71,7 +90,10 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( if err != nil { return nil, nil, fmt.Errorf("failed to get blobs: %w", err) } - if preimages != nil { + + var preimages PreimagesMap + if needPreimages { + preimages = make(PreimagesMap) preimageRecorder := RecordPreimagesTo(preimages) for i, blob := range kzgBlobs { // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable @@ -80,10 +102,55 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( preimageRecorder(versionedHashes[i], b[:], arbutil.EthVersionedHashPreimageType) } } - payload, err := blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return nil, nil, nil + + var payload []byte + if needPayload { + payload, err = blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil, nil + } } + return payload, preimages, nil } + +// RecoverPayload fetches the underlying payload from the DA provider +func (b *readerForBlobReader) RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[PayloadResult] { + promise := containers.NewPromise[PayloadResult](nil) + go func() { + ctx := 
context.Background() + payload, _, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(PayloadResult{Payload: payload}) + } + }() + return &promise +} + +// CollectPreimages collects preimages from the DA provider +func (b *readerForBlobReader) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[PreimagesResult] { + promise := containers.NewPromise[PreimagesResult](nil) + go func() { + ctx := context.Background() + _, preimages, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(PreimagesResult{Preimages: preimages}) + } + }() + return &promise +} diff --git a/daprovider/referenceda/reference_reader.go b/daprovider/referenceda/reference_reader.go index ab4ed970b9..5186904bda 100644 --- a/daprovider/referenceda/reference_reader.go +++ b/daprovider/referenceda/reference_reader.go @@ -15,6 +15,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/containers" ) // Reader implements the daprovider.Reader interface for ReferenceDA @@ -24,22 +25,24 @@ type Reader struct { validatorAddr common.Address } -func NewReader(l1Client *ethclient.Client, validatorAddr common.Address) *Reader { +// NewReader creates a new ReferenceDA reader +func NewReader(storage *InMemoryStorage, l1Client *ethclient.Client, validatorAddr common.Address) *Reader { return &Reader{ - storage: GetInMemoryStorage(), + storage: storage, l1Client: l1Client, validatorAddr: validatorAddr, } } -// RecoverPayloadFromBatch fetches the batch data from the ReferenceDA storage -func (r *Reader) RecoverPayloadFromBatch( +// recoverInternal is the shared implementation for both RecoverPayload and 
CollectPreimages +func (r *Reader) recoverInternal( ctx context.Context, batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, validateSeqMsg bool, + needPayload bool, + needPreimages bool, ) ([]byte, daprovider.PreimagesMap, error) { if len(sequencerMsg) <= 40 { return nil, nil, fmt.Errorf("sequencer message too small") @@ -79,22 +82,27 @@ func (r *Reader) RecoverPayloadFromBatch( "certificateHex", fmt.Sprintf("0x%x", certBytes)) // Retrieve the data from storage using the hash - payload, err := r.storage.GetByHash(ctx, cert.DataHash) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve data from storage: %w", err) - } - if payload == nil { - return nil, nil, fmt.Errorf("data not found in storage for hash %s", common.Hash(cert.DataHash).Hex()) - } + var payload []byte + if needPayload || needPreimages { + payload, err = r.storage.GetByHash(ctx, common.BytesToHash(cert.DataHash[:])) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve data from storage: %w", err) + } + if payload == nil { + return nil, nil, fmt.Errorf("data not found in storage for hash %s", common.Hash(cert.DataHash).Hex()) + } - // Verify data matches certificate hash (SHA256) - actualHash := sha256.Sum256(payload) - if actualHash != cert.DataHash { - return nil, nil, fmt.Errorf("data hash mismatch: expected %s, got %s", common.Hash(cert.DataHash).Hex(), common.Hash(actualHash).Hex()) + // Verify data matches certificate hash (SHA256) + actualHash := sha256.Sum256(payload) + if actualHash != cert.DataHash { + return nil, nil, fmt.Errorf("data hash mismatch: expected %s, got %s", common.Hash(cert.DataHash).Hex(), common.Hash(actualHash).Hex()) + } } // Record preimages if needed - if preimages != nil { + var preimages daprovider.PreimagesMap + if needPreimages && payload != nil { + preimages = make(daprovider.PreimagesMap) preimageRecorder := daprovider.RecordPreimagesTo(preimages) // Record the mapping from certificate 
hash to actual payload data @@ -111,3 +119,43 @@ func (r *Reader) RecoverPayloadFromBatch( return payload, preimages, nil } + +// RecoverPayload fetches the underlying payload from the DA provider +func (r *Reader) RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[daprovider.PayloadResult] { + promise := containers.NewPromise[daprovider.PayloadResult](nil) + go func() { + ctx := context.Background() + payload, _, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PayloadResult{Payload: payload}) + } + }() + return &promise +} + +// CollectPreimages collects preimages from the DA provider +func (r *Reader) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + validateSeqMsg bool, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise := containers.NewPromise[daprovider.PreimagesResult](nil) + go func() { + ctx := context.Background() + _, preimages, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PreimagesResult{Preimages: preimages}) + } + }() + return &promise +} diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 75c997876c..e6a2d8f1ad 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -148,14 +148,34 @@ func (s *Server) RecoverPayloadFromBatch( preimages daprovider.PreimagesMap, validateSeqMsg bool, ) (*server_api.RecoverPayloadFromBatchResult, error) { - payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg) - if err != nil { - return nil, err + // If preimages are requested, use 
CollectPreimages, otherwise RecoverPayload + if preimages != nil { + promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err + } + // We still need to get the payload, so call RecoverPayload too + payloadPromise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + payloadResult, err := payloadPromise.Await(ctx) + if err != nil { + return nil, err + } + return &server_api.RecoverPayloadFromBatchResult{ + Payload: payloadResult.Payload, + Preimages: result.Preimages, + }, nil + } else { + promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err + } + return &server_api.RecoverPayloadFromBatchResult{ + Payload: result.Payload, + Preimages: nil, + }, nil } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: payload, - Preimages: preimages, - }, nil } func (s *Server) Store( diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 7413cae860..cff150b3b7 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -327,9 +327,8 @@ func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum ui if len(postedData) > 40 && v.dapReaders != nil { headerByte := postedData[40] if dapReader, found := v.dapReaders.GetByHeaderByte(headerByte); found { - var err error - var preimagesRecorded daprovider.PreimagesMap - _, preimagesRecorded, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, postedData, preimages, true) + promise := dapReader.CollectPreimages(batchNum, batchBlockHash, postedData, true) + result, err := promise.Await(ctx) if err != nil { // Matches the way keyset validation was done inside DAS readers i.e logging the error // But other daproviders might just want to return the error @@ 
-339,7 +338,7 @@ func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum ui return false, nil, err } } else { - preimages = preimagesRecorded + preimages = result.Preimages } } else { // No reader found for this header byte - check if it's a known type From de6cd2046919f0105d4ebe81869527e90e5aa196 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 16:19:53 +0600 Subject: [PATCH 09/56] dasserver migration to provider_server We moved the contents of dasserver to provider_server on the custom-da branch and I accidentally left dasserver in place on this branch when moving things across. I don't want to bring over the changes to the daprovider construction logic in node.go yet so I just made the das_migration.go file which we'll delete later. --- arbnode/node.go | 6 +- daprovider/das/dasserver/dasserver.go | 226 -------------------------- daprovider/server/das_migration.go | 158 ++++++++++++++++++ 3 files changed, 161 insertions(+), 229 deletions(-) delete mode 100644 daprovider/das/dasserver/dasserver.go create mode 100644 daprovider/server/das_migration.go diff --git a/arbnode/node.go b/arbnode/node.go index 872b95ed8b..375936b08e 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,7 +40,7 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/das" - "github.com/offchainlabs/nitro/daprovider/das/dasserver" + dapserver "github.com/offchainlabs/nitro/daprovider/server" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -589,13 +589,13 @@ func getDAS( } }() - serverConfig := dasserver.DefaultServerConfig + serverConfig := dapserver.DefaultDASServerConfig serverConfig.Port = 0 // Initializes server at a random available port serverConfig.DataAvailability = config.DataAvailability serverConfig.EnableDAWriter = config.BatchPoster.Enable 
serverConfig.JWTSecret = jwtPath withDAWriter = config.BatchPoster.Enable - dasServer, closeFn, err := dasserver.NewServer(ctx, &serverConfig, dataSigner, l1client, l1Reader, deployInfo.SequencerInbox) + dasServer, closeFn, err := dapserver.NewServerForDAS(ctx, &serverConfig, dataSigner, l1client, l1Reader, deployInfo.SequencerInbox) if err != nil { return nil, nil, nil, err } diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/das/dasserver/dasserver.go deleted file mode 100644 index 986bb32d79..0000000000 --- a/daprovider/das/dasserver/dasserver.go +++ /dev/null @@ -1,226 +0,0 @@ -package dasserver - -import ( - "context" - "errors" - "fmt" - "hash/crc32" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/rpc" - - "github.com/offchainlabs/nitro/cmd/genericconf" - "github.com/offchainlabs/nitro/daprovider" - "github.com/offchainlabs/nitro/daprovider/das" - "github.com/offchainlabs/nitro/daprovider/das/dasutil" - "github.com/offchainlabs/nitro/daprovider/server_api" - "github.com/offchainlabs/nitro/util/headerreader" - "github.com/offchainlabs/nitro/util/signature" -) - -type Server struct { - reader daprovider.Reader - writer daprovider.Writer -} - -type ServerConfig struct { - Addr string `koanf:"addr"` - Port uint64 `koanf:"port"` - JWTSecret string `koanf:"jwtsecret"` - EnableDAWriter bool `koanf:"enable-da-writer"` - DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - ServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"server-timeouts"` - RPCServerBodyLimit int `koanf:"rpc-server-body-limit"` -} - -var DefaultServerConfig = ServerConfig{ - Addr: "localhost", - Port: 9880, - JWTSecret: "", - EnableDAWriter: false, - DataAvailability: 
das.DefaultDataAvailabilityConfig, - ServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, - RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault, -} - -func ServerConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".addr", DefaultServerConfig.Addr, "JSON rpc server listening interface") - f.Uint64(prefix+".port", DefaultServerConfig.Port, "JSON rpc server listening port") - f.String(prefix+".jwtsecret", DefaultServerConfig.JWTSecret, "path to file with jwtsecret for validation") - f.Bool(prefix+".enable-da-writer", DefaultServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface") - f.Int(prefix+".rpc-server-body-limit", DefaultServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") - das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) - genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f) -} - -func fetchJWTSecret(fileName string) ([]byte, error) { - if data, err := os.ReadFile(fileName); err == nil { - jwtSecret := common.FromHex(strings.TrimSpace(string(data))) - if len(jwtSecret) == 32 { - log.Info("Loaded JWT secret file", "path", fileName, "crc32", fmt.Sprintf("%#x", crc32.ChecksumIEEE(jwtSecret))) - return jwtSecret, nil - } - log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret)) - return nil, errors.New("invalid JWT secret") - } - return nil, errors.New("JWT secret file not found") -} - -func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.DataSignerFunc, l1Client *ethclient.Client, l1Reader *headerreader.HeaderReader, sequencerInboxAddr common.Address) (*http.Server, func(), error) { - var err error - var daWriter dasutil.DASWriter - var daReader dasutil.DASReader - var dasKeysetFetcher *das.KeysetFetcher - var dasLifecycleManager *das.LifecycleManager - if config.EnableDAWriter { - daWriter, daReader, dasKeysetFetcher, 
dasLifecycleManager, err = das.CreateDAReaderAndWriter(ctx, &config.DataAvailability, dataSigner, l1Client, sequencerInboxAddr) - if err != nil { - return nil, nil, err - } - } else { - daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReader(ctx, &config.DataAvailability, l1Reader, &sequencerInboxAddr) - if err != nil { - return nil, nil, err - } - } - - daReader = das.NewReaderTimeoutWrapper(daReader, config.DataAvailability.RequestTimeout) - if config.DataAvailability.PanicOnError { - if daWriter != nil { - daWriter = das.NewWriterPanicWrapper(daWriter) - } - daReader = das.NewReaderPanicWrapper(daReader) - } - - listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port)) - if err != nil { - return nil, nil, err - } - - rpcServer := rpc.NewServer() - if config.RPCServerBodyLimit > 0 { - rpcServer.SetHTTPBodyLimit(config.RPCServerBodyLimit) - } - var writer daprovider.Writer - if daWriter != nil { - writer = dasutil.NewWriterForDAS(daWriter) - } - server := &Server{ - reader: dasutil.NewReaderForDAS(daReader, dasKeysetFetcher), - writer: writer, - } - if err = rpcServer.RegisterName("daprovider", server); err != nil { - return nil, nil, err - } - - addr, ok := listener.Addr().(*net.TCPAddr) - if !ok { - return nil, nil, errors.New("failed getting dasserver address from listener") - } - - var handler http.Handler - if config.JWTSecret != "" { - jwt, err := fetchJWTSecret(config.JWTSecret) - if err != nil { - return nil, nil, fmt.Errorf("failed creating new dasserver: %w", err) - } - handler = node.NewHTTPHandlerStack(rpcServer, nil, nil, jwt) - } else { - handler = rpcServer - } - - srv := &http.Server{ - Addr: "http://" + addr.String(), - Handler: handler, - ReadTimeout: config.ServerTimeouts.ReadTimeout, - ReadHeaderTimeout: config.ServerTimeouts.ReadHeaderTimeout, - WriteTimeout: config.ServerTimeouts.WriteTimeout, - IdleTimeout: config.ServerTimeouts.IdleTimeout, - } - go func() { - if err := srv.Serve(listener); err != 
nil && - !errors.Is(err, http.ErrServerClosed) { - log.Error("das-server's Serve method returned a non http.ErrServerClosed error", "err", err) - } - }() - - go func() { - <-ctx.Done() - _ = srv.Shutdown(context.Background()) - }() - - return srv, func() { - if dasLifecycleManager != nil { - dasLifecycleManager.StopAndWaitUntil(2 * time.Second) - } - }, nil -} - -func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*server_api.SupportedHeaderBytesResult, error) { - // DAS supports the DAS message header byte - return &server_api.SupportedHeaderBytesResult{ - HeaderBytes: []byte{daprovider.DASMessageHeaderFlag}, - }, nil -} - -func (s *Server) RecoverPayloadFromBatch( - ctx context.Context, - batchNum hexutil.Uint64, - batchBlockHash common.Hash, - sequencerMsg hexutil.Bytes, - preimages daprovider.PreimagesMap, - validateSeqMsg bool, -) (*server_api.RecoverPayloadFromBatchResult, error) { - // If preimages are requested, use CollectPreimages, otherwise RecoverPayload - if preimages != nil { - promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - result, err := promise.Await(ctx) - if err != nil { - return nil, err - } - // We still need to get the payload, so call RecoverPayload too - payloadPromise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - payloadResult, err := payloadPromise.Await(ctx) - if err != nil { - return nil, err - } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: payloadResult.Payload, - Preimages: result.Preimages, - }, nil - } else { - promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - result, err := promise.Await(ctx) - if err != nil { - return nil, err - } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: result.Payload, - Preimages: nil, - }, nil - } -} - -func (s *Server) Store( - ctx context.Context, - message hexutil.Bytes, - timeout hexutil.Uint64, - 
disableFallbackStoreDataOnChain bool, -) (*server_api.StoreResult, error) { - serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout), disableFallbackStoreDataOnChain) - if err != nil { - return nil, err - } - return &server_api.StoreResult{SerializedDACert: serializedDACert}, nil -} diff --git a/daprovider/server/das_migration.go b/daprovider/server/das_migration.go new file mode 100644 index 0000000000..bd545b0642 --- /dev/null +++ b/daprovider/server/das_migration.go @@ -0,0 +1,158 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +// Package dapserver contains temporary DAS migration code +// TODO: This file is temporary and will be removed once DA provider initialization +// is moved out of arbnode/node.go on the custom-da branch +package dapserver + +import ( + "context" + "net/http" + "time" + + "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/das" + "github.com/offchainlabs/nitro/daprovider/das/dasutil" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/signature" +) + +// DASServerConfig is the configuration for a DAS server +// TODO: This is temporary and duplicates dasserver.ServerConfig +// It will be removed when DAS initialization moves to the factory pattern +type DASServerConfig struct { + Addr string `koanf:"addr"` + Port uint64 `koanf:"port"` + JWTSecret string `koanf:"jwtsecret"` + EnableDAWriter bool `koanf:"enable-da-writer"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + ServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"server-timeouts"` + RPCServerBodyLimit int `koanf:"rpc-server-body-limit"` +} + +// 
DefaultDASServerConfig provides default values for DAS server configuration +// TODO: This is temporary and will be removed with the migration +var DefaultDASServerConfig = DASServerConfig{ + Addr: "localhost", + Port: 9880, + JWTSecret: "", + EnableDAWriter: false, + ServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, + RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault, + DataAvailability: das.DefaultDataAvailabilityConfig, +} + +// ServerConfigAddDASOptions adds DAS-specific command-line options +// TODO: This is temporary and will be removed when DAS config moves elsewhere +func ServerConfigAddDASOptions(prefix string, f *pflag.FlagSet) { + f.String(prefix+".addr", DefaultDASServerConfig.Addr, "JSON rpc server listening interface") + f.Uint64(prefix+".port", DefaultDASServerConfig.Port, "JSON rpc server listening port") + f.String(prefix+".jwtsecret", DefaultDASServerConfig.JWTSecret, "path to file with jwtsecret for validation") + f.Bool(prefix+".enable-da-writer", DefaultDASServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface") + f.Int(prefix+".rpc-server-body-limit", DefaultDASServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") + das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) + genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f) +} + +// NewServerForDAS creates a new DA provider server configured for DAS/AnyTrust +// TODO: This is temporary. On the custom-da branch, this initialization logic +// moves to the factory pattern and this function will be removed. 
+// +// Returns: +// - *http.Server: The HTTP server instance +// - func(): Cleanup function to stop the DAS lifecycle manager +// - error: Any error that occurred during initialization +func NewServerForDAS( + ctx context.Context, + config *DASServerConfig, + dataSigner signature.DataSignerFunc, + l1Client *ethclient.Client, + l1Reader *headerreader.HeaderReader, + sequencerInboxAddr common.Address, +) (*http.Server, func(), error) { + // Initialize DAS components + var err error + var daWriter dasutil.DASWriter + var daReader dasutil.DASReader + var dasKeysetFetcher *das.KeysetFetcher + var dasLifecycleManager *das.LifecycleManager + + if config.EnableDAWriter { + // Create both reader and writer for sequencer nodes + daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReaderAndWriter( + ctx, &config.DataAvailability, dataSigner, l1Client, sequencerInboxAddr, + ) + if err != nil { + return nil, nil, err + } + } else { + // Create only reader for non-sequencer nodes + daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReader( + ctx, &config.DataAvailability, l1Reader, &sequencerInboxAddr, + ) + if err != nil { + return nil, nil, err + } + } + + // Apply DAS-specific wrappers + daReader = das.NewReaderTimeoutWrapper(daReader, config.DataAvailability.RequestTimeout) + if config.DataAvailability.PanicOnError { + if daWriter != nil { + daWriter = das.NewWriterPanicWrapper(daWriter) + } + daReader = das.NewReaderPanicWrapper(daReader) + } + + // Convert to daprovider interfaces + var writer daprovider.Writer + if daWriter != nil { + writer = dasutil.NewWriterForDAS(daWriter) + } + reader := dasutil.NewReaderForDAS(daReader, dasKeysetFetcher) + + // Translate DAS config to generic server config + serverConfig := ServerConfig{ + Addr: config.Addr, + Port: config.Port, + JWTSecret: config.JWTSecret, + EnableDAWriter: config.EnableDAWriter, + ServerTimeouts: config.ServerTimeouts, + RPCServerBodyLimit: config.RPCServerBodyLimit, + } 
+ + // Create the generic DA provider server with DAS components + server, err := NewServerWithDAPProvider( + ctx, + &serverConfig, + reader, + writer, + nil, // DAS doesn't use a validator + []byte{daprovider.DASMessageHeaderFlag}, + ) + if err != nil { + // Clean up lifecycle manager if server creation fails + if dasLifecycleManager != nil { + dasLifecycleManager.StopAndWaitUntil(2 * time.Second) + } + return nil, nil, err + } + + // Return server and cleanup function for the lifecycle manager + cleanupFn := func() { + if dasLifecycleManager != nil { + log.Info("Stopping DAS lifecycle manager") + dasLifecycleManager.StopAndWaitUntil(2 * time.Second) + } + } + + return server, cleanupFn, nil +} From 51456b674e5698d56eeb0b6867988b58628c81f4 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 16:33:43 +0600 Subject: [PATCH 10/56] Split rpc payload and preimage methods In normal execution we only care about the payload, and in validation we only care about the preimages (we will reconstruct the payload using the preimages). This change separates these concerns and simplifies the signatures of these methods. 
--- daprovider/daclient/daclient.go | 17 +++++---- daprovider/server/provider_server.go | 52 ++++++++++++---------------- daprovider/server_api/types.go | 8 ----- 3 files changed, 30 insertions(+), 47 deletions(-) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 97697aa23f..d45337b511 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -71,11 +71,11 @@ func (c *Client) RecoverPayload( promise := containers.NewPromise[daprovider.PayloadResult](nil) go func() { ctx := context.Background() - var recoverPayloadFromBatchResult server_api.RecoverPayloadFromBatchResult - if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), nil, validateSeqMsg); err != nil { - promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayloadFromBatch rpc method, err: %w", err)) + var result daprovider.PayloadResult + if err := c.CallContext(ctx, &result, "daprovider_recoverPayload", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), validateSeqMsg); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayload rpc method, err: %w", err)) } else { - promise.Produce(daprovider.PayloadResult{Payload: recoverPayloadFromBatchResult.Payload}) + promise.Produce(result) } }() return &promise @@ -91,12 +91,11 @@ func (c *Client) CollectPreimages( promise := containers.NewPromise[daprovider.PreimagesResult](nil) go func() { ctx := context.Background() - preimages := make(daprovider.PreimagesMap) - var recoverPayloadFromBatchResult server_api.RecoverPayloadFromBatchResult - if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), preimages, validateSeqMsg); err != nil { - promise.ProduceError(fmt.Errorf("error returned from 
daprovider_recoverPayloadFromBatch rpc method, err: %w", err)) + var result daprovider.PreimagesResult + if err := c.CallContext(ctx, &result, "daprovider_collectPreimages", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), validateSeqMsg); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_collectPreimages rpc method, err: %w", err)) } else { - promise.Produce(daprovider.PreimagesResult{Preimages: recoverPayloadFromBatchResult.Preimages}) + promise.Produce(result) } }() return &promise diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index e6a2d8f1ad..9b981c9822 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -140,42 +140,34 @@ func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*server_api.Suppo }, nil } -func (s *Server) RecoverPayloadFromBatch( +func (s *Server) RecoverPayload( ctx context.Context, batchNum hexutil.Uint64, batchBlockHash common.Hash, sequencerMsg hexutil.Bytes, - preimages daprovider.PreimagesMap, validateSeqMsg bool, -) (*server_api.RecoverPayloadFromBatchResult, error) { - // If preimages are requested, use CollectPreimages, otherwise RecoverPayload - if preimages != nil { - promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - result, err := promise.Await(ctx) - if err != nil { - return nil, err - } - // We still need to get the payload, so call RecoverPayload too - payloadPromise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - payloadResult, err := payloadPromise.Await(ctx) - if err != nil { - return nil, err - } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: payloadResult.Payload, - Preimages: result.Preimages, - }, nil - } else { - promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) - result, err := promise.Await(ctx) - if err != nil { - 
return nil, err - } - return &server_api.RecoverPayloadFromBatchResult{ - Payload: result.Payload, - Preimages: nil, - }, nil +) (*daprovider.PayloadResult, error) { + promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err + } + return &result, nil +} + +func (s *Server) CollectPreimages( + ctx context.Context, + batchNum hexutil.Uint64, + batchBlockHash common.Hash, + sequencerMsg hexutil.Bytes, + validateSeqMsg bool, +) (*daprovider.PreimagesResult, error) { + promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + result, err := promise.Await(ctx) + if err != nil { + return nil, err } + return &result, nil } func (s *Server) Store( diff --git a/daprovider/server_api/types.go b/daprovider/server_api/types.go index 868877d2e5..9a26c6d683 100644 --- a/daprovider/server_api/types.go +++ b/daprovider/server_api/types.go @@ -5,8 +5,6 @@ package server_api import ( "github.com/ethereum/go-ethereum/common/hexutil" - - "github.com/offchainlabs/nitro/daprovider" ) // SupportedHeaderBytesResult is the result struct that data availability providers should use to respond with their supported header bytes @@ -14,12 +12,6 @@ type SupportedHeaderBytesResult struct { HeaderBytes hexutil.Bytes `json:"headerBytes,omitempty"` } -// RecoverPayloadFromBatchResult is the result struct that data availability providers should use to respond with underlying payload and updated preimages map to a RecoverPayloadFromBatch fetch request -type RecoverPayloadFromBatchResult struct { - Payload hexutil.Bytes `json:"payload,omitempty"` - Preimages daprovider.PreimagesMap `json:"preimages,omitempty"` -} - // StoreResult is the result struct that data availability providers should use to respond with a commitment to a Store request for posting batch data to their DA service type StoreResult struct { SerializedDACert hexutil.Bytes 
`json:"serialized-da-cert,omitempty"` From 0fdfdc4f77b4a728c6a41a203e7ce9ca39591a76 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 17:45:41 +0600 Subject: [PATCH 11/56] Removed the validateSeqMsg param from Reader For DAS and ReferenceDA we almost always want to validate the certificate. The only time we don't is in the replay binary when we want to panic instead if it's invalid on the first read of the message, otherwise we want to ignore it and assume it's already valid. So this means that it doesn't make sense for validateSeqMsg to be part of the reader API at all, which is great because it means we can simplify the API. We can pass into the reader at construction time how to handle this because in replay it always constructs a new (fake, non-network calling) reader for each invocation. --- arbstate/inbox.go | 8 ++++-- cmd/replay/main.go | 2 +- daprovider/daclient/daclient.go | 6 ++-- daprovider/das/dasutil/dasutil.go | 21 +++++++------- daprovider/factory/factory.go | 2 +- daprovider/reader.go | 9 ++---- daprovider/referenceda/reference_reader.go | 33 +++++++++------------- daprovider/server/das_migration.go | 2 +- daprovider/server/provider_server.go | 6 ++-- staker/stateless_block_validator.go | 2 +- 10 files changed, 41 insertions(+), 50 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index accb226936..a0c308e626 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -79,7 +79,7 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Use the registry to find the appropriate reader for the header byte if len(payload) > 0 && dapReaders != nil { if dapReader, found := dapReaders.GetByHeaderByte(payload[0]); found { - promise := dapReader.RecoverPayload(batchNum, batchBlockHash, data, keysetValidationMode != daprovider.KeysetDontValidate) + promise := dapReader.RecoverPayload(batchNum, batchBlockHash, data) result, err := promise.Await(ctx) if err != nil { // Matches the way keyset validation 
was done inside DAS readers i.e logging the error @@ -173,7 +173,11 @@ type inboxMultiplexer struct { cachedSegmentTimestamp uint64 cachedSegmentBlockNumber uint64 cachedSubMessageNumber uint64 - keysetValidationMode daprovider.KeysetValidationMode + // keysetValidationMode is used for error handling in ParseSequencerMessage. + // Note: DAS readers now handle validation internally based on their construction-time mode, + // but ParseSequencerMessage still needs this to decide whether to panic or log on validation errors. + // In replay mode, this allows proper error handling based on the position within the message. + keysetValidationMode daprovider.KeysetValidationMode } func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 8b29489b16..69d93a192d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -242,7 +242,7 @@ func main() { } dapReaders := daprovider.NewReaderRegistry() if dasReader != nil { - err = dapReaders.SetupDASReader(dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher)) + err = dapReaders.SetupDASReader(dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher, keysetValidationMode)) if err != nil { panic(fmt.Sprintf("Failed to register DAS reader: %v", err)) } diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index d45337b511..c0115e4cce 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -66,13 +66,12 @@ func (c *Client) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[daprovider.PayloadResult] { promise := containers.NewPromise[daprovider.PayloadResult](nil) go func() { ctx := context.Background() var result daprovider.PayloadResult - if err := c.CallContext(ctx, &result, "daprovider_recoverPayload", 
hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), validateSeqMsg); err != nil { + if err := c.CallContext(ctx, &result, "daprovider_recoverPayload", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg)); err != nil { promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayload rpc method, err: %w", err)) } else { promise.Produce(result) @@ -86,13 +85,12 @@ func (c *Client) CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[daprovider.PreimagesResult] { promise := containers.NewPromise[daprovider.PreimagesResult](nil) go func() { ctx := context.Background() var result daprovider.PreimagesResult - if err := c.CallContext(ctx, &result, "daprovider_collectPreimages", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), validateSeqMsg); err != nil { + if err := c.CallContext(ctx, &result, "daprovider_collectPreimages", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg)); err != nil { promise.ProduceError(fmt.Errorf("error returned from daprovider_collectPreimages rpc method, err: %w", err)) } else { promise.Produce(result) diff --git a/daprovider/das/dasutil/dasutil.go b/daprovider/das/dasutil/dasutil.go index 8812f3cfad..afae0c7964 100644 --- a/daprovider/das/dasutil/dasutil.go +++ b/daprovider/das/dasutil/dasutil.go @@ -41,16 +41,18 @@ type DASKeysetFetcher interface { // NewReaderForDAS is generally meant to be only used by nitro. 
// DA Providers should implement methods in the Reader interface independently -func NewReaderForDAS(dasReader DASReader, keysetFetcher DASKeysetFetcher) *readerForDAS { +func NewReaderForDAS(dasReader DASReader, keysetFetcher DASKeysetFetcher, validationMode daprovider.KeysetValidationMode) *readerForDAS { return &readerForDAS{ - dasReader: dasReader, - keysetFetcher: keysetFetcher, + dasReader: dasReader, + keysetFetcher: keysetFetcher, + validationMode: validationMode, } } type readerForDAS struct { - dasReader DASReader - keysetFetcher DASKeysetFetcher + dasReader DASReader + keysetFetcher DASKeysetFetcher + validationMode daprovider.KeysetValidationMode } // recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages @@ -58,10 +60,11 @@ func (d *readerForDAS) recoverInternal( ctx context.Context, batchNum uint64, sequencerMsg []byte, - validateSeqMsg bool, needPayload bool, needPreimages bool, ) ([]byte, daprovider.PreimagesMap, error) { + // Convert validation mode to boolean for the internal function + validateSeqMsg := d.validationMode != daprovider.KeysetDontValidate return recoverPayloadFromDasBatchInternal(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, validateSeqMsg, needPayload, needPreimages) } @@ -70,12 +73,11 @@ func (d *readerForDAS) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[daprovider.PayloadResult] { promise := containers.NewPromise[daprovider.PayloadResult](nil) go func() { ctx := context.Background() - payload, _, err := d.recoverInternal(ctx, batchNum, sequencerMsg, validateSeqMsg, true, false) + payload, _, err := d.recoverInternal(ctx, batchNum, sequencerMsg, true, false) if err != nil { promise.ProduceError(err) } else { @@ -90,12 +92,11 @@ func (d *readerForDAS) CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) 
containers.PromiseInterface[daprovider.PreimagesResult] { promise := containers.NewPromise[daprovider.PreimagesResult](nil) go func() { ctx := context.Background() - _, preimages, err := d.recoverInternal(ctx, batchNum, sequencerMsg, validateSeqMsg, false, true) + _, preimages, err := d.recoverInternal(ctx, batchNum, sequencerMsg, false, true) if err != nil { promise.ProduceError(err) } else { diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index f4159fdee1..baff1b4357 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -135,7 +135,7 @@ func (f *AnyTrustFactory) CreateReader(ctx context.Context) (daprovider.Reader, daReader = das.NewReaderPanicWrapper(daReader) } - reader := dasutil.NewReaderForDAS(daReader, keysetFetcher) + reader := dasutil.NewReaderForDAS(daReader, keysetFetcher, daprovider.KeysetValidate) cleanupFn := func() { if lifecycleManager != nil { lifecycleManager.StopAndWaitUntil(0) diff --git a/daprovider/reader.go b/daprovider/reader.go index 9df85b09d2..c18f81e65d 100644 --- a/daprovider/reader.go +++ b/daprovider/reader.go @@ -46,7 +46,6 @@ type Reader interface { batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[PayloadResult] // CollectPreimages collects preimages from the DA provider given the batch header information @@ -54,7 +53,6 @@ type Reader interface { batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[PreimagesResult] } @@ -74,7 +72,6 @@ func (b *readerForBlobReader) recoverInternal( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, needPayload bool, needPreimages bool, ) ([]byte, PreimagesMap, error) { @@ -120,12 +117,11 @@ func (b *readerForBlobReader) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[PayloadResult] { 
promise := containers.NewPromise[PayloadResult](nil) go func() { ctx := context.Background() - payload, _, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, true, false) + payload, _, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, true, false) if err != nil { promise.ProduceError(err) } else { @@ -140,12 +136,11 @@ func (b *readerForBlobReader) CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[PreimagesResult] { promise := containers.NewPromise[PreimagesResult](nil) go func() { ctx := context.Background() - _, preimages, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, false, true) + _, preimages, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, false, true) if err != nil { promise.ProduceError(err) } else { diff --git a/daprovider/referenceda/reference_reader.go b/daprovider/referenceda/reference_reader.go index 5186904bda..4f9ea1d509 100644 --- a/daprovider/referenceda/reference_reader.go +++ b/daprovider/referenceda/reference_reader.go @@ -40,7 +40,6 @@ func (r *Reader) recoverInternal( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, needPayload bool, needPreimages bool, ) ([]byte, daprovider.PreimagesMap, error) { @@ -57,22 +56,20 @@ func (r *Reader) recoverInternal( return nil, nil, fmt.Errorf("failed to deserialize certificate: %w", err) } - // Validate certificate if requested + // Validate certificate - always validate for ReferenceDA // TODO: Uncomment the following once we have merged customda contracts changes. 
/* - if validateSeqMsg { - // Create contract binding - validator, err := ospgen.NewReferenceDAProofValidator(r.validatorAddr, r.l1Client) - if err != nil { - return nil, nil, fmt.Errorf("failed to create validator binding: %w", err) - } - - // Validate using contract - callOpts := &bind.CallOpts{Context: ctx} - err = cert.ValidateWithContract(validator, callOpts) - if err != nil { - return nil, nil, fmt.Errorf("certificate validation failed: %w", err) - } + // Create contract binding + validator, err := ospgen.NewReferenceDAProofValidator(r.validatorAddr, r.l1Client) + if err != nil { + return nil, nil, fmt.Errorf("failed to create validator binding: %w", err) + } + + // Validate using contract + callOpts := &bind.CallOpts{Context: ctx} + err = cert.ValidateWithContract(validator, callOpts) + if err != nil { + return nil, nil, fmt.Errorf("certificate validation failed: %w", err) } */ @@ -125,12 +122,11 @@ func (r *Reader) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[daprovider.PayloadResult] { promise := containers.NewPromise[daprovider.PayloadResult](nil) go func() { ctx := context.Background() - payload, _, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, true, false) + payload, _, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, true, false) if err != nil { promise.ProduceError(err) } else { @@ -145,12 +141,11 @@ func (r *Reader) CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - validateSeqMsg bool, ) containers.PromiseInterface[daprovider.PreimagesResult] { promise := containers.NewPromise[daprovider.PreimagesResult](nil) go func() { ctx := context.Background() - _, preimages, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, validateSeqMsg, false, true) + _, preimages, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, false, true) if err 
!= nil { promise.ProduceError(err) } else { diff --git a/daprovider/server/das_migration.go b/daprovider/server/das_migration.go index bd545b0642..a3290047bd 100644 --- a/daprovider/server/das_migration.go +++ b/daprovider/server/das_migration.go @@ -117,7 +117,7 @@ func NewServerForDAS( if daWriter != nil { writer = dasutil.NewWriterForDAS(daWriter) } - reader := dasutil.NewReaderForDAS(daReader, dasKeysetFetcher) + reader := dasutil.NewReaderForDAS(daReader, dasKeysetFetcher, daprovider.KeysetValidate) // Translate DAS config to generic server config serverConfig := ServerConfig{ diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 9b981c9822..aa8ec0a253 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -145,9 +145,8 @@ func (s *Server) RecoverPayload( batchNum hexutil.Uint64, batchBlockHash common.Hash, sequencerMsg hexutil.Bytes, - validateSeqMsg bool, ) (*daprovider.PayloadResult, error) { - promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg) result, err := promise.Await(ctx) if err != nil { return nil, err @@ -160,9 +159,8 @@ func (s *Server) CollectPreimages( batchNum hexutil.Uint64, batchBlockHash common.Hash, sequencerMsg hexutil.Bytes, - validateSeqMsg bool, ) (*daprovider.PreimagesResult, error) { - promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg, validateSeqMsg) + promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg) result, err := promise.Await(ctx) if err != nil { return nil, err diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index cff150b3b7..aff94cad45 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -327,7 +327,7 @@ func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, 
batchNum ui if len(postedData) > 40 && v.dapReaders != nil { headerByte := postedData[40] if dapReader, found := v.dapReaders.GetByHeaderByte(headerByte); found { - promise := dapReader.CollectPreimages(batchNum, batchBlockHash, postedData, true) + promise := dapReader.CollectPreimages(batchNum, batchBlockHash, postedData) result, err := promise.Await(ctx) if err != nil { // Matches the way keyset validation was done inside DAS readers i.e logging the error From 162ff103ffebeb7bad2a1a2800a194c7df54776c Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 25 Sep 2025 18:07:01 +0600 Subject: [PATCH 12/56] Use Promises with Validator interface --- daprovider/daclient/daclient.go | 38 +++++++++------ daprovider/referenceda/reference_validator.go | 46 ++++++++++++++++++- daprovider/server/provider_server.go | 10 ++-- daprovider/validator.go | 18 ++++++-- 4 files changed, 88 insertions(+), 24 deletions(-) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index c0115e4cce..ca762c548e 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -115,25 +115,35 @@ func (c *Client) Store( // GenerateReadPreimageProof generates a proof for a specific preimage at a given offset // This method calls the external DA provider's RPC endpoint to generate the proof func (c *Client) GenerateReadPreimageProof( - ctx context.Context, certHash common.Hash, offset uint64, certificate []byte, -) ([]byte, error) { - var generateProofResult server_api.GenerateReadPreimageProofResult - if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { - return nil, fmt.Errorf("error returned from daprovider_generateProof rpc method, err: %w", err) - } - return generateProofResult.Proof, nil +) containers.PromiseInterface[daprovider.PreimageProofResult] { + promise := containers.NewPromise[daprovider.PreimageProofResult](nil) + go func() 
{ + ctx := context.Background() + var generateProofResult server_api.GenerateReadPreimageProofResult + if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_generateProof rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.PreimageProofResult{Proof: generateProofResult.Proof}) + } + }() + return &promise } func (c *Client) GenerateCertificateValidityProof( - ctx context.Context, certificate []byte, -) ([]byte, error) { - var generateCertificateValidityProofResult server_api.GenerateCertificateValidityProofResult - if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Bytes(certificate)); err != nil { - return nil, fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err) - } - return generateCertificateValidityProofResult.Proof, nil +) containers.PromiseInterface[daprovider.ValidityProofResult] { + promise := containers.NewPromise[daprovider.ValidityProofResult](nil) + go func() { + ctx := context.Background() + var generateCertificateValidityProofResult server_api.GenerateCertificateValidityProofResult + if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Bytes(certificate)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.ValidityProofResult{Proof: generateCertificateValidityProofResult.Proof}) + } + }() + return &promise } diff --git a/daprovider/referenceda/reference_validator.go b/daprovider/referenceda/reference_validator.go index 06484b3f00..d99403262f 100644 --- a/daprovider/referenceda/reference_validator.go +++ 
b/daprovider/referenceda/reference_validator.go @@ -10,6 +10,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" + + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/containers" ) type Validator struct { @@ -29,7 +32,7 @@ func NewValidator(l1Client *ethclient.Client, validatorAddr common.Address) *Val // GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA // The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate] // So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData] -func (v *Validator) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { +func (v *Validator) generateReadPreimageProofInternal(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { // Deserialize certificate to extract data hash cert, err := Deserialize(certificate) if err != nil { @@ -58,6 +61,23 @@ func (v *Validator) GenerateReadPreimageProof(ctx context.Context, certHash comm return proof, nil } +// GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA +// The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate] +// So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData] +func (v *Validator) GenerateReadPreimageProof(certHash common.Hash, offset uint64, certificate []byte) containers.PromiseInterface[daprovider.PreimageProofResult] { + promise := containers.NewPromise[daprovider.PreimageProofResult](nil) + go func() { + ctx := context.Background() + proof, err := v.generateReadPreimageProofInternal(ctx, certHash, offset, certificate) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PreimageProofResult{Proof: proof}) + } + }() + return &promise +} + // 
GenerateCertificateValidityProof creates a certificate validity proof for ReferenceDA // The ReferenceDA implementation returns a two-byte proof with: // - claimedValid (1 byte): 1 if valid, 0 if invalid @@ -66,7 +86,7 @@ func (v *Validator) GenerateReadPreimageProof(ctx context.Context, certHash comm // This validates the certificate signature against trusted signers from the contract. // Invalid certificates (wrong format, untrusted signer) return claimedValid=0. // Only transient errors (like RPC failures) return an error. -func (v *Validator) GenerateCertificateValidityProof(ctx context.Context, certificate []byte) ([]byte, error) { +func (v *Validator) generateCertificateValidityProofInternal(ctx context.Context, certificate []byte) ([]byte, error) { // Try to deserialize certificate cert, err := Deserialize(certificate) if err != nil { @@ -111,3 +131,25 @@ func (v *Validator) GenerateCertificateValidityProof(ctx context.Context, certif // Certificate is valid (signed by trusted signer) return []byte{1, 0x01}, nil // Valid certificate, version 1 } + +// GenerateCertificateValidityProof creates a certificate validity proof for ReferenceDA +// The ReferenceDA implementation returns a two-byte proof with: +// - claimedValid (1 byte): 1 if valid, 0 if invalid +// - version (1 byte): 0x01 for version 1 +// +// This validates the certificate signature against trusted signers from the contract. +// Invalid certificates (wrong format, untrusted signer) return claimedValid=0. +// Only transient errors (like RPC failures) return an error. 
+func (v *Validator) GenerateCertificateValidityProof(certificate []byte) containers.PromiseInterface[daprovider.ValidityProofResult] { + promise := containers.NewPromise[daprovider.ValidityProofResult](nil) + go func() { + ctx := context.Background() + proof, err := v.generateCertificateValidityProofInternal(ctx, certificate) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.ValidityProofResult{Proof: proof}) + } + }() + return &promise +} diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index aa8ec0a253..2b4dc60d7d 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -186,11 +186,12 @@ func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common. return nil, errors.New("validator not available") } // #nosec G115 - proof, err := s.validator.GenerateReadPreimageProof(ctx, certHash, uint64(offset), certificate) + promise := s.validator.GenerateReadPreimageProof(certHash, uint64(offset), certificate) + result, err := promise.Await(ctx) if err != nil { return nil, err } - return &server_api.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(proof)}, nil + return &server_api.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(result.Proof)}, nil } func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certificate hexutil.Bytes) (*server_api.GenerateCertificateValidityProofResult, error) { @@ -198,9 +199,10 @@ func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certifica return nil, errors.New("validator not available") } // #nosec G115 - proof, err := s.validator.GenerateCertificateValidityProof(ctx, certificate) + promise := s.validator.GenerateCertificateValidityProof(certificate) + result, err := promise.Await(ctx) if err != nil { return nil, err } - return &server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(proof)}, nil + return 
&server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(result.Proof)}, nil } diff --git a/daprovider/validator.go b/daprovider/validator.go index d97ec87458..0b6c15272b 100644 --- a/daprovider/validator.go +++ b/daprovider/validator.go @@ -4,11 +4,21 @@ package daprovider import ( - "context" - "github.com/ethereum/go-ethereum/common" + + "github.com/offchainlabs/nitro/util/containers" ) +// PreimageProofResult contains the generated preimage proof +type PreimageProofResult struct { + Proof []byte +} + +// ValidityProofResult contains the generated validity proof +type ValidityProofResult struct { + Proof []byte +} + // Validator defines the interface for custom data availability systems. // This interface is used to generate proofs for DACertificate certificates and preimages. type Validator interface { @@ -16,9 +26,9 @@ type Validator interface { // The proof format depends on the implementation and must be compatible with the Solidity // IDACertificateValidator contract. // certHash is the keccak256 hash of the certificate. - GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) + GenerateReadPreimageProof(certHash common.Hash, offset uint64, certificate []byte) containers.PromiseInterface[PreimageProofResult] // GenerateCertificateValidityProof returns a proof of whether the certificate // is valid according to the DA system's rules. 
- GenerateCertificateValidityProof(ctx context.Context, certificate []byte) ([]byte, error) + GenerateCertificateValidityProof(certificate []byte) containers.PromiseInterface[ValidityProofResult] } From cb742ea9c2fce0d9edbd508f6ecf45ee9dfed11e Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 25 Sep 2025 22:20:53 -0700 Subject: [PATCH 13/56] Revert #3700 (#3711) #3700 was opened to address docker authentication issues, but turns out original problem was caused simply because docker hub was down --- .github/workflows/_go-tests.yml | 15 +++++---------- .github/workflows/docker.yml | 15 ++++++--------- .github/workflows/nightly-ci.yml | 15 ++++++--------- 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/.github/workflows/_go-tests.yml b/.github/workflows/_go-tests.yml index 378c8483e0..c65fe18e07 100644 --- a/.github/workflows/_go-tests.yml +++ b/.github/workflows/_go-tests.yml @@ -11,17 +11,12 @@ jobs: fail-fast: false matrix: test-mode: [defaults, pathdb, challenge, stylus, l3challenge] - + services: + redis: + image: redis + ports: + - 6379:6379 steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Start redis - run: docker run -d -p 6379:6379 redis:latest - - name: Checkout uses: actions/checkout@v5 with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0b3aa3f221..c4bc249b93 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -16,17 +16,14 @@ jobs: docker: name: Docker build runs-on: arbitrator-ci + services: + # local registry + registry: + image: registry:2 + ports: + - 5000:5000 steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Start registry - run: docker run -d -p 5000:5000 registry:2.8 - - name: Checkout uses: actions/checkout@v5 
with: diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml index c991852b57..e6eb2f4738 100644 --- a/.github/workflows/nightly-ci.yml +++ b/.github/workflows/nightly-ci.yml @@ -14,21 +14,18 @@ jobs: name: Scheduled tests runs-on: arbitrator-ci + services: + redis: + image: redis + ports: + - 6379:6379 + strategy: fail-fast: false matrix: test-mode: [legacychallenge, long, challenge, l3challenge, execution-spec-tests] steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Start redis - run: docker run -d -p 6379:6379 redis:latest - - name: Checkout uses: actions/checkout@v5 with: From 87712d4d0437a930b308b571f592ce9a8543270a Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 26 Sep 2025 11:29:36 +0600 Subject: [PATCH 14/56] Add missing import --- daprovider/server/provider_server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index be0e2d9905..6b67b80220 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -12,6 +12,7 @@ import ( "net/http" "os" "strings" + "time" flag "github.com/spf13/pflag" From 4546581d5027694e6d2f328107bc8f58c656e6d2 Mon Sep 17 00:00:00 2001 From: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> Date: Fri, 26 Sep 2025 12:08:02 +0600 Subject: [PATCH 15/56] Remove deprecated AnyTrust db storage service (#3701) The DbStorageService has been deprecated for over a year, with a warning printed for any remaining users during that time informing them of how to migrate and that if they didn't, continuing to try to use it would be a fatal error in future. The future is here. This also lets us remove badgerdb is a dependency. 
--- daprovider/das/das.go | 5 - daprovider/das/das_test.go | 26 +-- daprovider/das/db_storage_service.go | 285 --------------------------- daprovider/das/factory.go | 21 +- go.mod | 7 +- go.sum | 16 -- system_tests/common_test.go | 10 +- system_tests/das_test.go | 7 +- 8 files changed, 7 insertions(+), 370 deletions(-) delete mode 100644 daprovider/das/db_storage_service.go diff --git a/daprovider/das/das.go b/daprovider/das/das.go index 12d0f13339..9bb19d7132 100644 --- a/daprovider/das/das.go +++ b/daprovider/das/das.go @@ -29,13 +29,10 @@ type DataAvailabilityConfig struct { LocalCache CacheConfig `koanf:"local-cache"` RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` S3Storage S3StorageServiceConfig `koanf:"s3-storage"` GoogleCloudStorage GoogleCloudStorageServiceConfig `koanf:"google-cloud-storage"` - MigrateLocalDBToFileStorage bool `koanf:"migrate-local-db-to-file-storage"` - Key KeyConfig `koanf:"key"` RPCAggregator AggregatorConfig `koanf:"rpc-aggregator"` @@ -100,11 +97,9 @@ func dataAvailabilityConfigAddOptions(prefix string, f *pflag.FlagSet, r role) { RedisConfigAddOptions(prefix+".redis-cache", f) // Storage options - LocalDBStorageConfigAddOptions(prefix+".local-db-storage", f) LocalFileStorageConfigAddOptions(prefix+".local-file-storage", f) S3ConfigAddOptions(prefix+".s3-storage", f) GoogleCloudConfigAddOptions(prefix+".google-cloud-storage", f) - f.Bool(prefix+".migrate-local-db-to-file-storage", DefaultDataAvailabilityConfig.MigrateLocalDBToFileStorage, "daserver will migrate all data on startup from local-db-storage to local-file-storage, then mark local-db-storage as unusable") // Key config for storage KeyConfigAddOptions(prefix+".key", f) diff --git a/daprovider/das/das_test.go b/daprovider/das/das_test.go index 1f3b174ba7..e92b34aaec 100644 --- a/daprovider/das/das_test.go +++ b/daprovider/das/das_test.go @@ -21,20 
+21,14 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { _, _, err := GenerateAndStoreKeys(dbPath) Require(t, err) - enableFileStorage, enableDbStorage := false, false + enableFileStorage := false switch storageType { - case "db": - enableDbStorage = true case "files": enableFileStorage = true default: Fail(t, "unknown storage type") } - dbConfig := DefaultLocalDBStorageConfig - dbConfig.Enable = enableDbStorage - dbConfig.DataDir = dbPath - config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -45,7 +39,6 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { DataDir: dbPath, MaxRetention: DefaultLocalFileStorageConfig.MaxRetention, }, - LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } @@ -100,10 +93,6 @@ func TestDASStoreRetrieveMultipleInstancesFiles(t *testing.T) { testDASStoreRetrieveMultipleInstances(t, "files") } -func TestDASStoreRetrieveMultipleInstancesDB(t *testing.T) { - testDASStoreRetrieveMultipleInstances(t, "db") -} - func testDASMissingMessage(t *testing.T, storageType string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -112,20 +101,14 @@ func testDASMissingMessage(t *testing.T, storageType string) { _, _, err := GenerateAndStoreKeys(dbPath) Require(t, err) - enableFileStorage, enableDbStorage := false, false + enableFileStorage := false switch storageType { - case "db": - enableDbStorage = true case "files": enableFileStorage = true default: Fail(t, "unknown storage type") } - dbConfig := DefaultLocalDBStorageConfig - dbConfig.Enable = enableDbStorage - dbConfig.DataDir = dbPath - config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -136,7 +119,6 @@ func testDASMissingMessage(t *testing.T, storageType string) { DataDir: dbPath, MaxRetention: DefaultLocalFileStorageConfig.MaxRetention, }, - LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } @@ -174,10 +156,6 @@ func TestDASMissingMessageFiles(t *testing.T) { 
testDASMissingMessage(t, "files") } -func TestDASMissingMessageDB(t *testing.T) { - testDASMissingMessage(t, "db") -} - func Require(t *testing.T, err error, printables ...interface{}) { t.Helper() testhelpers.RequireImpl(t, err, printables...) diff --git a/daprovider/das/db_storage_service.go b/daprovider/das/db_storage_service.go deleted file mode 100644 index 8aee7be8d8..0000000000 --- a/daprovider/das/db_storage_service.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md - -package das - -import ( - "bytes" - "context" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "time" - - "github.com/dgraph-io/badger/v4" - "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/offchainlabs/nitro/daprovider/das/dastree" - "github.com/offchainlabs/nitro/daprovider/das/dasutil" - "github.com/offchainlabs/nitro/util/pretty" - "github.com/offchainlabs/nitro/util/stopwaiter" -) - -type LocalDBStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - - // BadgerDB options - NumMemtables int `koanf:"num-memtables"` - NumLevelZeroTables int `koanf:"num-level-zero-tables"` - NumLevelZeroTablesStall int `koanf:"num-level-zero-tables-stall"` - NumCompactors int `koanf:"num-compactors"` - BaseTableSize int64 `koanf:"base-table-size"` - ValueLogFileSize int64 `koanf:"value-log-file-size"` -} - -var badgerDefaultOptions = badger.DefaultOptions("") - -const migratedMarker = "MIGRATED" - -var DefaultLocalDBStorageConfig = LocalDBStorageConfig{ - Enable: false, - DataDir: "", - DiscardAfterTimeout: false, - - NumMemtables: badgerDefaultOptions.NumMemtables, - NumLevelZeroTables: badgerDefaultOptions.NumLevelZeroTables, - NumLevelZeroTablesStall: badgerDefaultOptions.NumLevelZeroTablesStall, - NumCompactors: 
badgerDefaultOptions.NumCompactors, - BaseTableSize: badgerDefaultOptions.BaseTableSize, - ValueLogFileSize: badgerDefaultOptions.ValueLogFileSize, -} - -func LocalDBStorageConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "!!!DEPRECATED, USE local-file-storage!!! enable storage/retrieval of sequencer batch data from a database on the local filesystem") - f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database") - f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - - f.Int(prefix+".num-memtables", DefaultLocalDBStorageConfig.NumMemtables, "BadgerDB option: sets the maximum number of tables to keep in memory before stalling") - f.Int(prefix+".num-level-zero-tables", DefaultLocalDBStorageConfig.NumLevelZeroTables, "BadgerDB option: sets the maximum number of Level 0 tables before compaction starts") - f.Int(prefix+".num-level-zero-tables-stall", DefaultLocalDBStorageConfig.NumLevelZeroTablesStall, "BadgerDB option: sets the number of Level 0 tables that once reached causes the DB to stall until compaction succeeds") - f.Int(prefix+".num-compactors", DefaultLocalDBStorageConfig.NumCompactors, "BadgerDB option: Sets the number of compaction workers to run concurrently") - f.Int64(prefix+".base-table-size", DefaultLocalDBStorageConfig.BaseTableSize, "BadgerDB option: sets the maximum size in bytes for LSM table or file in the base level") - f.Int64(prefix+".value-log-file-size", DefaultLocalDBStorageConfig.ValueLogFileSize, "BadgerDB option: sets the maximum size of a single log file") - -} - -type DBStorageService struct { - db *badger.DB - discardAfterTimeout bool - dirPath string - stopWaiter stopwaiter.StopWaiterSafe -} - -// The DBStorageService is deprecated. 
This function will migrate data to the target -// LocalFileStorageService if it is provided and migration hasn't already happened. -func NewDBStorageService(ctx context.Context, config *LocalDBStorageConfig, target *LocalFileStorageService) (*DBStorageService, error) { - if alreadyMigrated(config.DataDir) { - log.Warn("local-db-storage already migrated, please remove it from the daserver configuration and restart. data-dir can be cleaned up manually now") - return nil, nil - } - if target == nil { - log.Error("local-db-storage is DEPRECATED, please use use the local-file-storage and migrate-local-db-to-file-storage options. This error will be made fatal in future, continuing for now...") - } - - options := badger.DefaultOptions(config.DataDir). - WithNumMemtables(config.NumMemtables). - WithNumLevelZeroTables(config.NumLevelZeroTables). - WithNumLevelZeroTablesStall(config.NumLevelZeroTablesStall). - WithNumCompactors(config.NumCompactors). - WithBaseTableSize(config.BaseTableSize). - WithValueLogFileSize(config.ValueLogFileSize) - db, err := badger.Open(options) - if err != nil { - return nil, err - } - - ret := &DBStorageService{ - db: db, - discardAfterTimeout: config.DiscardAfterTimeout, - dirPath: config.DataDir, - } - - if target != nil { - if err = ret.migrateTo(ctx, target); err != nil { - return nil, fmt.Errorf("error migrating local-db-storage to %s: %w", target, err) - } - if err = ret.setMigrated(); err != nil { - return nil, fmt.Errorf("error finalizing migration of local-db-storage to %s: %w", target, err) - } - return nil, nil - } - - if err := ret.stopWaiter.Start(ctx, ret); err != nil { - return nil, err - } - - err = ret.stopWaiter.LaunchThreadSafe(func(myCtx context.Context) { - ticker := time.NewTicker(5 * time.Minute) - defer ticker.Stop() - defer func() { - if err := ret.db.Close(); err != nil { - log.Error("Failed to close DB", "err", err) - } - }() - for { - select { - case <-ticker.C: - for db.RunValueLogGC(0.7) == nil { - select { - case 
<-myCtx.Done(): - return - default: - } - } - case <-myCtx.Done(): - return - } - } - }) - if err != nil { - return nil, err - } - - return ret, nil -} - -func (dbs *DBStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { - log.Trace("das.DBStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", dbs) - - var ret []byte - err := dbs.db.View(func(txn *badger.Txn) error { - item, err := txn.Get(key.Bytes()) - if err != nil { - return err - } - return item.Value(func(val []byte) error { - ret = append([]byte{}, val...) - return nil - }) - }) - if errors.Is(err, badger.ErrKeyNotFound) { - return ret, ErrNotFound - } - return ret, err -} - -func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - logPut("das.DBStorageService.Put", data, timeout, dbs) - - return dbs.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry(dastree.HashBytes(data), data) - if dbs.discardAfterTimeout && timeout <= math.MaxInt64 { - // #nosec G115 - e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0))) - } - return txn.SetEntry(e) - }) -} - -func (dbs *DBStorageService) migrateTo(ctx context.Context, s StorageService) error { - originExpirationPolicy, err := dbs.ExpirationPolicy(ctx) - if err != nil { - return err - } - targetExpirationPolicy, err := s.ExpirationPolicy(ctx) - if err != nil { - return err - } - - if originExpirationPolicy == dasutil.KeepForever && targetExpirationPolicy == dasutil.DiscardAfterDataTimeout { - return errors.New("can't migrate from DBStorageService to target, incompatible expiration policies - can't migrate from non-expiring to expiring since non-expiring DB lacks expiry time metadata") - } - - return dbs.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - it := txn.NewIterator(opts) - defer it.Close() - log.Info("Migrating from DBStorageService", "target", s) - migrationStart := time.Now() - count := 0 - for it.Rewind(); it.Valid(); it.Next() { - if 
count%1000 == 0 { - log.Info("Migration in progress", "migrated", count) - } - item := it.Item() - k := item.Key() - expiry := item.ExpiresAt() - err := item.Value(func(v []byte) error { - log.Trace("migrated", "key", pretty.FirstFewBytes(k), "value", pretty.FirstFewBytes(v), "expiry", expiry) - return s.Put(ctx, v, expiry) - }) - if err != nil { - return err - } - count++ - } - log.Info("Migration from DBStorageService complete", "target", s, "migrated", count, "duration", time.Since(migrationStart)) - return nil - }) -} - -func (dbs *DBStorageService) Sync(ctx context.Context) error { - return dbs.db.Sync() -} - -func (dbs *DBStorageService) Close(ctx context.Context) error { - return dbs.stopWaiter.StopAndWait() -} - -func alreadyMigrated(dirPath string) bool { - migratedMarkerFile := filepath.Join(dirPath, migratedMarker) - _, err := os.Stat(migratedMarkerFile) - if os.IsNotExist(err) { - return false - } - if err != nil { - log.Error("error checking if local-db-storage is already migrated", "err", err) - return false - } - return true -} - -func (dbs *DBStorageService) setMigrated() error { - migratedMarkerFile := filepath.Join(dbs.dirPath, migratedMarker) - file, err := os.OpenFile(migratedMarkerFile, os.O_CREATE|os.O_WRONLY, 0o600) - if err != nil { - return err - } - file.Close() - return nil -} - -func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (dasutil.ExpirationPolicy, error) { - if dbs.discardAfterTimeout { - return dasutil.DiscardAfterDataTimeout, nil - } - return dasutil.KeepForever, nil -} - -func (dbs *DBStorageService) String() string { - return "BadgerDB(" + dbs.dirPath + ")" -} - -func (dbs *DBStorageService) HealthCheck(ctx context.Context) error { - testData := []byte("Test-Data") - // #nosec G115 - err := dbs.Put(ctx, testData, uint64(time.Now().Add(time.Minute).Unix())) - if err != nil { - return err - } - res, err := dbs.GetByHash(ctx, dastree.Hash(testData)) - if err != nil { - return err - } - if !bytes.Equal(res, 
testData) { - return errors.New("invalid GetByHash result") - } - return nil -} diff --git a/daprovider/das/factory.go b/daprovider/das/factory.go index fc0afcaa32..826f09d239 100644 --- a/daprovider/das/factory.go +++ b/daprovider/das/factory.go @@ -41,22 +41,6 @@ func CreatePersistentStorageService( storageServices = append(storageServices, fs) } - if config.LocalDBStorage.Enable { - var s *DBStorageService - if config.MigrateLocalDBToFileStorage { - s, err = NewDBStorageService(ctx, &config.LocalDBStorage, fs) - } else { - s, err = NewDBStorageService(ctx, &config.LocalDBStorage, nil) - } - if err != nil { - return nil, nil, err - } - if s != nil { - lifecycleManager.Register(s) - storageServices = append(storageServices, s) - } - } - if config.S3Storage.Enable { s, err := NewS3StorageService(config.S3Storage) if err != nil { @@ -168,10 +152,9 @@ func CreateDAComponentsForDaserver( } // Check config requirements - if !config.LocalDBStorage.Enable && - !config.LocalFileStorage.Enable && + if !config.LocalFileStorage.Enable && !config.S3Storage.Enable { - return nil, nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage) must be enabled.") + return nil, nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-file-storage|s3-storage) must be enabled.") } // Done checking config requirements diff --git a/go.mod b/go.mod index 9caa5fe490..1f9c18767d 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/ccoveille/go-safecast v1.1.0 github.com/cockroachdb/pebble v1.1.5 github.com/codeclysm/extract/v3 v3.0.2 - github.com/dgraph-io/badger/v4 v4.2.0 github.com/enescakir/emoji v1.0.0 github.com/ethereum/go-ethereum v1.16.2 github.com/fatih/structtag v1.2.0 @@ -54,7 +53,6 @@ require ( golang.org/x/term v0.30.0 golang.org/x/tools v0.29.0 google.golang.org/api v0.187.0 - google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -74,7 +72,6 @@ require ( 
github.com/ferranbt/fastssz v0.1.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.2.4 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -101,6 +98,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/grpc v1.64.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -137,11 +135,9 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v1.1.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect @@ -152,7 +148,6 @@ require ( github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v1.0.0 // indirect - github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-github/v62 v62.0.0 github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect github.com/gorilla/mux v1.8.0 diff --git a/go.sum b/go.sum index 7b73bdb7de..663b2eefd1 100644 --- a/go.sum +++ b/go.sum @@ -104,7 +104,6 @@ github.com/ccoveille/go-safecast v1.1.0/go.mod h1:QqwNjxQ7DAqY0C721OIO9InMk9zCwc github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -153,12 +152,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etly github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= -github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= -github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= @@ -169,8 +162,6 @@ github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5R 
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= @@ -236,8 +227,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -261,8 +250,6 @@ github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -512,7 +499,6 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -646,7 +632,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -749,7 
+734,6 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/system_tests/common_test.go b/system_tests/common_test.go index df8cefc43b..19409457cd 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -1874,11 +1874,8 @@ func setupConfigWithDAS( var dbPath string var err error - enableFileStorage, enableDbStorage, enableDas := false, false, true + enableFileStorage, enableDas := false, true switch dasModeString { - case "db": - enableDbStorage = true - chainConfig = chaininfo.ArbitrumDevTestDASChainConfig() case "files": enableFileStorage = true chainConfig = chaininfo.ArbitrumDevTestDASChainConfig() @@ -1891,10 +1888,6 @@ func setupConfigWithDAS( dasSignerKey, _, err := das.GenerateAndStoreKeys(dbPath) Require(t, err) - dbConfig := das.DefaultLocalDBStorageConfig - dbConfig.Enable = enableDbStorage - dbConfig.DataDir = dbPath - dasConfig := &das.DataAvailabilityConfig{ Enable: enableDas, Key: das.KeyConfig{ @@ -1904,7 +1897,6 @@ func setupConfigWithDAS( Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: dbConfig, RequestTimeout: 5 * time.Second, ParentChainNodeURL: "none", SequencerInboxAddress: "none", diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 979c2c64a7..e9233942a3 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go 
@@ -205,14 +205,10 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1Reader.Start(ctx) defer l1Reader.StopAndWait() - keyDir, fileDataDir, dbDataDir := t.TempDir(), t.TempDir(), t.TempDir() + keyDir, fileDataDir := t.TempDir(), t.TempDir() pubkey, _, err := das.GenerateAndStoreKeys(keyDir) Require(t, err) - dbConfig := das.DefaultLocalDBStorageConfig - dbConfig.Enable = true - dbConfig.DataDir = dbDataDir - serverConfig := das.DataAvailabilityConfig{ Enable: true, @@ -222,7 +218,6 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { Enable: true, DataDir: fileDataDir, }, - LocalDBStorage: dbConfig, Key: das.KeyConfig{ KeyDir: keyDir, From a81514d47062a8a894b404cb0d2de1b06c65721a Mon Sep 17 00:00:00 2001 From: Mikhail Rogachev Date: Fri, 26 Sep 2025 08:10:16 +0200 Subject: [PATCH 16/56] Fixup log messages in attributeWasmComputation (#3709) --- arbos/programs/programs.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index d17761c4eb..28a2630475 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -282,7 +282,7 @@ func attributeWasmComputation(contract *vm.Contract, startingGas uint64) { var residual uint64 if accountedGas > usedGas { - log.Error("negative WASM computation residual, usedGas", usedGas, "accounted", accountedGas) + log.Trace("negative WASM computation residual", "usedGas", usedGas, "accountedGas", accountedGas) residual = 0 } else { residual = usedGas - accountedGas @@ -290,7 +290,7 @@ func attributeWasmComputation(contract *vm.Contract, startingGas uint64) { var overflow bool if contract.UsedMultiGas, overflow = contract.UsedMultiGas.SafeIncrement(multigas.ResourceKindWasmComputation, residual); overflow { - log.Error("WASM computation gas overflow, residual", residual) + log.Trace("WASM computation gas overflow", "residual", residual) } } From 29d4273d1cdbcd1a83ac9290fd94b497d3d6bd1d Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: 
Fri, 26 Sep 2025 09:20:31 +0300 Subject: [PATCH 17/56] remove no-op OS-specific prerequisites block in check-build.sh (#3675) Co-authored-by: Pepper Lebeck-Jobe --- scripts/check-build.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/scripts/check-build.sh b/scripts/check-build.sh index 24ea7bbf20..705b258659 100755 --- a/scripts/check-build.sh +++ b/scripts/check-build.sh @@ -86,11 +86,6 @@ fi # Check prerequisites for building binaries prerequisites=(git go curl clang make cmake npm wasm2wat wasm-ld yarn gotestsum python3) -if [[ "$OS" == "Linux" ]]; then - prerequisites+=() -else - prerequisites+=() -fi for pkg in "${prerequisites[@]}"; do display_name="$pkg" From a9647825cc1b260ba132efc5e6a7cee3595a77ba Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Fri, 26 Sep 2025 09:35:43 +0300 Subject: [PATCH 18/56] fix: missing reassignment of rivals when recording creation times in block snapshot (#3619) Co-authored-by: Raul Jordan Co-authored-by: Joshua Colvin Co-authored-by: Pepper Lebeck-Jobe --- bold/challenge-manager/challenge-tree/ancestors_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bold/challenge-manager/challenge-tree/ancestors_test.go b/bold/challenge-manager/challenge-tree/ancestors_test.go index b5875e8c16..252cff5151 100644 --- a/bold/challenge-manager/challenge-tree/ancestors_test.go +++ b/bold/challenge-manager/challenge-tree/ancestors_test.go @@ -216,6 +216,8 @@ func setupBlockChallengeTreeSnapshot(t *testing.T, tree *RoyalChallengeTree, cla key = buildEdgeCreationTimeKey(protocol.OriginId{}, mutual) tree.edgeCreationTimes.Put(key, threadsafe.NewMap[protocol.EdgeId, creationTime]()) mutuals = tree.edgeCreationTimes.Get(key) + a = aliceEdges["blk-4.a-6.a"] + b = bobEdges["blk-4.a-6.b"] aCreation, err = a.CreatedAtBlock() require.NoError(t, err) bCreation, err = b.CreatedAtBlock() From 5895dcf4adf05c2ab8f52098b2121990ad9f5e63 Mon Sep 17 00:00:00 2001 From: Ruslan Granger <155269177+ruslan0012@users.noreply.github.com> Date: Fri, 26 
Sep 2025 10:13:05 +0300 Subject: [PATCH 19/56] update CI/CD infrastructure: standardize actions and upgrade linting tools (#3702) * fix inconsistent setup-go versions across workflows * update golangci-lint to v2.5.0 (latest stable release) * Update _fast.yml * upgrade Go version from 1.24.5 to 1.25 * Update Dockerfile * Update Dockerfile --------- Co-authored-by: Pepper Lebeck-Jobe --- .github/workflows/_fast.yml | 2 +- .github/workflows/fuzz.yml | 4 ++-- Dockerfile | 4 ++-- go.mod | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/_fast.yml b/.github/workflows/_fast.yml index be8b9ed327..1009d9d972 100644 --- a/.github/workflows/_fast.yml +++ b/.github/workflows/_fast.yml @@ -25,7 +25,7 @@ jobs: - name: GolangCI Lint uses: golangci/golangci-lint-action@v8 with: - version: v2.1 + version: v2.5 - name: Custom Lint run: | diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index f886e64d13..b3ec707866 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/checkout@v5 - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - id: list @@ -36,7 +36,7 @@ jobs: steps: - uses: actions/checkout@v5 - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - uses: shogo82148/actions-go-fuzz/run@v1 diff --git a/Dockerfile b/Dockerfile index 54364cdba0..d15a959ea6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -72,7 +72,7 @@ COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base AS wasm-bin-builder RUN apt update && apt install -y wabt # pinned go version -RUN curl -L https://golang.org/dl/go1.24.5.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - +RUN curl -L https://golang.org/dl/go1.25.1.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress COPY ./arbos ./arbos @@ -245,7 +245,7 @@ RUN 
./download-machine.sh consensus-v50-rc.3 0x385fa2524d86d4ebc340988224f8686b3 RUN ./download-machine.sh consensus-v50-rc.4 0x393be710f252e8217d66fe179739eba1ed471f0d5a847b5905c30926d853241a RUN ./download-machine.sh consensus-v40 0xdb698a2576298f25448bc092e52cf13b1e24141c997135d70f217d674bbeb69a -FROM golang:1.24.5-bookworm AS node-builder +FROM golang:1.25-bookworm AS node-builder WORKDIR /workspace ARG version="" ARG datetime="" diff --git a/go.mod b/go.mod index 1f9c18767d..03c9eec434 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/offchainlabs/nitro -go 1.24.5 +go 1.25 replace github.com/ethereum/go-ethereum => ./go-ethereum From b580dd7c4026dac84e8dbcbe5aa3998b59d7e9ea Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Fri, 26 Sep 2025 10:34:34 +0300 Subject: [PATCH 20/56] fix(events): use stable subscription ids and remove by id (#3563) * fix(events): use stable subscription ids and remove by id * Update producer.go * Update producer.go * Update producer_test.go * Update producer.go * Update producer_test.go --------- Co-authored-by: Pepper Lebeck-Jobe --- bold/containers/events/producer.go | 20 +++++++----- bold/containers/events/producer_test.go | 41 +++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/bold/containers/events/producer.go b/bold/containers/events/producer.go index 2df0605773..abbd3909da 100644 --- a/bold/containers/events/producer.go +++ b/bold/containers/events/producer.go @@ -22,6 +22,7 @@ type Producer[T any] struct { subs []*Subscription[T] doneListener chan subId // channel to listen for IDs of subscriptions to be remove. broadcastTimeout time.Duration // maximum duration to wait for an event to be sent. 
+ nextId subId // monotonically increasing id for stable subscription identification } type ProducerOpt[T any] func(*Producer[T]) @@ -60,13 +61,17 @@ func (ep *Producer[T]) Start(ctx context.Context) { select { case id := <-ep.doneListener: ep.Lock() - // Check if id overflows the length of the slice. - if int(id) >= len(ep.subs) { - ep.Unlock() - continue + // Find the subscription by stable id and remove it if present. + idx := -1 + for i, s := range ep.subs { + if s.id == id { + idx = i + break + } + } + if idx >= 0 { + ep.subs = append(ep.subs[:idx], ep.subs[idx+1:]...) } - // Otherwise, clear the subscription from the list. - ep.subs = append(ep.subs[:id], ep.subs[id+1:]...) ep.Unlock() case <-ctx.Done(): close(ep.doneListener) @@ -82,10 +87,11 @@ func (ep *Producer[T]) Subscribe() *Subscription[T] { ep.Lock() defer ep.Unlock() sub := &Subscription[T]{ - id: subId(len(ep.subs)), // Assign a unique ID based on the current count of subscriptions + id: ep.nextId, // Assign a stable, monotonically increasing ID events: make(chan T), done: ep.doneListener, } + ep.nextId++ ep.subs = append(ep.subs, sub) return sub } diff --git a/bold/containers/events/producer_test.go b/bold/containers/events/producer_test.go index 1b09a1f9b4..0c42a5f4b8 100644 --- a/bold/containers/events/producer_test.go +++ b/bold/containers/events/producer_test.go @@ -68,3 +68,44 @@ func TestEventProducer_Start(t *testing.T) { t.Error("Expected to end after context cancellation") } } + +func TestRemovalUsesStableId(t *testing.T) { + // This test ensures that removing subscriptions uses stable IDs rather than slice indices. + // Before the fix, deleting two subscriptions by their IDs 0 and 1 would incorrectly + // remove the first (index 0) and the third (now at index 1 after compaction), leaving + // the second subscription in place instead of the third. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + producer := NewProducer[int]() + go producer.Start(ctx) + + s0 := producer.Subscribe() + s1 := producer.Subscribe() + s2 := producer.Subscribe() + + // Cancel first two subscriptions; they will send their IDs to doneListener via Next. + for _, s := range []*Subscription[int]{s0, s1} { + c, cancelSub := context.WithCancel(context.Background()) + cancelSub() + _, shouldEnd := s.Next(c) + require.True(t, shouldEnd) + } + + // Wait until the producer processes removal and only one subscription remains. + deadline := time.Now().Add(2 * time.Second) + for { + producer.RLock() + remaining := len(producer.subs) + producer.RUnlock() + if remaining == 1 || time.Now().After(deadline) { + break + } + time.Sleep(5 * time.Millisecond) + } + + producer.RLock() + require.Equal(t, 1, len(producer.subs)) + require.Same(t, s2, producer.subs[0]) + producer.RUnlock() +} From f60bbc8c76e021a9630dea2a9ce59552086655a4 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 26 Sep 2025 16:01:04 +0600 Subject: [PATCH 21/56] Use new daprovider.Validator interface with promises Also update the evil_da_provider to use the new separate RecoverPayload and CollectPreimages. 
--- arbnode/node.go | 3 +- cmd/replay/main.go | 82 ++-- daprovider/daclient/daclient.go | 1 - daprovider/referenceda/reference_validator.go | 1 - daprovider/server/client_provider_test.go | 7 +- daprovider/server/provider_server.go | 25 -- system_tests/bold_challenge_protocol_test.go | 3 - system_tests/bold_customda_challenge_test.go | 18 +- system_tests/evil_da_provider.go | 357 ++++++++++++------ .../server_arb/readpreimage_proof_enhancer.go | 6 +- .../validatecertificate_proof_enhancer.go | 4 +- 11 files changed, 307 insertions(+), 200 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 6ce606a5df..11a0c4f123 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -699,7 +699,8 @@ func getDAProvider( cleanupFuncs = append(cleanupFuncs, validatorCleanup) } - providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &serverConfig, reader, writer, validator) + headerBytes := daFactory.GetSupportedHeaderBytes() + providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &serverConfig, reader, writer, validator, headerBytes) // Create combined cleanup function closeFn := func() { diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 6bbec4daf6..281c50130c 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -36,6 +36,7 @@ import ( "github.com/offchainlabs/nitro/daprovider/das/dastree" "github.com/offchainlabs/nitro/daprovider/das/dasutil" "github.com/offchainlabs/nitro/gethhook" + "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/wavmio" ) @@ -169,48 +170,61 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error { type DACertificatePreimageReader struct { } -func (r *DACertificatePreimageReader) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return daprovider.IsDACertificateMessageHeaderByte(headerByte) -} - -func (r *DACertificatePreimageReader) RecoverPayloadFromBatch( - ctx context.Context, +func (r *DACertificatePreimageReader) RecoverPayload( batchNum uint64, 
batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, - validateSeqMsg bool, -) ([]byte, daprovider.PreimagesMap, error) { - if len(sequencerMsg) <= 40 { - return nil, nil, fmt.Errorf("sequencer message too small") - } - certificate := sequencerMsg[40:] +) containers.PromiseInterface[daprovider.PayloadResult] { + promise := containers.NewPromise[daprovider.PayloadResult](nil) + go func() { + if len(sequencerMsg) <= 40 { + promise.ProduceError(fmt.Errorf("sequencer message too small")) + return + } + certificate := sequencerMsg[40:] + + // Hash the entire sequencer message to get the preimage key + customDAPreimageHash := crypto.Keccak256Hash(certificate) + + // Validate the certificate before trying to read it + if !wavmio.ValidateCertificate(arbutil.DACertificatePreimageType, customDAPreimageHash) { + // Preimage is not available - treat as invalid batch + log.Info("DACertificate preimage validation failed, treating as invalid batch", + "batchNum", batchNum, + "hash", customDAPreimageHash.Hex()) + promise.Produce(daprovider.PayloadResult{Payload: []byte{}}) + return + } - // Hash the entire sequencer message to get the preimage key - customDAPreimageHash := crypto.Keccak256Hash(certificate) + // Read the preimage (which contains the actual batch data) + payload, err := wavmio.ResolveTypedPreimage(arbutil.DACertificatePreimageType, customDAPreimageHash) + if err != nil { + // This should not happen after successful validation + panic(fmt.Errorf("failed to resolve DACertificate preimage after validation: %w", err)) + } - // Validate the certificate before trying to read it - if !wavmio.ValidateCertificate(arbutil.DACertificatePreimageType, customDAPreimageHash) { - // Preimage is not available - treat as invalid batch - log.Info("DACertificate preimage validation failed, treating as invalid batch", + log.Info("DACertificate batch recovered", "batchNum", batchNum, - "hash", customDAPreimageHash.Hex()) - return []byte{}, preimages, nil 
- } - - // Read the preimage (which contains the actual batch data) - payload, err := wavmio.ResolveTypedPreimage(arbutil.DACertificatePreimageType, customDAPreimageHash) - if err != nil { - // This should not happen after successful validation - panic(fmt.Errorf("failed to resolve DACertificate preimage after validation: %w", err)) - } + "hash", customDAPreimageHash.Hex(), + "payloadSize", len(payload)) - log.Info("DACertificate batch recovered", - "batchNum", batchNum, - "hash", customDAPreimageHash.Hex(), - "payloadSize", len(payload)) + promise.Produce(daprovider.PayloadResult{Payload: payload}) + }() + return &promise +} - return payload, preimages, nil +func (r *DACertificatePreimageReader) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise := containers.NewPromise[daprovider.PreimagesResult](nil) + go func() { + // For the replay tool, we don't need to collect preimages + // Just return an empty map + promise.Produce(daprovider.PreimagesResult{Preimages: make(daprovider.PreimagesMap)}) + }() + return &promise } // To generate: diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 907562c463..ffe89c3c71 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/server_api" "github.com/offchainlabs/nitro/util/containers" diff --git a/daprovider/referenceda/reference_validator.go b/daprovider/referenceda/reference_validator.go index 043ffb5c72..9a5ff3452d 100644 --- a/daprovider/referenceda/reference_validator.go +++ b/daprovider/referenceda/reference_validator.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/ethclient" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/util/containers" diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index 2f41fadd8d..d82a1f84aa 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/referenceda" "github.com/offchainlabs/nitro/util/rpcclient" @@ -60,11 +61,13 @@ func setupProviderServer(ctx context.Context, t *testing.T) *http.Server { // The services below will work fine as long as we don't need to do any action on-chain. dummyAddress := common.HexToAddress("0x0") - reader := referenceda.NewReader(nil, dummyAddress) + storage := referenceda.GetInMemoryStorage() + reader := referenceda.NewReader(storage, nil, dummyAddress) writer := referenceda.NewWriter(dataSigner) validator := referenceda.NewValidator(nil, dummyAddress) + headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} - providerServer, err := NewServerWithDAPProvider(ctx, &providerServerConfig, reader, writer, validator) + providerServer, err := NewServerWithDAPProvider(ctx, &providerServerConfig, reader, writer, validator, headerBytes) testhelpers.RequireImpl(t, err) return providerServer diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index dfb02184dd..a4d4f29b52 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbutil" 
"github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/server_api" @@ -209,27 +208,3 @@ func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certifica } return &server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(result.Proof)}, nil } - -func (s *Server) GenerateProof(ctx context.Context, preimageType hexutil.Uint, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*daclient.GenerateProofResult, error) { - if s.validator == nil { - return nil, errors.New("validator not available") - } - // #nosec G115 - proof, err := s.validator.GenerateProof(ctx, arbutil.PreimageType(uint8(preimageType)), certHash, uint64(offset), certificate) - if err != nil { - return nil, err - } - return &daclient.GenerateProofResult{Proof: proof}, nil -} - -func (s *Server) GenerateCertificateValidityProof(ctx context.Context, preimageType hexutil.Uint, certificate hexutil.Bytes) (*daclient.GenerateCertificateValidityProofResult, error) { - if s.validator == nil { - return nil, errors.New("validator not available") - } - // #nosec G115 - proof, err := s.validator.GenerateCertificateValidityProof(ctx, arbutil.PreimageType(uint8(preimageType)), certificate) - if err != nil { - return nil, err - } - return &daclient.GenerateCertificateValidityProofResult{Proof: proof}, nil -} diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go index 91980c5a21..699ce72748 100644 --- a/system_tests/bold_challenge_protocol_test.go +++ b/system_tests/bold_challenge_protocol_test.go @@ -557,9 +557,6 @@ func setupL1ForBoldProtocol( l2info = NewArbTestInfo(t, chainConfig.ChainID) } - l1info.GenerateAccount("RollupOwner") - l1info.GenerateAccount("Sequencer") - l1info.GenerateAccount("User") l1info.GenerateAccount("Asserter") l1info.GenerateAccount("EvilAsserter") diff --git a/system_tests/bold_customda_challenge_test.go 
b/system_tests/bold_customda_challenge_test.go index c315ae75da..024d446fa3 100644 --- a/system_tests/bold_customda_challenge_test.go +++ b/system_tests/bold_customda_challenge_test.go @@ -83,7 +83,8 @@ func TestChallengeProtocolBOLDCustomDA_ValidCertClaimedInvalid(t *testing.T) { // createReferenceDAProviderServer creates and starts a ReferenceDA provider server with automatic port selection func createReferenceDAProviderServer(t *testing.T, ctx context.Context, l1Client *ethclient.Client, validatorAddr common.Address, dataSigner signature.DataSignerFunc) (*http.Server, string) { // Create ReferenceDA components - reader := referenceda.NewReader(l1Client, validatorAddr) + storage := referenceda.GetInMemoryStorage() + reader := referenceda.NewReader(storage, l1Client, validatorAddr) writer := referenceda.NewWriter(dataSigner) validator := referenceda.NewValidator(l1Client, validatorAddr) @@ -97,7 +98,8 @@ func createReferenceDAProviderServer(t *testing.T, ctx context.Context, l1Client } // Create the provider server - server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, reader, writer, validator) + headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} + server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, reader, writer, validator, headerBytes) Require(t, err) // Extract the actual address with port @@ -158,7 +160,8 @@ func createEvilDAProviderServer(t *testing.T, ctx context.Context, l1Client *eth // Use asserting writer to ensure evil provider is never used for writing. // In this test we call the writers directly to have more control over batch posting. 
writer := &assertingWriter{} - server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, evilProvider, writer, evilProvider) + headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} + server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, evilProvider, writer, evilProvider, headerBytes) Require(t, err) // Extract the actual address with port @@ -421,8 +424,13 @@ func testChallengeProtocolBOLDCustomDA(t *testing.T, evilStrategy EvilStrategy, Require(t, err) // Create DA readers for validators - dapReadersA := []daprovider.Reader{daClientA} - dapReadersB := []daprovider.Reader{daClientB} + dapReadersA := daprovider.NewReaderRegistry() + err = dapReadersA.SetupDACertificateReader(daClientA) + Require(t, err) + + dapReadersB := daprovider.NewReaderRegistry() + err = dapReadersB.SetupDACertificateReader(daClientB) + Require(t, err) statelessA, err := staker.NewStatelessBlockValidator( l2nodeA.InboxReader, diff --git a/system_tests/evil_da_provider.go b/system_tests/evil_da_provider.go index 2c3f68cb46..948318316d 100644 --- a/system_tests/evil_da_provider.go +++ b/system_tests/evil_da_provider.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/referenceda" + "github.com/offchainlabs/nitro/util/containers" ) // EvilDAProvider implements both Reader and Validator interfaces @@ -36,8 +37,9 @@ type EvilDAProvider struct { func NewEvilDAProvider(l1Client *ethclient.Client, validatorAddr common.Address) *EvilDAProvider { // Create fresh ReferenceDA components - they'll all share the singleton storage + storage := referenceda.GetInMemoryStorage() return &EvilDAProvider{ - reader: referenceda.NewReader(l1Client, validatorAddr), + reader: referenceda.NewReader(storage, l1Client, validatorAddr), validator: referenceda.NewValidator(l1Client, validatorAddr), evilMappings: make(map[common.Hash][]byte), invalidClaimCerts: 
make(map[common.Hash]bool), @@ -71,165 +73,270 @@ func (e *EvilDAProvider) SetClaimCertInvalid(certKeccak common.Hash) { e.invalidClaimCerts[certKeccak] = true } -// IsValidHeaderByte delegates to underlying reader -func (e *EvilDAProvider) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return e.reader.IsValidHeaderByte(ctx, headerByte) -} - -// RecoverPayloadFromBatch intercepts and returns evil data if configured -func (e *EvilDAProvider) RecoverPayloadFromBatch( - ctx context.Context, +// RecoverPayload intercepts and returns evil data if configured +func (e *EvilDAProvider) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, - validateSeqMsg bool, -) ([]byte, daprovider.PreimagesMap, error) { - // Check if this is a CustomDA message and extract certificate - if len(sequencerMsg) > 40 && daprovider.IsDACertificateMessageHeaderByte(sequencerMsg[40]) { - certificate := sequencerMsg[40:] - - // Check if we're supposed to claim this certificate is invalid - certKeccak := crypto.Keccak256Hash(certificate) - e.mu.RLock() - shouldClaimInvalid := e.invalidClaimCerts[certKeccak] - e.mu.RUnlock() - - if shouldClaimInvalid { - log.Info("EvilDAProvider rejecting certificate we claim is invalid", - "certKeccak", certKeccak.Hex(), - "batchNum", batchNum) - // Return an error similar to what would happen with an actually invalid certificate - return nil, nil, fmt.Errorf("certificate validation failed: claimed to be invalid") - } +) containers.PromiseInterface[daprovider.PayloadResult] { + promise := containers.NewPromise[daprovider.PayloadResult](nil) + go func() { + // Check if this is a CustomDA message and extract certificate + if len(sequencerMsg) > 40 && daprovider.IsDACertificateMessageHeaderByte(sequencerMsg[40]) { + certificate := sequencerMsg[40:] + + // Check if we're supposed to claim this certificate is invalid + certKeccak := crypto.Keccak256Hash(certificate) + e.mu.RLock() + 
shouldClaimInvalid := e.invalidClaimCerts[certKeccak] + e.mu.RUnlock() - // Try to deserialize certificate - cert, err := referenceda.Deserialize(certificate) - if err == nil { - // Check if this certificate is from our untrusted signer - signer, signerErr := cert.RecoverSigner() - if signerErr == nil { - untrustedAddr := e.GetUntrustedSignerAddress() + if shouldClaimInvalid { + log.Info("EvilDAProvider rejecting certificate we claim is invalid", + "certKeccak", certKeccak.Hex(), + "batchNum", batchNum) + // Return an error similar to what would happen with an actually invalid certificate + promise.ProduceError(fmt.Errorf("certificate validation failed: claimed to be invalid")) + return + } - // If this cert was signed by our known untrusted signer, accept it and return the data - if untrustedAddr != nil && signer == *untrustedAddr { - log.Info("EvilDAProvider accepting untrusted certificate", - "signer", signer.Hex(), - "dataHash", common.Hash(cert.DataHash).Hex()) + // Try to deserialize certificate + cert, err := referenceda.Deserialize(certificate) + if err == nil { + // Check if this certificate is from our untrusted signer + signer, signerErr := cert.RecoverSigner() + if signerErr == nil { + untrustedAddr := e.GetUntrustedSignerAddress() + + // If this cert was signed by our known untrusted signer, accept it and return the data + if untrustedAddr != nil && signer == *untrustedAddr { + log.Info("EvilDAProvider accepting untrusted certificate", + "signer", signer.Hex(), + "dataHash", common.Hash(cert.DataHash).Hex()) + + // Get the data from the underlying storage (it was stored with untrusted signer) + // Delegate to underlying reader + delegatePromise := e.reader.RecoverPayload(batchNum, batchBlockHash, sequencerMsg) + ctx := context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) + } + return + } + } + + // Extract data hash (SHA256) from certificate + dataHash := 
cert.DataHash - // Get the data from the underlying storage (it was stored with untrusted signer) - // We need to call the reader WITHOUT validation - return e.reader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, sequencerMsg, preimages, false) + e.mu.RLock() + if evilData, exists := e.evilMappings[dataHash]; exists { + e.mu.RUnlock() + + log.Info("EvilDAProvider returning evil data", + "dataHash", common.Hash(dataHash).Hex(), + "evilDataSize", len(evilData)) + + promise.Produce(daprovider.PayloadResult{Payload: evilData}) + return } + e.mu.RUnlock() } + } - // Extract data hash (SHA256) from certificate - dataHash := cert.DataHash + // Fall back to underlying reader for non-evil certificates + delegatePromise := e.reader.RecoverPayload(batchNum, batchBlockHash, sequencerMsg) + ctx := context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) + } + }() + return &promise +} +// CollectPreimages collects preimages for the batch +func (e *EvilDAProvider) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise := containers.NewPromise[daprovider.PreimagesResult](nil) + go func() { + // Check if this is a CustomDA message and extract certificate + if len(sequencerMsg) > 40 && daprovider.IsDACertificateMessageHeaderByte(sequencerMsg[40]) { + certificate := sequencerMsg[40:] + + // Check if we're supposed to claim this certificate is invalid + certKeccak := crypto.Keccak256Hash(certificate) e.mu.RLock() - if evilData, exists := e.evilMappings[dataHash]; exists { - e.mu.RUnlock() + shouldClaimInvalid := e.invalidClaimCerts[certKeccak] + e.mu.RUnlock() - // Record preimages with evil data - if preimages != nil { + if shouldClaimInvalid { + // For invalid certificates, we still return empty preimages (no error) + // This matches the behavior where validation fails but 
preimages aren't needed + promise.Produce(daprovider.PreimagesResult{Preimages: make(daprovider.PreimagesMap)}) + return + } + + // Try to deserialize certificate + cert, err := referenceda.Deserialize(certificate) + if err == nil { + // Check if this certificate is from our untrusted signer + signer, signerErr := cert.RecoverSigner() + if signerErr == nil { + untrustedAddr := e.GetUntrustedSignerAddress() + + // If this cert was signed by our known untrusted signer, delegate to reader + if untrustedAddr != nil && signer == *untrustedAddr { + // Delegate to underlying reader which will get the data from storage + delegatePromise := e.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) + ctx := context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) + } + return + } + } + + // Extract data hash (SHA256) from certificate + dataHash := cert.DataHash + + e.mu.RLock() + if evilData, exists := e.evilMappings[dataHash]; exists { + e.mu.RUnlock() + + // Record preimages with evil data + preimages := make(daprovider.PreimagesMap) preimageRecorder := daprovider.RecordPreimagesTo(preimages) // Use keccak256 of certificate for preimage recording - certKeccak := crypto.Keccak256Hash(certificate) preimageRecorder(certKeccak, evilData, arbutil.DACertificatePreimageType) - } - log.Info("EvilDAProvider returning evil data", - "dataHash", common.Hash(dataHash).Hex(), - "evilDataSize", len(evilData)) - - return evilData, preimages, nil + promise.Produce(daprovider.PreimagesResult{Preimages: preimages}) + return + } + e.mu.RUnlock() } - e.mu.RUnlock() } - } - // Fall back to underlying reader for non-evil certificates - return e.reader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, sequencerMsg, preimages, validateSeqMsg) + // Fall back to underlying reader for non-evil certificates + delegatePromise := e.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) + ctx 
:= context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) + } + }() + return &promise } -// GenerateProof generates proof for evil data if configured, otherwise delegates -func (e *EvilDAProvider) GenerateProof( - ctx context.Context, - preimageType arbutil.PreimageType, +// GenerateReadPreimageProof generates proof for evil data if configured, otherwise delegates +func (e *EvilDAProvider) GenerateReadPreimageProof( certHash common.Hash, offset uint64, certificate []byte, -) ([]byte, error) { - if preimageType != arbutil.DACertificatePreimageType { - return e.validator.GenerateProof(ctx, preimageType, certHash, offset, certificate) - } +) containers.PromiseInterface[daprovider.PreimageProofResult] { + promise := containers.NewPromise[daprovider.PreimageProofResult](nil) + go func() { + // Try to deserialize certificate to check for evil mapping + cert, err := referenceda.Deserialize(certificate) + if err == nil { + // Extract data hash (SHA256) from certificate + dataHash := cert.DataHash + + e.mu.RLock() + evilData, hasEvil := e.evilMappings[dataHash] + e.mu.RUnlock() + + if hasEvil { + // Generate proof with evil data + // Format: [Version(1), CertificateSize(8), Certificate, PreimageSize(8), PreimageData] + certLen := len(certificate) + proof := make([]byte, 1+8+certLen+8+len(evilData)) + proof[0] = 1 // Version + binary.BigEndian.PutUint64(proof[1:9], uint64(certLen)) + copy(proof[9:9+certLen], certificate) + binary.BigEndian.PutUint64(proof[9+certLen:9+certLen+8], uint64(len(evilData))) + copy(proof[9+certLen+8:], evilData) + + log.Debug("EvilDAProvider generating evil proof", + "certHash", certHash.Hex(), + "dataHash", common.Hash(dataHash).Hex(), + "evilDataSize", len(evilData)) - // Try to deserialize certificate to check for evil mapping - cert, err := referenceda.Deserialize(certificate) - if err == nil { - // Extract data hash (SHA256) from certificate - dataHash 
:= cert.DataHash - - e.mu.RLock() - evilData, hasEvil := e.evilMappings[dataHash] - e.mu.RUnlock() - - if hasEvil { - // Generate proof with evil data - // Format: [Version(1), CertificateSize(8), Certificate, PreimageSize(8), PreimageData] - certLen := len(certificate) - proof := make([]byte, 1+8+certLen+8+len(evilData)) - proof[0] = 1 // Version - binary.BigEndian.PutUint64(proof[1:9], uint64(certLen)) - copy(proof[9:9+certLen], certificate) - binary.BigEndian.PutUint64(proof[9+certLen:9+certLen+8], uint64(len(evilData))) - copy(proof[9+certLen+8:], evilData) - - log.Debug("EvilDAProvider generating evil proof", - "certHash", certHash.Hex(), - "dataHash", common.Hash(dataHash).Hex(), - "evilDataSize", len(evilData)) - - return proof, nil + promise.Produce(daprovider.PreimageProofResult{Proof: proof}) + return + } } - } - // No evil mapping, delegate to underlying validator - return e.validator.GenerateProof(ctx, preimageType, certHash, offset, certificate) + // No evil mapping, delegate to underlying validator + delegatePromise := e.validator.GenerateReadPreimageProof(certHash, offset, certificate) + ctx := context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) + } + }() + return &promise } // GenerateCertificateValidityProof generates a proof of certificate validity -func (e *EvilDAProvider) GenerateCertificateValidityProof(ctx context.Context, preimageType arbutil.PreimageType, certificate []byte) ([]byte, error) { - // Check if we should lie about this certificate - cert, err := referenceda.Deserialize(certificate) - if err == nil { - signer, err := cert.RecoverSigner() +func (e *EvilDAProvider) GenerateCertificateValidityProof(certificate []byte) containers.PromiseInterface[daprovider.ValidityProofResult] { + promise := containers.NewPromise[daprovider.ValidityProofResult](nil) + go func() { + // Check if we should lie about this certificate + cert, err := 
referenceda.Deserialize(certificate) if err == nil { - untrustedAddr := e.GetUntrustedSignerAddress() + signer, err := cert.RecoverSigner() + if err == nil { + untrustedAddr := e.GetUntrustedSignerAddress() - // If this cert was signed by our known untrusted signer, lie and say it's valid - if untrustedAddr != nil && signer == *untrustedAddr { - log.Info("EvilDAProvider lying about untrusted certificate validity", - "signer", signer.Hex(), + // If this cert was signed by our known untrusted signer, lie and say it's valid + if untrustedAddr != nil && signer == *untrustedAddr { + log.Info("EvilDAProvider lying about untrusted certificate validity", + "signer", signer.Hex(), + "dataHash", common.Hash(cert.DataHash).Hex()) + promise.Produce(daprovider.ValidityProofResult{Proof: []byte{1, 0x01}}) // EVIL: claim valid when it's not + return + } + } + + // Check if we should claim this specific valid cert is invalid + certKeccak := crypto.Keccak256Hash(certificate) + e.mu.RLock() + shouldClaimInvalid := e.invalidClaimCerts[certKeccak] + e.mu.RUnlock() + + if shouldClaimInvalid { + log.Info("EvilDAProvider lying about valid certificate (claiming invalid)", + "certKeccak", certKeccak.Hex(), "dataHash", common.Hash(cert.DataHash).Hex()) - return []byte{1, 0x01}, nil // EVIL: claim valid when it's not + promise.Produce(daprovider.ValidityProofResult{Proof: []byte{0, 0x01}}) // EVIL: claim invalid when it's valid + return } } - // Check if we should claim this specific valid cert is invalid - certKeccak := crypto.Keccak256Hash(certificate) - e.mu.RLock() - shouldClaimInvalid := e.invalidClaimCerts[certKeccak] - e.mu.RUnlock() - - if shouldClaimInvalid { - log.Info("EvilDAProvider lying about valid certificate (claiming invalid)", - "certKeccak", certKeccak.Hex(), - "dataHash", common.Hash(cert.DataHash).Hex()) - return []byte{0, 0x01}, nil // EVIL: claim invalid when it's valid + // For all other cases, delegate to underlying validator + delegatePromise := 
e.validator.GenerateCertificateValidityProof(certificate) + ctx := context.Background() + result, err := delegatePromise.Await(ctx) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(result) } - } - - // For all other cases, delegate to underlying validator - return e.validator.GenerateCertificateValidityProof(ctx, preimageType, certificate) + }() + return &promise } diff --git a/validator/server_arb/readpreimage_proof_enhancer.go b/validator/server_arb/readpreimage_proof_enhancer.go index 91253d1d67..9b88080171 100644 --- a/validator/server_arb/readpreimage_proof_enhancer.go +++ b/validator/server_arb/readpreimage_proof_enhancer.go @@ -96,15 +96,17 @@ func (e *ReadPreimageProofEnhancer) EnhanceProof(ctx context.Context, messageNum } // Generate custom proof with certificate - customProof, err := e.daValidator.GenerateProof(ctx, arbutil.DACertificatePreimageType, common.BytesToHash(certKeccak256[:]), offset, certificate) + promise := e.daValidator.GenerateReadPreimageProof(common.BytesToHash(certKeccak256[:]), offset, certificate) + result, err := promise.Await(ctx) if err != nil { return nil, fmt.Errorf("failed to generate custom DA proof: %w", err) } + customProof := result.Proof // Build standard CustomDA proof preamble: // [...proof..., certSize(8), certificate, customProof] // We're dropping the CustomDA marker data (certKeccak256, offset, marker byte) from the original proof. - // It was only needed here to call GenerateProof above, the same information is + // It was only needed here to call GenerateReadPreimageProof above, the same information is // available to the OSP in the instruction arguments. 
certSize := uint64(len(certificate)) markerDataStart := certKeccak256Pos // Start of CustomDA marker data that we'll drop diff --git a/validator/server_arb/validatecertificate_proof_enhancer.go b/validator/server_arb/validatecertificate_proof_enhancer.go index 5b9041eb73..29c478adf7 100644 --- a/validator/server_arb/validatecertificate_proof_enhancer.go +++ b/validator/server_arb/validatecertificate_proof_enhancer.go @@ -78,10 +78,12 @@ func (e *ValidateCertificateProofEnhancer) EnhanceProof(ctx context.Context, mes } // Generate certificate validity proof - validityProof, err := e.daValidator.GenerateCertificateValidityProof(ctx, arbutil.DACertificatePreimageType, certificate) + promise := e.daValidator.GenerateCertificateValidityProof(certificate) + result, err := promise.Await(ctx) if err != nil { return nil, fmt.Errorf("failed to generate certificate validity proof: %w", err) } + validityProof := result.Proof // Build enhanced proof: [...originalProof..., certSize(8), certificate, validityProof] // Remove the marker data (hash + marker) from original proof From 79e0d6a93059b16a1e6ee2544896cf2d1a487e5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 11:06:53 +0200 Subject: [PATCH 22/56] Extend provider server with data streaming API --- daprovider/das/data_streaming/receiver.go | 11 ++++++++++ daprovider/server/client_provider_test.go | 1 + daprovider/server/provider_server.go | 26 +++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/daprovider/das/data_streaming/receiver.go b/daprovider/das/data_streaming/receiver.go index 4a77651983..682d3c0346 100644 --- a/daprovider/das/data_streaming/receiver.go +++ b/daprovider/das/data_streaming/receiver.go @@ -12,6 +12,12 @@ import ( "time" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/offchainlabs/nitro/util/signature" +) + +const ( + DefaultMaxPendingMessages = 10 + DefaultMessageCollectionExpiry = 1 * time.Minute ) // DataStreamReceiver 
implements the server side of the data streaming protocol. It stays compatible with `DataStreamer` @@ -39,6 +45,11 @@ func NewDataStreamReceiver(payloadVerifier *PayloadVerifier, maxPendingMessages } } +// NewDefaultDataStreamReceiver sets up a new stream receiver with default settings. +func NewDefaultDataStreamReceiver(verifier *signature.Verifier) *DataStreamReceiver { + return NewDataStreamReceiver(DefaultPayloadVerifier(verifier), DefaultMaxPendingMessages, DefaultMessageCollectionExpiry, nil) +} + // StartStreamingResult is expected by DataStreamer to be returned by the endpoint responsible for the StartReceiving method. // lint:require-exhaustive-initialization type StartStreamingResult struct { diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index d82a1f84aa..de46779b2d 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -53,6 +53,7 @@ func setupProviderServer(ctx context.Context, t *testing.T) *http.Server { EnableDAWriter: true, ServerTimeouts: genericconf.HTTPServerTimeoutConfig{}, RPCServerBodyLimit: RPCServerBodyLimit, + JWTSecret: "", } privateKey, err := crypto.GenerateKey() diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index a4d4f29b52..eb48c3fb10 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" @@ -27,13 +28,16 @@ import ( "github.com/offchainlabs/nitro/daprovider/server_api" ) +// lint:require-exhaustive-initialization type Server struct { reader daprovider.Reader writer daprovider.Writer validator daprovider.Validator headerBytes []byte // Supported header bytes for this provider + dataReceiver *data_streaming.DataStreamReceiver } +// lint:require-exhaustive-initialization type 
ServerConfig struct { Addr string `koanf:"addr"` Port uint64 `koanf:"port"` @@ -91,6 +95,8 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader writer: writer, validator: validator, headerBytes: headerBytes, + // TODO: nil verifier + dataReceiver: data_streaming.NewDefaultDataStreamReceiver(nil), } if err = rpcServer.RegisterName("daprovider", server); err != nil { return nil, err @@ -208,3 +214,23 @@ func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certifica } return &server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(result.Proof)}, nil } + +// ============================= DATA STREAM API ==================================================================== // + +func (s *Server) StartChunkedStore(ctx context.Context, timestamp, nChunks, chunkSize, totalSize, timeout hexutil.Uint64, sig hexutil.Bytes) (*data_streaming.StartStreamingResult, error) { + return s.dataReceiver.StartReceiving(ctx, uint64(timestamp), uint64(nChunks), uint64(chunkSize), uint64(totalSize), uint64(timeout), sig) +} + +func (s *Server) SendChunk(ctx context.Context, messageId, chunkId hexutil.Uint64, chunk hexutil.Bytes, sig hexutil.Bytes) error { + return s.dataReceiver.ReceiveChunk(ctx, data_streaming.MessageId(messageId), uint64(chunkId), chunk, sig) +} + +func (s *Server) CommitChunkedStore(ctx context.Context, messageId hexutil.Uint64, sig hexutil.Bytes) (*daclient.StoreResult, error) { + message, timeout, _, err := s.dataReceiver.FinalizeReceiving(ctx, data_streaming.MessageId(messageId), sig) + if err != nil { + return nil, err + } + + serializedDACert, err := s.writer.Store(ctx, message, timeout) + return &daclient.StoreResult{SerializedDACert: serializedDACert}, err +} From 7da2836d07cf28b87ba8a69b594ef5133a7e0791 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:09:30 +0200 Subject: [PATCH 23/56] Add TrustingPayloadVerifier, NoopPayload Signer. 
Create the provider server with trusting verifier (it's usually behind jwt anyway) --- arbnode/node.go | 3 ++- cmd/daprovider/daprovider.go | 3 ++- daprovider/das/data_streaming/receiver.go | 5 ++--- daprovider/das/data_streaming/signing.go | 8 ++++++++ daprovider/server/client_provider_test.go | 3 ++- daprovider/server/provider_server.go | 8 +++++--- system_tests/bold_customda_challenge_test.go | 5 +++-- 7 files changed, 24 insertions(+), 11 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 11a0c4f123..9121a8e421 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -14,6 +14,7 @@ import ( "path/filepath" "strings" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -700,7 +701,7 @@ func getDAProvider( } headerBytes := daFactory.GetSupportedHeaderBytes() - providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &serverConfig, reader, writer, validator, headerBytes) + providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &serverConfig, reader, writer, validator, headerBytes, data_streaming.TrustingPayloadVerifier()) // Create combined cleanup function closeFn := func() { diff --git a/cmd/daprovider/daprovider.go b/cmd/daprovider/daprovider.go index c98bc248bb..8f4fba54ba 100644 --- a/cmd/daprovider/daprovider.go +++ b/cmd/daprovider/daprovider.go @@ -10,6 +10,7 @@ import ( "syscall" "github.com/knadh/koanf/parsers/json" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" @@ -286,7 +287,7 @@ func startup() error { log.Info("Starting json rpc server", "mode", config.Mode, "addr", config.ProviderServer.Addr, "port", config.ProviderServer.Port) headerBytes := providerFactory.GetSupportedHeaderBytes() - providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator, headerBytes) + providerServer, err := 
dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator, headerBytes, data_streaming.TrustingPayloadVerifier()) if err != nil { return err } diff --git a/daprovider/das/data_streaming/receiver.go b/daprovider/das/data_streaming/receiver.go index 682d3c0346..e3a1779a6b 100644 --- a/daprovider/das/data_streaming/receiver.go +++ b/daprovider/das/data_streaming/receiver.go @@ -12,7 +12,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/offchainlabs/nitro/util/signature" ) const ( @@ -46,8 +45,8 @@ func NewDataStreamReceiver(payloadVerifier *PayloadVerifier, maxPendingMessages } // NewDefaultDataStreamReceiver sets up a new stream receiver with default settings. -func NewDefaultDataStreamReceiver(verifier *signature.Verifier) *DataStreamReceiver { - return NewDataStreamReceiver(DefaultPayloadVerifier(verifier), DefaultMaxPendingMessages, DefaultMessageCollectionExpiry, nil) +func NewDefaultDataStreamReceiver(verifier *PayloadVerifier) *DataStreamReceiver { + return NewDataStreamReceiver(verifier, DefaultMaxPendingMessages, DefaultMessageCollectionExpiry, nil) } // StartStreamingResult is expected by DataStreamer to be returned by the endpoint responsible for the StartReceiving method. 
diff --git a/daprovider/das/data_streaming/signing.go b/daprovider/das/data_streaming/signing.go index 2b64f2b8fe..6ed492c45f 100644 --- a/daprovider/das/data_streaming/signing.go +++ b/daprovider/das/data_streaming/signing.go @@ -27,6 +27,10 @@ func CustomPayloadSigner(signingFunc func([]byte, ...uint64) ([]byte, error)) *P } } +func NoopPayloadSigner() *PayloadSigner { + return CustomPayloadSigner(func(bytes []byte, extras ...uint64) ([]byte, error) { return make([]byte, 0), nil }) +} + // lint:require-exhaustive-initialization type PayloadVerifier struct { verifyPayload func(ctx context.Context, signature []byte, bytes []byte, extras ...uint64) error @@ -45,6 +49,10 @@ func CustomPayloadVerifier(verifyingFunc func(ctx context.Context, signature []b } } +func TrustingPayloadVerifier() *PayloadVerifier { + return CustomPayloadVerifier(func(ctx context.Context, signature []byte, bytes []byte, extras ...uint64) error { return nil }) +} + func flattenDataForSigning(bytes []byte, extras ...uint64) []byte { var bufferForExtras []byte for _, field := range extras { diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index de46779b2d..ae78c691a0 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -8,6 +8,7 @@ import ( "net/http" "testing" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" @@ -68,7 +69,7 @@ func setupProviderServer(ctx context.Context, t *testing.T) *http.Server { validator := referenceda.NewValidator(nil, dummyAddress) headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} - providerServer, err := NewServerWithDAPProvider(ctx, &providerServerConfig, reader, writer, validator, headerBytes) + providerServer, err := NewServerWithDAPProvider(ctx, &providerServerConfig, reader, writer, validator, headerBytes, data_streaming.TrustingPayloadVerifier()) 
testhelpers.RequireImpl(t, err) return providerServer diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index eb48c3fb10..92f16ce7f0 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -78,8 +78,10 @@ func fetchJWTSecret(fileName string) ([]byte, error) { return nil, errors.New("JWT secret file not found") } -// NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components -func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator, headerBytes []byte) (*http.Server, error) { +// NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components. +// The server supports the Data Stream protocol (see `data_streaming` package). The `verifier` parameter is used for +// authenticating the sender (`daclient`). If `nil`, then a trusting verifier will be used (blindly accepting any signature). 
+func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator, headerBytes []byte, verifier *data_streaming.PayloadVerifier) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port)) if err != nil { return nil, err @@ -96,7 +98,7 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader validator: validator, headerBytes: headerBytes, // TODO: nil verifier - dataReceiver: data_streaming.NewDefaultDataStreamReceiver(nil), + dataReceiver: data_streaming.NewDefaultDataStreamReceiver(verifier), } if err = rpcServer.RegisterName("daprovider", server); err != nil { return nil, err diff --git a/system_tests/bold_customda_challenge_test.go b/system_tests/bold_customda_challenge_test.go index 024d446fa3..218517df19 100644 --- a/system_tests/bold_customda_challenge_test.go +++ b/system_tests/bold_customda_challenge_test.go @@ -38,6 +38,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/referenceda" dapserver "github.com/offchainlabs/nitro/daprovider/server" "github.com/offchainlabs/nitro/execution/gethexec" @@ -99,7 +100,7 @@ func createReferenceDAProviderServer(t *testing.T, ctx context.Context, l1Client // Create the provider server headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} - server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, reader, writer, validator, headerBytes) + server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, reader, writer, validator, headerBytes, data_streaming.TrustingPayloadVerifier()) Require(t, err) // Extract the actual address with port @@ -161,7 +162,7 @@ func createEvilDAProviderServer(t *testing.T, ctx context.Context, 
l1Client *eth // In this test we call the writers directly to have more control over batch posting. writer := &assertingWriter{} headerBytes := []byte{daprovider.DACertificateMessageHeaderFlag} - server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, evilProvider, writer, evilProvider, headerBytes) + server, err := dapserver.NewServerWithDAPProvider(ctx, serverConfig, evilProvider, writer, evilProvider, headerBytes, data_streaming.TrustingPayloadVerifier()) Require(t, err) // Extract the actual address with port From 9aa75b962d63affba0d0ef7ff910896bd37805a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:11:23 +0200 Subject: [PATCH 24/56] Linting --- arbnode/node.go | 2 +- cmd/daprovider/daprovider.go | 2 +- daprovider/server/client_provider_test.go | 2 +- daprovider/server/provider_server.go | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 9121a8e421..8639c05ac6 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -14,7 +14,6 @@ import ( "path/filepath" "strings" - "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -41,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/das" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/factory" "github.com/offchainlabs/nitro/daprovider/referenceda" dapserver "github.com/offchainlabs/nitro/daprovider/server" diff --git a/cmd/daprovider/daprovider.go b/cmd/daprovider/daprovider.go index 8f4fba54ba..e399ca709f 100644 --- a/cmd/daprovider/daprovider.go +++ b/cmd/daprovider/daprovider.go @@ -10,7 +10,6 @@ import ( "syscall" "github.com/knadh/koanf/parsers/json" - "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/spf13/pflag" 
"github.com/ethereum/go-ethereum/common" @@ -24,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/factory" "github.com/offchainlabs/nitro/daprovider/referenceda" dapserver "github.com/offchainlabs/nitro/daprovider/server" diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index ae78c691a0..b11e87adc0 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -8,7 +8,6 @@ import ( "net/http" "testing" - "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" @@ -17,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/referenceda" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 92f16ce7f0..5d913c2a5f 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -25,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/server_api" ) From 0e4cb38d454b42bb749e72a74a1e8e8e99b5a268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:25:04 +0200 Subject: [PATCH 25/56] Post-rebase fixes --- daprovider/server/das_migration.go | 2 ++ daprovider/server/provider_server.go | 15 +++++++-------- 2 files changed, 
9 insertions(+), 8 deletions(-) diff --git a/daprovider/server/das_migration.go b/daprovider/server/das_migration.go index a3290047bd..cebb81bcc4 100644 --- a/daprovider/server/das_migration.go +++ b/daprovider/server/das_migration.go @@ -21,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das" "github.com/offchainlabs/nitro/daprovider/das/dasutil" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) @@ -137,6 +138,7 @@ func NewServerForDAS( writer, nil, // DAS doesn't use a validator []byte{daprovider.DASMessageHeaderFlag}, + data_streaming.TrustingPayloadVerifier(), ) if err != nil { // Clean up lifecycle manager if server creation fails diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 5d913c2a5f..00fc4fcc53 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -14,7 +14,6 @@ import ( "strings" "time" - "github.com/offchainlabs/nitro/daprovider/das/data_streaming" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" @@ -25,16 +24,16 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/daprovider" - "github.com/offchainlabs/nitro/daprovider/daclient" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/server_api" ) // lint:require-exhaustive-initialization type Server struct { - reader daprovider.Reader - writer daprovider.Writer - validator daprovider.Validator - headerBytes []byte // Supported header bytes for this provider + reader daprovider.Reader + writer daprovider.Writer + validator daprovider.Validator + headerBytes []byte // Supported header bytes for this provider dataReceiver *data_streaming.DataStreamReceiver } @@ -228,12 +227,12 @@ func (s *Server) SendChunk(ctx context.Context, messageId, 
chunkId hexutil.Uint6 return s.dataReceiver.ReceiveChunk(ctx, data_streaming.MessageId(messageId), uint64(chunkId), chunk, sig) } -func (s *Server) CommitChunkedStore(ctx context.Context, messageId hexutil.Uint64, sig hexutil.Bytes) (*daclient.StoreResult, error) { +func (s *Server) CommitChunkedStore(ctx context.Context, messageId hexutil.Uint64, sig hexutil.Bytes) (*server_api.StoreResult, error) { message, timeout, _, err := s.dataReceiver.FinalizeReceiving(ctx, data_streaming.MessageId(messageId), sig) if err != nil { return nil, err } serializedDACert, err := s.writer.Store(ctx, message, timeout) - return &daclient.StoreResult{SerializedDACert: serializedDACert}, err + return &server_api.StoreResult{SerializedDACert: serializedDACert}, err } From 81d61ce88c566cd3d534c40a420c27968f5d4a1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:27:49 +0200 Subject: [PATCH 26/56] Remove outdated comment --- daprovider/server/provider_server.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 00fc4fcc53..47cb2a2a8e 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -80,7 +80,7 @@ func fetchJWTSecret(fileName string) ([]byte, error) { // NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components. // The server supports the Data Stream protocol (see `data_streaming` package). The `verifier` parameter is used for -// authenticating the sender (`daclient`). If `nil`, then a trusting verifier will be used (blindly accepting any signature). +// authenticating the sender (`daclient`). 
func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator, headerBytes []byte, verifier *data_streaming.PayloadVerifier) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port)) if err != nil { @@ -97,8 +97,6 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader writer: writer, validator: validator, headerBytes: headerBytes, - // TODO: nil verifier - dataReceiver: data_streaming.NewDefaultDataStreamReceiver(verifier), } if err = rpcServer.RegisterName("daprovider", server); err != nil { return nil, err From 7f24e088173dd27cac53437b17f9dc07ff6f92b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:31:30 +0200 Subject: [PATCH 27/56] Fix server initialization --- daprovider/server/provider_server.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 47cb2a2a8e..951b65b58b 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -93,10 +93,11 @@ func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader } server := &Server{ - reader: reader, - writer: writer, - validator: validator, - headerBytes: headerBytes, + reader: reader, + writer: writer, + validator: validator, + headerBytes: headerBytes, + dataReceiver: data_streaming.NewDefaultDataStreamReceiver(verifier), } if err = rpcServer.RegisterName("daprovider", server); err != nil { return nil, err From fc8a35c865517742f149a2743cb82b4652bb13d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:39:25 +0200 Subject: [PATCH 28/56] Remove Store endpoint from the server --- daprovider/server/provider_server.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git 
a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index 951b65b58b..a182e0d83c 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -178,18 +178,6 @@ func (s *Server) CollectPreimages( return &result, nil } -func (s *Server) Store( - ctx context.Context, - message hexutil.Bytes, - timeout hexutil.Uint64, -) (*server_api.StoreResult, error) { - serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout)) - if err != nil { - return nil, err - } - return &server_api.StoreResult{SerializedDACert: serializedDACert}, nil -} - func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*server_api.GenerateReadPreimageProofResult, error) { if s.validator == nil { return nil, errors.New("validator not available") From ac37804cb8097ad6b93e7dac6d0ce300f51b7a14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 12:43:46 +0200 Subject: [PATCH 29/56] Use data streaming in client --- daprovider/daclient/daclient.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index ffe89c3c71..94766b4a99 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -13,15 +13,19 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/daprovider/server_api" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" ) +// lint:require-exhaustive-initialization type Client struct { *rpcclient.RpcClient + *data_streaming.DataStreamer[server_api.StoreResult] } +// lint:require-exhaustive-initialization type ClientConfig struct { Enable bool `koanf:"enable"` WithWriter bool 
`koanf:"with-writer"` @@ -46,8 +50,22 @@ func ClientConfigAddOptions(prefix string, f *pflag.FlagSet) { } func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher) (*Client, error) { - client := &Client{rpcclient.NewRpcClient(config, nil)} - if err := client.Start(ctx); err != nil { + dataStreamer, err := data_streaming.NewDataStreamer[server_api.StoreResult]( + config().URL, + 0, // todo + nil, // todo + data_streaming.DataStreamingRPCMethods{ + StartStream: "daprovider_startChunkedStore", + StreamChunk: "daprovider_sendChunk", + FinalizeStream: "daprovider_commitChunkedStore", + }, + ) + if err != nil { + return nil, err + } + + client := &Client{rpcclient.NewRpcClient(config, nil), dataStreamer} + if err = client.Start(ctx); err != nil { return nil, fmt.Errorf("error starting daprovider client: %w", err) } return client, nil @@ -104,9 +122,9 @@ func (c *Client) Store( message []byte, timeout uint64, ) ([]byte, error) { - var storeResult server_api.StoreResult - if err := c.CallContext(ctx, &storeResult, "daprovider_store", hexutil.Bytes(message), hexutil.Uint64(timeout)); err != nil { - return nil, fmt.Errorf("error returned from daprovider_store rpc method, err: %w", err) + storeResult, err := c.DataStreamer.StreamData(ctx, message, timeout) + if err != nil { + return nil, fmt.Errorf("error returned from daprovider server (chunked store protocol), err: %w", err) } return storeResult.SerializedDACert, nil } From 64106376d7c5a11f23410108ed5b50be12ea7948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 13:00:10 +0200 Subject: [PATCH 30/56] Add body limit argument to the constructor --- arbnode/node.go | 4 ++-- daprovider/daclient/daclient.go | 4 ++-- daprovider/server/client_provider_test.go | 2 +- system_tests/bold_customda_challenge_test.go | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 8639c05ac6..a1cc7efbab 100644 --- a/arbnode/node.go +++ 
b/arbnode/node.go @@ -597,7 +597,7 @@ func getDAProvider( if config.DA.Mode == "external" { // External DA provider mode - daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &config.DA.ExternalProvider.RPC }) + daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &config.DA.ExternalProvider.RPC }, dapserver.DefaultServerConfig.RPCServerBodyLimit) if err != nil { return nil, nil, nil, nil, err } @@ -715,7 +715,7 @@ func getDAProvider( clientConfig := rpcclient.DefaultClientConfig clientConfig.URL = providerServer.Addr clientConfig.JWTSecret = jwtPath - daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &clientConfig }) + daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &clientConfig }, serverConfig.RPCServerBodyLimit) if err != nil { return nil, nil, nil, nil, err } diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 94766b4a99..4b4d5bf1be 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -49,10 +49,10 @@ func ClientConfigAddOptions(prefix string, f *pflag.FlagSet) { rpcclient.RPCClientAddOptions(prefix+".rpc", f, &DefaultClientConfig.RPC) } -func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher) (*Client, error) { +func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher, httpBodySizeLimit int) (*Client, error) { dataStreamer, err := data_streaming.NewDataStreamer[server_api.StoreResult]( config().URL, - 0, // todo + httpBodySizeLimit, nil, // todo data_streaming.DataStreamingRPCMethods{ StartStream: "daprovider_startChunkedStore", diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index b11e87adc0..587ae695e4 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -81,7 +81,7 @@ func setupClient(ctx context.Context, t *testing.T, providerServerAddress 
string URL: providerServerAddress, } } - client, err := daclient.NewClient(ctx, clientConfig) + client, err := daclient.NewClient(ctx, clientConfig, RPCServerBodyLimit) testhelpers.RequireImpl(t, err) return client } diff --git a/system_tests/bold_customda_challenge_test.go b/system_tests/bold_customda_challenge_test.go index 218517df19..4e72df9bad 100644 --- a/system_tests/bold_customda_challenge_test.go +++ b/system_tests/bold_customda_challenge_test.go @@ -413,7 +413,7 @@ func testChallengeProtocolBOLDCustomDA(t *testing.T, evilStrategy EvilStrategy, URL: providerURLNodeA, } } - daClientA, err := daclient.NewClient(ctx, daClientConfigA) + daClientA, err := daclient.NewClient(ctx, daClientConfigA, dapserver.DefaultServerConfig.RPCServerBodyLimit) Require(t, err) daClientConfigB := func() *rpcclient.ClientConfig { @@ -421,7 +421,7 @@ func testChallengeProtocolBOLDCustomDA(t *testing.T, evilStrategy EvilStrategy, URL: providerURLNodeB, } } - daClientB, err := daclient.NewClient(ctx, daClientConfigB) + daClientB, err := daclient.NewClient(ctx, daClientConfigB, dapserver.DefaultServerConfig.RPCServerBodyLimit) Require(t, err) // Create DA readers for validators From c47283703631092e21c852aa1422118f6cee916e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 13:06:48 +0200 Subject: [PATCH 31/56] Add signer argument to the constructor --- arbnode/node.go | 14 ++++++++++++-- daprovider/daclient/daclient.go | 4 ++-- daprovider/server/client_provider_test.go | 2 +- system_tests/bold_customda_challenge_test.go | 4 ++-- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index a1cc7efbab..ccaee73736 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -597,7 +597,12 @@ func getDAProvider( if config.DA.Mode == "external" { // External DA provider mode - daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &config.DA.ExternalProvider.RPC }, 
dapserver.DefaultServerConfig.RPCServerBodyLimit) + daClient, err = daclient.NewClient( + ctx, + func() *rpcclient.ClientConfig { return &config.DA.ExternalProvider.RPC }, + dapserver.DefaultServerConfig.RPCServerBodyLimit, + data_streaming.NoopPayloadSigner(), + ) if err != nil { return nil, nil, nil, nil, err } @@ -715,7 +720,12 @@ func getDAProvider( clientConfig := rpcclient.DefaultClientConfig clientConfig.URL = providerServer.Addr clientConfig.JWTSecret = jwtPath - daClient, err = daclient.NewClient(ctx, func() *rpcclient.ClientConfig { return &clientConfig }, serverConfig.RPCServerBodyLimit) + daClient, err = daclient.NewClient( + ctx, + func() *rpcclient.ClientConfig { return &clientConfig }, + serverConfig.RPCServerBodyLimit, + data_streaming.NoopPayloadSigner(), + ) if err != nil { return nil, nil, nil, nil, err } diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 4b4d5bf1be..42d60e44e5 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -49,11 +49,11 @@ func ClientConfigAddOptions(prefix string, f *pflag.FlagSet) { rpcclient.RPCClientAddOptions(prefix+".rpc", f, &DefaultClientConfig.RPC) } -func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher, httpBodySizeLimit int) (*Client, error) { +func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher, httpBodySizeLimit int, payloadSigner *data_streaming.PayloadSigner) (*Client, error) { dataStreamer, err := data_streaming.NewDataStreamer[server_api.StoreResult]( config().URL, httpBodySizeLimit, - nil, // todo + payloadSigner, data_streaming.DataStreamingRPCMethods{ StartStream: "daprovider_startChunkedStore", StreamChunk: "daprovider_sendChunk", diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index 587ae695e4..61df199776 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -81,7 +81,7 @@ func setupClient(ctx 
context.Context, t *testing.T, providerServerAddress string URL: providerServerAddress, } } - client, err := daclient.NewClient(ctx, clientConfig, RPCServerBodyLimit) + client, err := daclient.NewClient(ctx, clientConfig, RPCServerBodyLimit, data_streaming.NoopPayloadSigner()) testhelpers.RequireImpl(t, err) return client } diff --git a/system_tests/bold_customda_challenge_test.go b/system_tests/bold_customda_challenge_test.go index 4e72df9bad..48220dd175 100644 --- a/system_tests/bold_customda_challenge_test.go +++ b/system_tests/bold_customda_challenge_test.go @@ -413,7 +413,7 @@ func testChallengeProtocolBOLDCustomDA(t *testing.T, evilStrategy EvilStrategy, URL: providerURLNodeA, } } - daClientA, err := daclient.NewClient(ctx, daClientConfigA, dapserver.DefaultServerConfig.RPCServerBodyLimit) + daClientA, err := daclient.NewClient(ctx, daClientConfigA, dapserver.DefaultServerConfig.RPCServerBodyLimit, data_streaming.NoopPayloadSigner()) Require(t, err) daClientConfigB := func() *rpcclient.ClientConfig { @@ -421,7 +421,7 @@ func testChallengeProtocolBOLDCustomDA(t *testing.T, evilStrategy EvilStrategy, URL: providerURLNodeB, } } - daClientB, err := daclient.NewClient(ctx, daClientConfigB, dapserver.DefaultServerConfig.RPCServerBodyLimit) + daClientB, err := daclient.NewClient(ctx, daClientConfigB, dapserver.DefaultServerConfig.RPCServerBodyLimit, data_streaming.NoopPayloadSigner()) Require(t, err) // Create DA readers for validators From 69faaf4643804955aeae9f0996bb57ad14b0a6da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Fri, 26 Sep 2025 13:08:45 +0200 Subject: [PATCH 32/56] Adjust testcase --- daprovider/server/client_provider_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/daprovider/server/client_provider_test.go b/daprovider/server/client_provider_test.go index 61df199776..78dba44e6b 100644 --- a/daprovider/server/client_provider_test.go +++ b/daprovider/server/client_provider_test.go @@ -8,8 +8,6 
@@ import ( "net/http" "testing" - "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -36,7 +34,7 @@ func TestInteractionBetweenClientAndProviderServer_StoreSucceeds(t *testing.T) { testhelpers.RequireImpl(t, err) } -func TestInteractionBetweenClientAndProviderServer_StoreFailsDueToSize(t *testing.T) { +func TestInteractionBetweenClientAndProviderServer_StoreLongMessageSucceeds(t *testing.T) { ctx := context.Background() server := setupProviderServer(ctx, t) client := setupClient(ctx, t, server.Addr) @@ -44,7 +42,7 @@ func TestInteractionBetweenClientAndProviderServer_StoreFailsDueToSize(t *testin message := testhelpers.RandomizeSlice(make([]byte, RPCServerBodyLimit+1)) _, err := client.Store(ctx, message, 0) - require.Regexp(t, ".*Request Entity Too Large.*", err.Error()) + testhelpers.RequireImpl(t, err) } func setupProviderServer(ctx context.Context, t *testing.T) *http.Server { From 32a4565618c60a55c234792a043c53d9f2130ae6 Mon Sep 17 00:00:00 2001 From: Aman Sanghi <102982411+amsanghi@users.noreply.github.com> Date: Mon, 29 Sep 2025 00:27:18 +0530 Subject: [PATCH 33/56] System tests should use BOLD contracts by default (#3690) * System tests should use BOLD contracts by default * more * more * more * more * more --- system_tests/batch_poster_test.go | 3 --- system_tests/bold_l3_support_test.go | 2 +- system_tests/bold_new_challenge_test.go | 2 +- system_tests/common_test.go | 5 +++-- system_tests/estimation_test.go | 2 +- system_tests/fast_confirm_test.go | 4 ++-- system_tests/full_challenge_impl_test.go | 2 +- system_tests/gas_dim_log_a_common_test.go | 2 +- system_tests/program_test.go | 2 +- system_tests/staker_test.go | 4 ++-- 10 files changed, 13 insertions(+), 15 deletions(-) diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index d0ead6ad79..202d369238 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -535,7 
+535,6 @@ func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) { builder := NewNodeBuilder(ctx). DefaultConfig(t, true). - WithBoldDeployment(). WithDelayBuffer(threshold) builder.L2Info.GenerateAccount("User2") builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer @@ -606,7 +605,6 @@ func TestBatchPosterDelayBufferDontForceNonDelayedMessages(t *testing.T) { const threshold = 100 builder := NewNodeBuilder(ctx). DefaultConfig(t, true). - WithBoldDeployment(). WithDelayBuffer(threshold) builder.L2Info.GenerateAccount("User2") builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer @@ -680,7 +678,6 @@ func TestBatchPosterWithDelayProofsAndBacklog(t *testing.T) { const threshold = 10 builder := NewNodeBuilder(ctx). DefaultConfig(t, true). - WithBoldDeployment(). WithDelayBuffer(threshold). WithL1ClientWrapper(t) cleanup := builder.Build(t) diff --git a/system_tests/bold_l3_support_test.go b/system_tests/bold_l3_support_test.go index b4604f5542..0f3ede61bb 100644 --- a/system_tests/bold_l3_support_test.go +++ b/system_tests/bold_l3_support_test.go @@ -37,7 +37,7 @@ func TestL3ChallengeProtocolBOLD(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithBoldDeployment() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) // Block validation requires db hash scheme. 
builder.execConfig.Caching.StateScheme = rawdb.HashScheme diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go index 55e43a44d8..6a55dd0d44 100644 --- a/system_tests/bold_new_challenge_test.go +++ b/system_tests/bold_new_challenge_test.go @@ -134,7 +134,7 @@ func testChallengeProtocolBOLDVirtualBlocks(t *testing.T, wrongAtFirstVirtual bo ctx, cancel := context.WithCancel(context.Background()) defer cancel() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithBoldDeployment() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) // Block validation requires db hash scheme builder.RequireScheme(t, rawdb.HashScheme) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 19409457cd..eecbcf1e43 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -340,6 +340,7 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { // most used values across current tests are set here as default b.withL1 = withL1 b.parallelise = true + b.deployBold = true if withL1 { b.isSequencer = true b.nodeConfig = arbnode.ConfigDefaultL1Test() @@ -386,8 +387,8 @@ func (b *NodeBuilder) WithProdConfirmPeriodBlocks() *NodeBuilder { return b } -func (b *NodeBuilder) WithBoldDeployment() *NodeBuilder { - b.deployBold = true +func (b *NodeBuilder) WithPreBoldDeployment() *NodeBuilder { + b.deployBold = false return b } diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index e5e4b54800..a336f95aa0 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -330,7 +330,7 @@ func TestGasEstimationWithRPCGasLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment() cleanup := builder.Build(t) defer cleanup() diff --git a/system_tests/fast_confirm_test.go 
b/system_tests/fast_confirm_test.go index d0ed5df093..208ebcbad6 100644 --- a/system_tests/fast_confirm_test.go +++ b/system_tests/fast_confirm_test.go @@ -175,7 +175,7 @@ func setupFastConfirmation(ctx context.Context, t *testing.T) (*NodeBuilder, *le }() var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs - builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithProdConfirmPeriodBlocks().DontParalellise() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment().WithProdConfirmPeriodBlocks().DontParalellise() builder.L2Info = NewBlockChainTestInfo( t, types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), @@ -345,7 +345,7 @@ func TestFastConfirmationWithSafe(t *testing.T) { var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs // Create a node with a large confirm period to ensure that the staker can't confirm without the fast confirmer. 
- builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithProdConfirmPeriodBlocks() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment().WithProdConfirmPeriodBlocks() builder.L2Info = NewBlockChainTestInfo( t, types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 1fdaa7eb1e..c7fff32eff 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -236,7 +236,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall ctx, cancel := context.WithCancel(context.Background()) defer cancel() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true).DontParalellise() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment().DontParalellise() initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) l1Info := builder.L1Info l1Info.GenerateGenesisAccount("deployer", initialBalance) diff --git a/system_tests/gas_dim_log_a_common_test.go b/system_tests/gas_dim_log_a_common_test.go index 5ae0d55725..ec8d0b50f1 100644 --- a/system_tests/gas_dim_log_a_common_test.go +++ b/system_tests/gas_dim_log_a_common_test.go @@ -89,7 +89,7 @@ func gasDimensionTestSetup(t *testing.T, expectRevert bool) ( ) { t.Helper() ctx, cancel = context.WithCancel(context.Background()) - builder = NewNodeBuilder(ctx).DefaultConfig(t, true) + builder = NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment() builder.execConfig.Caching.Archive = true if expectRevert { builder.execConfig.Sequencer.MaxRevertGasReject = 0 diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 921ce75913..9210545a7f 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -1685,7 +1685,7 @@ func setupProgramTest(t *testing.T, jit bool, builderOpts ...func(*NodeBuilder)) ) { ctx, cancel := 
context.WithCancel(context.Background()) - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment() for _, opt := range builderOpts { opt(builder) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index c78aad8f71..1e275c8726 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -75,7 +75,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) }() var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs - builder := NewNodeBuilder(ctx).DefaultConfig(t, true).DontParalellise() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment().DontParalellise() builder.L2Info = NewBlockChainTestInfo( t, types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), @@ -501,7 +501,7 @@ func TestGetValidatorWalletContractWithDataposterOnlyUsedToCreateValidatorWallet ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true).WithPreBoldDeployment() cleanup := builder.Build(t) defer cleanup() From 5de0279b6f725e14d697bd2b853a6594e64c041e Mon Sep 17 00:00:00 2001 From: viktorking7 <140458814+viktorking7@users.noreply.github.com> Date: Sun, 28 Sep 2025 21:04:07 +0200 Subject: [PATCH 34/56] Update transaction_streamer.go (#3717) --- arbnode/transaction_streamer.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index fc0d420825..77a6b9f5f9 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -1063,17 +1063,19 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( if s.insertionMutex.TryLock() { return true } - lockTick := time.Tick(5 * 
time.Millisecond) - lockTimeout := time.After(50 * time.Millisecond) + lockTicker := time.NewTicker(5 * time.Millisecond) + defer lockTicker.Stop() + lockTimeout := time.NewTimer(50 * time.Millisecond) + defer lockTimeout.Stop() for { select { - case <-lockTimeout: + case <-lockTimeout.C: return false default: select { - case <-lockTimeout: + case <-lockTimeout.C: return false - case <-lockTick: + case <-lockTicker.C: if s.insertionMutex.TryLock() { return true } From bc621cce7d38072872badc70eb1fa7ff2cbd4bdf Mon Sep 17 00:00:00 2001 From: Mikhail Rogachev Date: Mon, 29 Sep 2025 07:47:18 +0200 Subject: [PATCH 35/56] arbos: Instrument multi-gas in setTrieSlots HostIO (#3713) * Instrument multi-gas in setTrieSlots HostIO * Fix expected storage access in TestMultigasStylus_Calls --------- Co-authored-by: Pepper Lebeck-Jobe --- arbos/programs/api.go | 12 ++++- go-ethereum | 2 +- system_tests/multigas_stylus_program_test.go | 53 +++++++++++++++++++- 3 files changed, 64 insertions(+), 3 deletions(-) diff --git a/arbos/programs/api.go b/arbos/programs/api.go index a70a5b855a..ffa55f3f1d 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -85,8 +85,17 @@ func newApiClosures( return WriteProtection } - cost := vm.WasmStateStoreCost(db, actingAddress, key, value) + costMultiGas := vm.WasmStateStoreCost(db, actingAddress, key, value) + cost := costMultiGas.SingleGas() if cost > *gasLeft { + // Account what is left as WASM computation MultiGas + if *gasLeft > 0 { + scope.Contract.UsedMultiGas.SaturatingIncrementInto( + multigas.ResourceKindWasmComputation, + *gasLeft, + ) + } + *gasLeft = 0 isOutOfGas = true if recording { @@ -95,6 +104,7 @@ func newApiClosures( break } *gasLeft -= cost + scope.Contract.UsedMultiGas.SaturatingAddInto(costMultiGas) db.SetState(actingAddress, key, value) } if isOutOfGas { diff --git a/go-ethereum b/go-ethereum index a230ce368c..56a5a60b09 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 
a230ce368ceeb6eb09cd80455dd1ac39fe248a44 +Subproject commit 56a5a60b0905df75a24ae807e0c9fd84588777b6 diff --git a/system_tests/multigas_stylus_program_test.go b/system_tests/multigas_stylus_program_test.go index b5958c562a..d0c274639d 100644 --- a/system_tests/multigas_stylus_program_test.go +++ b/system_tests/multigas_stylus_program_test.go @@ -310,7 +310,7 @@ func TestMultigasStylus_Calls(t *testing.T) { storageVal := testhelpers.RandomHash() calldata = argsForMulticall(vm.CALL, storeAddr, nil, argsForStorageWrite(key, storageVal)) - expectedStorageAccess = params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929 + expectedStorageAccess = params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929 case vm.DELEGATECALL: calldata = argsForMulticall(vm.DELEGATECALL, callsAddr, nil, []byte{0}) @@ -347,3 +347,54 @@ func TestMultigasStylus_Calls(t *testing.T) { }) } } + +func TestMultigasStylus_StorageWrite(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig.ExposeMultiGas = true + cleanup := builder.Build(t) + defer cleanup() + + l2info := builder.L2Info + l2client := builder.L2.Client + owner := l2info.GetDefaultTransactOpts("Owner", ctx) + + storage := deployWasm(t, ctx, owner, l2client, rustFile("storage")) + + key := testhelpers.RandomHash() + val := testhelpers.RandomHash() + writeArgs := argsForStorageWrite(key, val) + + cases := []struct { + name string + gasLimit uint64 + expectOK bool + }{ + {"success", 1_000_000_000, true}, + {"out_of_gas", 1_500_000, false}, // above intrinsic cost, below storage create slot cost + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + tx := l2info.PrepareTxTo("Owner", &storage, tc.gasLimit, nil, writeArgs) + require.NoError(t, l2client.SendTransaction(ctx, tx)) + + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + if 
tc.expectOK { + require.NoError(t, err) + + // Expected multigas for create slot operation + require.Equal(t, receipt.GasUsed, receipt.MultiGasUsed.SingleGas()) + require.Equal(t, params.ColdSloadCostEIP2929, receipt.MultiGasUsed.Get(multigas.ResourceKindStorageAccess)) + require.Equal(t, params.SstoreSetGasEIP2200, receipt.MultiGasUsed.Get(multigas.ResourceKindStorageGrowth)) + } else { + require.Error(t, err) + receipt, err := l2client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) + require.Equal(t, receipt.GasUsed, receipt.MultiGasUsed.SingleGas()) + } + }) + } +} From eb092838d3c673484f3b66449a7d069f4ae154d5 Mon Sep 17 00:00:00 2001 From: Avory Date: Mon, 29 Sep 2025 08:47:34 +0300 Subject: [PATCH 36/56] Update blockchain.go (#3722) Co-authored-by: Pepper Lebeck-Jobe --- execution/gethexec/blockchain.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 3111876e18..f746579203 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -152,11 +152,11 @@ func WriteOrTestGenblock(chainDb ethdb.Database, cacheConfig *core.BlockChainCon if blockNumber > 0 { prevHash = rawdb.ReadCanonicalHash(chainDb, blockNumber-1) if prevHash == EmptyHash { - return fmt.Errorf("block number %d not found in database", chainDb) + return fmt.Errorf("block number %d not found in database", blockNumber-1) } prevHeader := rawdb.ReadHeader(chainDb, prevHash, blockNumber-1) if prevHeader == nil { - return fmt.Errorf("block header for block %d not found in database", chainDb) + return fmt.Errorf("block header for block %d not found in database", blockNumber-1) } timestamp = prevHeader.Time } From 3cd69a8fbac4942d9f0340154cfe8d3ee4d84905 Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Mon, 29 Sep 2025 06:58:12 +0100 Subject: [PATCH 37/56] Interpolate the RUN_URL in Slack notification (#3723) The RUN_URL needs to be interpolated by the github actions 
framework. This doesn't happen without the double braces. Also, the action expects valid JSON input, so even more braces are applied to the payload argument. Fixes: NIT-3924 --- .github/workflows/nightly-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml index e6eb2f4738..e3e5c97ba9 100644 --- a/.github/workflows/nightly-ci.yml +++ b/.github/workflows/nightly-ci.yml @@ -122,5 +122,7 @@ jobs: method: chat.postMessage token: ${{ secrets.SLACK_BOT_TOKEN }} payload: | - "channel": "${{ secrets.SLACK_CHANNEL_ID }}", - "text": "⚠️ CI job failed! ${RUN_URL}", + { + "channel": "${{ secrets.SLACK_CHANNEL_ID }}", + "text": "⚠️ CI job failed! ${{ env.RUN_URL }}", + } From cf87312f3dccdf0ec22de427c3b9173f80a4d03f Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Mon, 29 Sep 2025 10:25:24 +0300 Subject: [PATCH 38/56] fix: use Counter for nonceFailureCache overflow metric (#3648) * Update sequencer.go * Update restful_server.go * Update dasRpcServer.go * Update dasRpcClient.go * Update validation_client.go * Update dasRpcClient.go * Update dasRpcServer.go * Update restful_server.go * Update validation_client.go * lint * ci fix --------- Co-authored-by: Pepper Lebeck-Jobe Co-authored-by: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> --- daprovider/das/dasRpcClient.go | 18 ++++++------ daprovider/das/dasRpcServer.go | 40 +++++++++++++-------------- daprovider/das/restful_server.go | 18 ++++++------ execution/gethexec/sequencer.go | 2 +- validator/client/validation_client.go | 4 +-- 5 files changed, 41 insertions(+), 41 deletions(-) diff --git a/daprovider/das/dasRpcClient.go b/daprovider/das/dasRpcClient.go index ff93699d47..f15246e75a 100644 --- a/daprovider/das/dasRpcClient.go +++ b/daprovider/das/dasRpcClient.go @@ -23,11 +23,11 @@ import ( ) var ( - rpcClientStoreRequestGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/requests", nil) - 
rpcClientStoreSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/success", nil) - rpcClientStoreFailureGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/failure", nil) - rpcClientStoreStoredBytesGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/bytes", nil) - rpcClientStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpcclient/store/duration", nil, metrics.NewBoundedHistogramSample()) + rpcClientStoreRequestCounter = metrics.NewRegisteredCounter("arb/das/rpcclient/store/requests", nil) + rpcClientStoreSuccessCounter = metrics.NewRegisteredCounter("arb/das/rpcclient/store/success", nil) + rpcClientStoreFailureCounter = metrics.NewRegisteredCounter("arb/das/rpcclient/store/failure", nil) + rpcClientStoreStoredBytesCounter = metrics.NewRegisteredCounter("arb/das/rpcclient/store/bytes", nil) + rpcClientStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpcclient/store/duration", nil, metrics.NewBoundedHistogramSample()) ) // lint:require-exhaustive-initialization @@ -77,14 +77,14 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu } func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*dasutil.DataAvailabilityCertificate, error) { - rpcClientStoreRequestGauge.Inc(1) + rpcClientStoreRequestCounter.Inc(1) start := time.Now() success := false defer func() { if success { - rpcClientStoreSuccessGauge.Inc(1) + rpcClientStoreSuccessCounter.Inc(1) } else { - rpcClientStoreFailureGauge.Inc(1) + rpcClientStoreFailureCounter.Inc(1) } rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds()) }() @@ -108,7 +108,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 return nil, err } - rpcClientStoreStoredBytesGauge.Inc(int64(len(message))) + rpcClientStoreStoredBytesCounter.Inc(int64(len(message))) success = true return &dasutil.DataAvailabilityCertificate{ diff --git a/daprovider/das/dasRpcServer.go 
b/daprovider/das/dasRpcServer.go index 3fbc7c2916..740ae9f3f6 100644 --- a/daprovider/das/dasRpcServer.go +++ b/daprovider/das/dasRpcServer.go @@ -24,14 +24,14 @@ import ( ) var ( - rpcStoreRequestGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/requests", nil) - rpcStoreSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/success", nil) - rpcStoreFailureGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/failure", nil) - rpcStoreStoredBytesGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/bytes", nil) - rpcStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpc/store/duration", nil, metrics.NewBoundedHistogramSample()) - - rpcSendChunkSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpc/sendchunk/success", nil) - rpcSendChunkFailureGauge = metrics.NewRegisteredGauge("arb/das/rpc/sendchunk/failure", nil) + rpcStoreRequestCounter = metrics.NewRegisteredCounter("arb/das/rpc/store/requests", nil) + rpcStoreSuccessCounter = metrics.NewRegisteredCounter("arb/das/rpc/store/success", nil) + rpcStoreFailureCounter = metrics.NewRegisteredCounter("arb/das/rpc/store/failure", nil) + rpcStoreStoredBytesCounter = metrics.NewRegisteredCounter("arb/das/rpc/store/bytes", nil) + rpcStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpc/store/duration", nil, metrics.NewBoundedHistogramSample()) + + rpcSendChunkSuccessCounter = metrics.NewRegisteredCounter("arb/das/rpc/sendchunk/success", nil) + rpcSendChunkFailureCounter = metrics.NewRegisteredCounter("arb/das/rpc/sendchunk/failure", nil) ) const ( @@ -79,7 +79,7 @@ func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpc daHealthChecker: daHealthChecker, signatureVerifier: signatureVerifier, dataStreamReceiver: data_streaming.NewDataStreamReceiver(dataStreamPayloadVerifier, defaultMaxPendingMessages, defaultMessageCollectionExpiry, func(id data_streaming.MessageId) { - rpcStoreFailureGauge.Inc(1) + rpcStoreFailureCounter.Inc(1) }), }) if err != nil { @@ -121,14 
+121,14 @@ type StoreResult struct { func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { // #nosec G115 log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) - rpcStoreRequestGauge.Inc(1) + rpcStoreRequestCounter.Inc(1) start := time.Now() success := false defer func() { if success { - rpcStoreSuccessGauge.Inc(1) + rpcStoreSuccessCounter.Inc(1) } else { - rpcStoreFailureGauge.Inc(1) + rpcStoreFailureCounter.Inc(1) } rpcStoreDurationHistogram.Update(time.Since(start).Nanoseconds()) }() @@ -141,7 +141,7 @@ func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout if err != nil { return nil, err } - rpcStoreStoredBytesGauge.Inc(int64(len(message))) + rpcStoreStoredBytesCounter.Inc(int64(len(message))) success = true return &StoreResult{ KeysetHash: cert.KeysetHash[:], @@ -159,11 +159,11 @@ var ( ) func (s *DASRPCServer) StartChunkedStore(ctx context.Context, timestamp, nChunks, chunkSize, totalSize, timeout hexutil.Uint64, sig hexutil.Bytes) (*data_streaming.StartStreamingResult, error) { - rpcStoreRequestGauge.Inc(1) + rpcStoreRequestCounter.Inc(1) failed := true defer func() { if failed { - rpcStoreFailureGauge.Inc(1) + rpcStoreFailureCounter.Inc(1) } }() @@ -180,9 +180,9 @@ func (s *DASRPCServer) SendChunk(ctx context.Context, messageId, chunkId hexutil success := false defer func() { if success { - rpcSendChunkSuccessGauge.Inc(1) + rpcSendChunkSuccessCounter.Inc(1) } else { - rpcSendChunkFailureGauge.Inc(1) + rpcSendChunkFailureCounter.Inc(1) } }() @@ -204,16 +204,16 @@ func (s *DASRPCServer) CommitChunkedStore(ctx context.Context, messageId hexutil success := false defer func() { if success { - rpcStoreSuccessGauge.Inc(1) + rpcStoreSuccessCounter.Inc(1) } else { - rpcStoreFailureGauge.Inc(1) + 
rpcStoreFailureCounter.Inc(1) } rpcStoreDurationHistogram.Update(time.Since(startTime).Nanoseconds()) }() if err != nil { return nil, err } - rpcStoreStoredBytesGauge.Inc(int64(len(message))) + rpcStoreStoredBytesCounter.Inc(int64(len(message))) success = true return &StoreResult{ KeysetHash: cert.KeysetHash[:], diff --git a/daprovider/das/restful_server.go b/daprovider/das/restful_server.go index f907b007dd..cfda7fb501 100644 --- a/daprovider/das/restful_server.go +++ b/daprovider/das/restful_server.go @@ -24,11 +24,11 @@ import ( ) var ( - restGetByHashRequestGauge = metrics.NewRegisteredGauge("arb/das/rest/getbyhash/requests", nil) - restGetByHashSuccessGauge = metrics.NewRegisteredGauge("arb/das/rest/getbyhash/success", nil) - restGetByHashFailureGauge = metrics.NewRegisteredGauge("arb/das/rest/getbyhash/failure", nil) - restGetByHashReturnedBytesGauge = metrics.NewRegisteredGauge("arb/das/rest/getbyhash/bytes", nil) - restGetByHashDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rest/getbyhash/duration", nil, metrics.NewBoundedHistogramSample()) + restGetByHashRequestCounter = metrics.NewRegisteredCounter("arb/das/rest/getbyhash/requests", nil) + restGetByHashSuccessCounter = metrics.NewRegisteredCounter("arb/das/rest/getbyhash/success", nil) + restGetByHashFailureCounter = metrics.NewRegisteredCounter("arb/das/rest/getbyhash/failure", nil) + restGetByHashReturnedBytesCounter = metrics.NewRegisteredCounter("arb/das/rest/getbyhash/bytes", nil) + restGetByHashDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rest/getbyhash/duration", nil, metrics.NewBoundedHistogramSample()) ) type RestfulDasServer struct { @@ -140,14 +140,14 @@ func (rds *RestfulDasServer) ExpirationPolicyHandler(w http.ResponseWriter, r *h func (rds *RestfulDasServer) GetByHashHandler(w http.ResponseWriter, r *http.Request, requestPath string) { log.Debug("Got request", "requestPath", requestPath) - restGetByHashRequestGauge.Inc(1) + restGetByHashRequestCounter.Inc(1) 
start := time.Now() success := false defer func() { if success { - restGetByHashSuccessGauge.Inc(1) + restGetByHashSuccessCounter.Inc(1) } else { - restGetByHashFailureGauge.Inc(1) + restGetByHashFailureCounter.Inc(1) } restGetByHashDurationHistogram.Update(time.Since(start).Nanoseconds()) }() @@ -176,7 +176,7 @@ func (rds *RestfulDasServer) GetByHashHandler(w http.ResponseWriter, r *http.Req base64.StdEncoding.Encode(encodedResponseData, responseData) var response RestfulDasServerResponse response.Data = string(encodedResponseData) - restGetByHashReturnedBytesGauge.Inc(int64(len(response.Data))) + restGetByHashReturnedBytesCounter.Inc(int64(len(response.Data))) err = json.NewEncoder(w).Encode(response) if err != nil { diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index aacecd55e6..80a8e33015 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -50,7 +50,7 @@ var ( nonceCacheRejectedCounter = metrics.NewRegisteredCounter("arb/sequencer/noncecache/rejected", nil) nonceCacheClearedCounter = metrics.NewRegisteredCounter("arb/sequencer/noncecache/cleared", nil) nonceFailureCacheSizeGauge = metrics.NewRegisteredGauge("arb/sequencer/noncefailurecache/size", nil) - nonceFailureCacheOverflowCounter = metrics.NewRegisteredGauge("arb/sequencer/noncefailurecache/overflow", nil) + nonceFailureCacheOverflowCounter = metrics.NewRegisteredCounter("arb/sequencer/noncefailurecache/overflow", nil) blockCreationTimer = metrics.NewRegisteredHistogram("arb/sequencer/block/creation", nil, metrics.NewBoundedHistogramSample()) successfulBlocksCounter = metrics.NewRegisteredCounter("arb/sequencer/block/successful", nil) conditionalTxRejectedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/conditionaltx/rejected", nil) diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index 207a092a0e..b6d5a50271 100644 --- a/validator/client/validation_client.go +++ 
b/validator/client/validation_client.go @@ -26,7 +26,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_common" ) -var executionNodeOfflineGauge = metrics.NewRegisteredGauge("arb/state_provider/execution_node_offline", nil) +var executionNodeOfflineCounter = metrics.NewRegisteredCounter("arb/state_provider/execution_node_offline", nil) type ValidationClient struct { stopwaiter.StopWaiter @@ -238,7 +238,7 @@ func ctxWithCheckAlive(ctxIn context.Context, execRun validator.ExecutionRun) (c ctxCheckAliveWithTimeout, cancelCheckAliveWithTimeout := context.WithTimeout(ctx, 5*time.Second) err := execRun.CheckAlive(ctxCheckAliveWithTimeout) if err != nil { - executionNodeOfflineGauge.Inc(1) + executionNodeOfflineCounter.Inc(1) cancelCheckAliveWithTimeout() return } From 2037191b6ce4d5d0f405f4e2afc67e57b1c9e8a8 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Mon, 29 Sep 2025 09:44:48 +0200 Subject: [PATCH 39/56] Update rest_server_list.go (#3715) Co-authored-by: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> --- daprovider/das/rest_server_list.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/daprovider/das/rest_server_list.go b/daprovider/das/rest_server_list.go index c88f9f625c..110f1b3807 100644 --- a/daprovider/das/rest_server_list.go +++ b/daprovider/das/rest_server_list.go @@ -61,6 +61,8 @@ func restfulServerURLsFromList( if err != nil { return nil, err } + // Ensure response body is closed to avoid leaking connections and file descriptors + defer resp.Body.Close() if resp.StatusCode != 200 { return nil, fmt.Errorf("received error response (%d) fetching online-url-list at %s", resp.StatusCode, listUrl) } From 0f97322c2811deacc39ca13c1c66c47e9d79bafd Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 29 Sep 2025 10:26:42 +0200 Subject: [PATCH 40/56] Register DAS reader with both flag variants --- daprovider/factory/factory.go | 6 +++++- daprovider/registry.go | 9 +++++++-- 
daprovider/server/das_migration.go | 6 +++++- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go index baff1b4357..c581786069 100644 --- a/daprovider/factory/factory.go +++ b/daprovider/factory/factory.go @@ -88,7 +88,11 @@ func NewDAProviderFactory( // AnyTrust Factory Implementation func (f *AnyTrustFactory) GetSupportedHeaderBytes() []byte { - return []byte{daprovider.DASMessageHeaderFlag} + // Support both DAS without tree flag (0x80) and with tree flag (0x88) + return []byte{ + daprovider.DASMessageHeaderFlag, + daprovider.DASMessageHeaderFlag | daprovider.TreeDASMessageHeaderFlag, + } } func (f *AnyTrustFactory) ValidateConfig() error { diff --git a/daprovider/registry.go b/daprovider/registry.go index 97957d79f4..6af2335c65 100644 --- a/daprovider/registry.go +++ b/daprovider/registry.go @@ -56,9 +56,14 @@ func (r *ReaderRegistry) SupportedHeaderBytes() []byte { return bytes } -// SetupDASReader registers a DAS reader for the DAS header byte +// SetupDASReader registers a DAS reader for the DAS header bytes (with and without Tree flag) func (r *ReaderRegistry) SetupDASReader(reader Reader) error { - return r.Register(DASMessageHeaderFlag, reader) + // Register for DAS without tree flag (0x80) + if err := r.Register(DASMessageHeaderFlag, reader); err != nil { + return err + } + // Register for DAS with tree flag (0x88 = 0x80 | 0x08) + return r.Register(DASMessageHeaderFlag|TreeDASMessageHeaderFlag, reader) } // SetupBlobReader registers a blob reader for the blob header byte diff --git a/daprovider/server/das_migration.go b/daprovider/server/das_migration.go index a3290047bd..f17a0f34d6 100644 --- a/daprovider/server/das_migration.go +++ b/daprovider/server/das_migration.go @@ -130,13 +130,17 @@ func NewServerForDAS( } // Create the generic DA provider server with DAS components + // Support both DAS without tree flag (0x80) and with tree flag (0x88) server, err := 
NewServerWithDAPProvider( ctx, &serverConfig, reader, writer, nil, // DAS doesn't use a validator - []byte{daprovider.DASMessageHeaderFlag}, + []byte{ + daprovider.DASMessageHeaderFlag, + daprovider.DASMessageHeaderFlag | daprovider.TreeDASMessageHeaderFlag, + }, ) if err != nil { // Clean up lifecycle manager if server creation fails From 57a5ec448cc40c107b9d3bcf9f2b84576cab2a1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Mon, 29 Sep 2025 12:27:54 +0200 Subject: [PATCH 41/56] Fix body limit --- arbnode/node.go | 4 ++-- daprovider/server/provider_server.go | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index ccaee73736..4a6b93c1a6 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -600,7 +600,7 @@ func getDAProvider( daClient, err = daclient.NewClient( ctx, func() *rpcclient.ClientConfig { return &config.DA.ExternalProvider.RPC }, - dapserver.DefaultServerConfig.RPCServerBodyLimit, + dapserver.DefaultBodyLimit, data_streaming.NoopPayloadSigner(), ) if err != nil { @@ -723,7 +723,7 @@ func getDAProvider( daClient, err = daclient.NewClient( ctx, func() *rpcclient.ClientConfig { return &clientConfig }, - serverConfig.RPCServerBodyLimit, + dapserver.DefaultBodyLimit, data_streaming.NoopPayloadSigner(), ) if err != nil { diff --git a/daprovider/server/provider_server.go b/daprovider/server/provider_server.go index a182e0d83c..c44178fb77 100644 --- a/daprovider/server/provider_server.go +++ b/daprovider/server/provider_server.go @@ -28,6 +28,8 @@ import ( "github.com/offchainlabs/nitro/daprovider/server_api" ) +const DefaultBodyLimit = 5 * 1024 * 1024 // Taken from go-ethereum http.defaultBodyLimit + // lint:require-exhaustive-initialization type Server struct { reader daprovider.Reader @@ -53,7 +55,7 @@ var DefaultServerConfig = ServerConfig{ JWTSecret: "", EnableDAWriter: false, ServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, - RPCServerBodyLimit: 
genericconf.HTTPServerBodyLimitDefault, + RPCServerBodyLimit: DefaultBodyLimit, } func ServerConfigAddOptions(prefix string, f *flag.FlagSet) { From 138e36543f731ec4d8514eaa761bd65ab77e7e95 Mon Sep 17 00:00:00 2001 From: viktorking7 <140458814+viktorking7@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:22:20 +0200 Subject: [PATCH 42/56] Update blob_client.go (#3721) Co-authored-by: Ganesh Vanahalli --- util/headerreader/blob_client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index b8881e2ec9..76b1899ecb 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -72,7 +72,7 @@ func NewBlobClient(config BlobClientConfig, ec *ethclient.Client) (*BlobClient, } var secondaryBeaconUrl *url.URL if config.SecondaryBeaconUrl != "" { - if secondaryBeaconUrl, err = url.Parse(config.BeaconUrl); err != nil { + if secondaryBeaconUrl, err = url.Parse(config.SecondaryBeaconUrl); err != nil { return nil, fmt.Errorf("failed to parse secondary beacon chain URL: %w", err) } } From 26f406cc4ffa5e2e75cf32d1af53cc12f154d834 Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Mon, 29 Sep 2025 19:21:43 +0300 Subject: [PATCH 43/56] fix(solimpl): correct UpperChild zero-check to use UpperChildId (#3703) Co-authored-by: Pepper Lebeck-Jobe Co-authored-by: Raul Jordan --- .../sol-implementation/edge_challenge_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold/chain-abstraction/sol-implementation/edge_challenge_manager.go b/bold/chain-abstraction/sol-implementation/edge_challenge_manager.go index 74eaa39fdf..487d02bae2 100644 --- a/bold/chain-abstraction/sol-implementation/edge_challenge_manager.go +++ b/bold/chain-abstraction/sol-implementation/edge_challenge_manager.go @@ -167,7 +167,7 @@ func (e *specEdge) UpperChild(ctx context.Context) (option.Option[protocol.EdgeI if err != nil { return option.None[protocol.EdgeId](), err } - if 
edge.LowerChildId == ([32]byte{}) { + if edge.UpperChildId == ([32]byte{}) { return option.None[protocol.EdgeId](), nil } return option.Some(protocol.EdgeId{ From 8453672a61645ff371c0253a61224781665171c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Tue, 30 Sep 2025 12:29:02 +0200 Subject: [PATCH 44/56] use proper client --- daprovider/daclient/daclient.go | 6 ++++-- daprovider/das/data_streaming/protocol_test.go | 8 +++++++- daprovider/das/data_streaming/sender.go | 13 ++++--------- daprovider/das/rpc_client.go | 13 ++++++++++++- 4 files changed, 27 insertions(+), 13 deletions(-) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 42d60e44e5..1ce85a07b8 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -50,10 +50,12 @@ func ClientConfigAddOptions(prefix string, f *pflag.FlagSet) { } func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher, httpBodySizeLimit int, payloadSigner *data_streaming.PayloadSigner) (*Client, error) { + rpcClient := rpcclient.NewRpcClient(config, nil) + dataStreamer, err := data_streaming.NewDataStreamer[server_api.StoreResult]( - config().URL, httpBodySizeLimit, payloadSigner, + rpcClient, data_streaming.DataStreamingRPCMethods{ StartStream: "daprovider_startChunkedStore", StreamChunk: "daprovider_sendChunk", @@ -64,7 +66,7 @@ func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher, httpBo return nil, err } - client := &Client{rpcclient.NewRpcClient(config, nil), dataStreamer} + client := &Client{rpcClient, dataStreamer} if err = client.Start(ctx); err != nil { return nil, fmt.Errorf("error starting daprovider client: %w", err) } diff --git a/daprovider/das/data_streaming/protocol_test.go b/daprovider/das/data_streaming/protocol_test.go index 6b035eb080..f8d1593bde 100644 --- a/daprovider/das/data_streaming/protocol_test.go +++ b/daprovider/das/data_streaming/protocol_test.go @@ -19,6 +19,7 @@ import ( 
"github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -54,7 +55,12 @@ func test(t *testing.T, messageSizeMean, messageSizeStdDev, concurrency int) { signer, verifier := prepareCrypto(t) serverUrl := launchServer(t, ctx, verifier) - streamer, err := NewDataStreamer[ProtocolResult]("http://"+serverUrl, maxStoreChunkBodySize, DefaultPayloadSigner(signer), rpcMethods) + clientConfig := func() *rpcclient.ClientConfig { return &rpcclient.ClientConfig{URL: "http://" + serverUrl} } + rpcClient := rpcclient.NewRpcClient(clientConfig, nil) + err := rpcClient.Start(ctx) + testhelpers.RequireImpl(t, err) + + streamer, err := NewDataStreamer[ProtocolResult](maxStoreChunkBodySize, DefaultPayloadSigner(signer), rpcClient, rpcMethods) testhelpers.RequireImpl(t, err) var wg sync.WaitGroup diff --git a/daprovider/das/data_streaming/sender.go b/daprovider/das/data_streaming/sender.go index cf31b16770..0cbfa4c9e1 100644 --- a/daprovider/das/data_streaming/sender.go +++ b/daprovider/das/data_streaming/sender.go @@ -12,14 +12,15 @@ import ( "golang.org/x/sync/errgroup" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/rpc" + + "github.com/offchainlabs/nitro/util/rpcclient" ) // DataStreamer allows sending arbitrarily big payloads with JSON RPC. It follows a simple chunk-based protocol. // lint:require-exhaustive-initialization type DataStreamer[Result any] struct { // rpcClient is the underlying client for making RPC calls to the receiver. - rpcClient *rpc.Client + rpcClient *rpcclient.RpcClient // chunkSize is the preconfigured size limit on a single data chunk to be sent. chunkSize uint64 // dataSigner is used for sender authentication during the protocol. @@ -37,17 +38,11 @@ type DataStreamingRPCMethods struct { // NewDataStreamer creates a new DataStreamer instance. 
// // Requirements: -// - connecting to `url` must succeed; // - `maxStoreChunkBodySize` must be big enough (it should cover `sendChunkJSONBoilerplate` and leave some space for the data); // - `dataSigner` must not be nil; // // otherwise an `error` is returned. -func NewDataStreamer[T any](url string, maxStoreChunkBodySize int, dataSigner *PayloadSigner, rpcMethods DataStreamingRPCMethods) (*DataStreamer[T], error) { - rpcClient, err := rpc.Dial(url) - if err != nil { - return nil, err - } - +func NewDataStreamer[T any](maxStoreChunkBodySize int, dataSigner *PayloadSigner, rpcClient *rpcclient.RpcClient, rpcMethods DataStreamingRPCMethods) (*DataStreamer[T], error) { chunkSize, err := calculateEffectiveChunkSize(maxStoreChunkBodySize, rpcMethods) if err != nil { return nil, err diff --git a/daprovider/das/rpc_client.go b/daprovider/das/rpc_client.go index ff93699d47..7c9c5d28ab 100644 --- a/daprovider/das/rpc_client.go +++ b/daprovider/das/rpc_client.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/daprovider/das/dasutil" "github.com/offchainlabs/nitro/daprovider/das/data_streaming" "github.com/offchainlabs/nitro/util/pretty" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" ) @@ -62,7 +63,17 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu payloadSigner := data_streaming.CustomPayloadSigner(func(bytes []byte, extras ...uint64) ([]byte, error) { return applyDasSigner(signer, bytes, extras...) 
}) - dataStreamer, err = data_streaming.NewDataStreamer[StoreResult](target, maxStoreChunkBodySize, payloadSigner, rpcMethods) + rpcClient := rpcclient.NewRpcClient(func() *rpcclient.ClientConfig { + config := rpcclient.DefaultClientConfig + config.URL = target + return &config + }, nil) + err := rpcClient.Start(context.Background()) + if err != nil { + return nil, err + } + + dataStreamer, err = data_streaming.NewDataStreamer[StoreResult](maxStoreChunkBodySize, payloadSigner, rpcClient, rpcMethods) if err != nil { return nil, err } From 95aafbf48081e14717094aa6eccc0f2caa8fdabb Mon Sep 17 00:00:00 2001 From: Mikhail Rogachev Date: Tue, 30 Sep 2025 12:42:00 +0200 Subject: [PATCH 45/56] Add multi-dimensional gas metrics (#3726) * Add multi-dimensional gas metrics * Add total multigas metrics * Change multigas metrics from histograms to counters --------- Co-authored-by: Pepper Lebeck-Jobe --- execution/gethexec/executionengine.go | 40 +++++++++++++++++++++------ go-ethereum | 2 +- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index d80b5ab10c..0ce2b70b4e 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -23,6 +23,7 @@ import ( "path" "runtime/pprof" "runtime/trace" + "strings" "sync" "sync/atomic" "testing" @@ -30,6 +31,7 @@ import ( "github.com/google/uuid" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -53,14 +55,16 @@ import ( ) var ( - l1GasPriceEstimateGauge = metrics.NewRegisteredGauge("arb/l1gasprice/estimate", nil) - baseFeeGauge = metrics.NewRegisteredGauge("arb/block/basefee", nil) - blockGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/gasused", nil, metrics.NewBoundedHistogramSample()) - txCountHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/count", 
nil, metrics.NewBoundedHistogramSample()) - txGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/gasused", nil, metrics.NewBoundedHistogramSample()) - gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) - blockExecutionTimer = metrics.NewRegisteredHistogram("arb/block/execution", nil, metrics.NewBoundedHistogramSample()) - blockWriteToDbTimer = metrics.NewRegisteredHistogram("arb/block/writetodb", nil, metrics.NewBoundedHistogramSample()) + l1GasPriceEstimateGauge = metrics.NewRegisteredGauge("arb/l1gasprice/estimate", nil) + baseFeeGauge = metrics.NewRegisteredGauge("arb/block/basefee", nil) + blockGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/gasused", nil, metrics.NewBoundedHistogramSample()) + txCountHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/count", nil, metrics.NewBoundedHistogramSample()) + txGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/gasused", nil, metrics.NewBoundedHistogramSample()) + gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) + multiGasUsedSinceStartupCounters = make([]*metrics.Counter, multigas.NumResourceKind) + totalMultiGasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/multigas_used/total", nil) + blockExecutionTimer = metrics.NewRegisteredHistogram("arb/block/execution", nil, metrics.NewBoundedHistogramSample()) + blockWriteToDbTimer = metrics.NewRegisteredHistogram("arb/block/writetodb", nil, metrics.NewBoundedHistogramSample()) ) var ExecutionEngineBlockCreationStopped = errors.New("block creation stopped in execution engine") @@ -118,6 +122,13 @@ func NewL1PriceData() *L1PriceData { } } +func init() { + for dimension := multigas.ResourceKind(0); dimension < multigas.NumResourceKind; dimension++ { + metricName := fmt.Sprintf("arb/multigas_used/%v", strings.ToLower(dimension.String())) + multiGasUsedSinceStartupCounters[dimension] = metrics.NewRegisteredCounter(metricName, nil) + } 
+} + func NewExecutionEngine(bc *core.BlockChain, syncTillBlock uint64, exposeMultiGas bool) (*ExecutionEngine, error) { return &ExecutionEngine{ bc: bc, @@ -825,9 +836,20 @@ func (s *ExecutionEngine) appendBlock(block *types.Block, statedb *state.StateDB txCountHistogram.Update(int64(len(block.Transactions()) - 1)) var blockGasused uint64 for i := 1; i < len(receipts); i++ { - val := arbmath.SaturatingUSub(receipts[i].GasUsed, receipts[i].GasUsedForL1) + receipt := receipts[i] + val := arbmath.SaturatingUSub(receipt.GasUsed, receipt.GasUsedForL1) txGasUsedHistogram.Update(int64(val)) blockGasused += val + + if s.exposeMultiGas { + for kind := range multiGasUsedSinceStartupCounters { + amount := receipt.MultiGasUsed.Get(multigas.ResourceKind(kind)) + if amount > 0 { + multiGasUsedSinceStartupCounters[kind].Inc(int64(amount)) + } + } + totalMultiGasUsedSinceStartupCounter.Inc(int64(receipt.MultiGasUsed.SingleGas())) + } } blockGasUsedHistogram.Update(int64(blockGasused)) gasUsedSinceStartupCounter.Inc(int64(blockGasused)) diff --git a/go-ethereum b/go-ethereum index 56a5a60b09..49227c2f18 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 56a5a60b0905df75a24ae807e0c9fd84588777b6 +Subproject commit 49227c2f18ceb00fec0cd9f11fc15ce72072efdd From 9d4ce596b085e9a371792317c006eae559803c54 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul <8294320+gligneul@users.noreply.github.com> Date: Tue, 30 Sep 2025 10:48:46 -0300 Subject: [PATCH 46/56] Instrument multi-gas in precompiles (#3729) * Instrument multi-gas in precompiles Close NIT-1557 * Bump go-ethereum * Change balance multi-gas dimension * Test for context burn, burned and gas left * Fix lint warning --- arbos/burn/burn.go | 15 +++-- arbos/programs/native.go | 9 ++- arbos/programs/params.go | 3 +- arbos/programs/programs.go | 2 +- arbos/programs/wasm.go | 7 +- arbos/retryables/retryable.go | 3 +- arbos/storage/storage.go | 13 ++-- execution/nodeInterface/virtual-contracts.go | 6 +- 
gethhook/geth-hook.go | 3 +- go-ethereum | 2 +- precompiles/ArbInfo.go | 8 ++- precompiles/ArbNativeTokenManager.go | 5 +- precompiles/ArbRetryableTx.go | 13 ++-- precompiles/ArbRetryableTx_test.go | 2 +- precompiles/ArbWasm.go | 3 +- precompiles/ArbosTest.go | 4 +- precompiles/context.go | 19 +++--- precompiles/context_test.go | 70 ++++++++++++++++++++ precompiles/precompile.go | 60 +++++++++-------- precompiles/precompile_test.go | 2 +- precompiles/wrapper.go | 21 +++--- 21 files changed, 184 insertions(+), 86 deletions(-) create mode 100644 precompiles/context_test.go diff --git a/arbos/burn/burn.go b/arbos/burn/burn.go index fd10635578..439c0448ee 100644 --- a/arbos/burn/burn.go +++ b/arbos/burn/burn.go @@ -6,15 +6,16 @@ package burn import ( "fmt" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" ) type Burner interface { - Burn(amount uint64) error + Burn(kind multigas.ResourceKind, amount uint64) error Burned() uint64 - GasLeft() *uint64 // `SystemBurner`s panic (no notion of GasLeft) + GasLeft() uint64 // `SystemBurner`s panic (no notion of GasLeft) BurnOut() error Restrict(err error) HandleError(err error) error @@ -23,7 +24,7 @@ type Burner interface { } type SystemBurner struct { - gasBurnt uint64 + gasBurnt multigas.MultiGas tracingInfo *util.TracingInfo readOnly bool } @@ -35,20 +36,20 @@ func NewSystemBurner(tracingInfo *util.TracingInfo, readOnly bool) *SystemBurner } } -func (burner *SystemBurner) Burn(amount uint64) error { - burner.gasBurnt += amount +func (burner *SystemBurner) Burn(kind multigas.ResourceKind, amount uint64) error { + burner.gasBurnt.SaturatingIncrementInto(kind, amount) return nil } func (burner *SystemBurner) Burned() uint64 { - return burner.gasBurnt + return burner.gasBurnt.SingleGas() } func (burner *SystemBurner) BurnOut() error { panic("called BurnOut on a system burner") } -func (burner *SystemBurner) GasLeft() *uint64 { +func (burner 
*SystemBurner) GasLeft() uint64 { panic("called GasLeft on a system burner") } diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 059d3819bc..f00e2d6873 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -24,6 +24,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -75,7 +76,13 @@ func activateProgram( runCtx *core.MessageRunContext, ) (*activationInfo, error) { moduleActivationMandatory := true - info, asmMap, err := activateProgramInternal(program, codehash, wasm, page_limit, stylusVersion, arbosVersionForGas, debug, burner.GasLeft(), runCtx.WasmTargets(), moduleActivationMandatory) + suppliedGas := burner.GasLeft() + gasLeft := suppliedGas + info, asmMap, err := activateProgramInternal(program, codehash, wasm, page_limit, stylusVersion, arbosVersionForGas, debug, &gasLeft, runCtx.WasmTargets(), moduleActivationMandatory) + if gasLeft < suppliedGas { + // Ignore the out-of-gas error because we want to return the error above + burner.Burn(multigas.ResourceKindComputation, suppliedGas-gasLeft) //nolint:errcheck + } if err != nil { return nil, err } diff --git a/arbos/programs/params.go b/arbos/programs/params.go index d80f04e428..3c42094da4 100644 --- a/arbos/programs/params.go +++ b/arbos/programs/params.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -65,7 +66,7 @@ func (p Programs) Params() (*StylusParams, error) { sto := p.backingStorage.OpenCachedSubStorage(paramsKey) // assume reads are warm due to the frequency of access - if err := sto.Burner().Burn(1 * params.WarmStorageReadCostEIP2929); err != nil { + if err := sto.Burner().Burn(multigas.ResourceKindComputation, 
params.WarmStorageReadCostEIP2929); err != nil { return &StylusParams{}, err } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 28a2630475..b842bee8ab 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -451,7 +451,7 @@ func (p Programs) SetProgramCached( } // pay to cache the program, or to re-cache in case of upcoming revert - if err := p.programs.Burner().Burn(uint64(program.initCost)); err != nil { + if err := p.programs.Burner().Burn(multigas.ResourceKindStorageAccess, uint64(program.initCost)); err != nil { return err } moduleHash, err := p.moduleHashes.Get(codeHash) diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 204753f7fe..b6a0030695 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -10,6 +10,7 @@ import ( "errors" "unsafe" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -71,7 +72,8 @@ func activateProgram( errBuf := make([]byte, 1024) debugMode := arbmath.BoolToUint32(debug) moduleHash := common.Hash{} - gasPtr := burner.GasLeft() + gasSupplied := burner.GasLeft() + gasLeft := burner.GasLeft() asmEstimate := uint32(0) initGas := uint16(0) cachedInitGas := uint16(0) @@ -89,10 +91,11 @@ func activateProgram( debugMode, arbutil.SliceToUnsafePointer(codehash[:]), arbutil.SliceToUnsafePointer(moduleHash[:]), - unsafe.Pointer(gasPtr), + unsafe.Pointer(&gasLeft), arbutil.SliceToUnsafePointer(errBuf), uint32(len(errBuf)), ) + burner.Burn(multigas.ResourceKindComputation, gasSupplied-gasLeft) if errLen != 0 { err := errors.New(string(errBuf[:errLen])) return nil, err diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index acfa218f4f..43e344f96f 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -8,6 +8,7 @@ import ( "errors" "math/big" + "github.com/ethereum/go-ethereum/arbitrum/multigas" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" @@ -246,7 +247,7 @@ func (rs *RetryableState) Keepalive( newTimeout := timeout + RetryableLifetimeSeconds // Pay in advance for the work needed to reap the duplicate from the timeout queue - return newTimeout, rs.retryables.Burner().Burn(RetryableReapPrice) + return newTimeout, rs.retryables.Burner().Burn(multigas.ResourceKindComputation, RetryableReapPrice) } func (retryable *Retryable) Equals(other *Retryable) (bool, error) { // for testing diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 47c750040d..d06453b2d1 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -10,6 +10,7 @@ import ( "math/big" "sync/atomic" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" @@ -126,7 +127,7 @@ func (s *Storage) Account() common.Address { } func (s *Storage) Get(key common.Hash) (common.Hash, error) { - err := s.burner.Burn(StorageReadCost) + err := s.burner.Burn(multigas.ResourceKindStorageAccess, StorageReadCost) if err != nil { return common.Hash{}, err } @@ -163,7 +164,7 @@ func (s *Storage) Set(key common.Hash, value common.Hash) error { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) return vm.ErrWriteProtection } - err := s.burner.Burn(writeCost(value)) + err := s.burner.Burn(multigas.ResourceKindStorageAccess, writeCost(value)) if err != nil { return err } @@ -312,7 +313,7 @@ func (s *Storage) ClearBytes() error { } func (s *Storage) GetCodeHash(address common.Address) (common.Hash, error) { - err := s.burner.Burn(StorageCodeHashCost) + err := s.burner.Burn(multigas.ResourceKindStorageAccess, StorageCodeHashCost) if err != nil { return common.Hash{}, err } @@ -329,7 +330,7 @@ func (s *Storage) Keccak(data ...[]byte) ([]byte, error) 
{ byteCount += uint64(len(part)) } cost := 30 + 6*arbmath.WordsForBytes(byteCount) - if err := s.burner.Burn(cost); err != nil { + if err := s.burner.Burn(multigas.ResourceKindComputation, cost); err != nil { return nil, err } return crypto.Keccak256(data...), nil @@ -373,7 +374,7 @@ func (s *Storage) NewSlot(offset uint64) StorageSlot { } func (ss *StorageSlot) Get() (common.Hash, error) { - err := ss.burner.Burn(StorageReadCost) + err := ss.burner.Burn(multigas.ResourceKindStorageAccess, StorageReadCost) if err != nil { return common.Hash{}, err } @@ -388,7 +389,7 @@ func (ss *StorageSlot) Set(value common.Hash) error { log.Error("Read-only burner attempted to mutate state", "value", value) return vm.ErrWriteProtection } - err := ss.burner.Burn(writeCost(value)) + err := ss.burner.Burn(multigas.ResourceKindStorageAccess, writeCost(value)) if err != nil { return err } diff --git a/execution/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go index 2c77b00d0d..92232259e3 100644 --- a/execution/nodeInterface/virtual-contracts.go +++ b/execution/nodeInterface/virtual-contracts.go @@ -95,7 +95,7 @@ func init() { }() core.ReadyEVMForL2(evm, msg) - output, gasLeft, err := precompile.Call( + output, _, gasUsed, err := precompile.Call( msg.Data, address, address, msg.From, msg.Value, false, msg.GasLimit, evm, ) if err != nil { @@ -105,8 +105,8 @@ func init() { return returnMessage, nil, nil } res := &ExecutionResult{ - UsedGas: msg.GasLimit - gasLeft, - MaxUsedGas: msg.GasLimit - gasLeft, + UsedGas: gasUsed.SingleGas(), + MaxUsedGas: gasUsed.SingleGas(), Err: nil, ReturnData: output, ScheduledTxes: nil, diff --git a/gethhook/geth-hook.go b/gethhook/geth-hook.go index abbce1631c..b3b881729c 100644 --- a/gethhook/geth-hook.go +++ b/gethhook/geth-hook.go @@ -7,6 +7,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/vm" @@ -37,7 +38,7 @@ func (p ArbosPrecompileWrapper) RunAdvanced( input []byte, gasSupplied uint64, info *vm.AdvancedPrecompileCall, -) (ret []byte, gasLeft uint64, err error) { +) (ret []byte, gasLeft uint64, usedMultiGas multigas.MultiGas, err error) { // Precompiles don't actually enter evm execution like normal calls do, // so we need to increment the depth here to simulate the callstack change. diff --git a/go-ethereum b/go-ethereum index 49227c2f18..914f7a78ea 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 49227c2f18ceb00fec0cd9f11fc15ce72072efdd +Subproject commit 914f7a78ead98713b4d1e6c82d3f6464d18d0701 diff --git a/precompiles/ArbInfo.go b/precompiles/ArbInfo.go index 85590d7d87..d347ff7009 100644 --- a/precompiles/ArbInfo.go +++ b/precompiles/ArbInfo.go @@ -4,6 +4,7 @@ package precompiles import ( + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/util/arbmath" @@ -16,7 +17,7 @@ type ArbInfo struct { // GetBalance retrieves an account's balance func (con ArbInfo) GetBalance(c ctx, evm mech, account addr) (huge, error) { - if err := c.Burn(params.BalanceGasEIP1884); err != nil { + if err := c.Burn(multigas.ResourceKindComputation, params.BalanceGasEIP1884); err != nil { return nil, err } return evm.StateDB.GetBalance(account).ToBig(), nil @@ -24,11 +25,12 @@ func (con ArbInfo) GetBalance(c ctx, evm mech, account addr) (huge, error) { // GetCode retrieves a contract's deployed code func (con ArbInfo) GetCode(c ctx, evm mech, account addr) ([]byte, error) { - if err := c.Burn(params.ColdSloadCostEIP2929); err != nil { + if err := c.Burn(multigas.ResourceKindStorageAccess, params.ColdSloadCostEIP2929); err != nil { return nil, err } code := evm.StateDB.GetCode(account) - if err := c.Burn(params.CopyGas * arbmath.WordsForBytes(uint64(len(code)))); err != nil { + words := 
arbmath.WordsForBytes(uint64(len(code))) + if err := c.Burn(multigas.ResourceKindStorageAccess, params.CopyGas*words); err != nil { return nil, err } return code, nil diff --git a/precompiles/ArbNativeTokenManager.go b/precompiles/ArbNativeTokenManager.go index 6a7833e416..f0a165dfd2 100644 --- a/precompiles/ArbNativeTokenManager.go +++ b/precompiles/ArbNativeTokenManager.go @@ -8,6 +8,7 @@ import ( "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/params" ) @@ -29,7 +30,7 @@ func (con ArbNativeTokenManager) MintNativeToken(c ctx, evm mech, amount huge) e if !con.hasAccess(c) { return c.BurnOut() } - if err := c.Burn(mintBurnGasCost); err != nil { + if err := c.Burn(multigas.ResourceKindStorageAccess, mintBurnGasCost); err != nil { return err } @@ -43,7 +44,7 @@ func (con ArbNativeTokenManager) BurnNativeToken(c ctx, evm mech, amount huge) e if !con.hasAccess(c) { return c.BurnOut() } - if err := c.Burn(mintBurnGasCost); err != nil { + if err := c.Burn(multigas.ResourceKindStorageAccess, mintBurnGasCost); err != nil { return err } diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 9da33ede10..6e4207aef0 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -7,6 +7,7 @@ import ( "errors" "math/big" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -56,7 +57,7 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er return hash{}, err } writeBytes := arbmath.WordsForBytes(byteCount) - if err := c.Burn(params.SloadGas * writeBytes); err != nil { + if err := c.Burn(multigas.ResourceKindStorageAccess, params.SloadGas*writeBytes); err != nil { return hash{}, err } @@ -99,10 +100,10 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId 
bytes32) (bytes32, er gasCostToReturnResult := params.CopyGas gasPoolUpdateCost := storage.StorageReadCost + storage.StorageWriteCost futureGasCosts := eventCost + gasCostToReturnResult + gasPoolUpdateCost - if c.gasLeft < futureGasCosts { - return hash{}, c.Burn(futureGasCosts) // this will error + if c.GasLeft() < futureGasCosts { + return hash{}, c.Burn(multigas.ResourceKindComputation, futureGasCosts) // this will error } - gasToDonate := c.gasLeft - futureGasCosts + gasToDonate := c.GasLeft() - futureGasCosts if gasToDonate < params.TxGas { return hash{}, errors.New("not enough gas to run redeem attempt") } @@ -121,7 +122,7 @@ func (con ArbRetryableTx) Redeem(c ctx, evm mech, ticketId bytes32) (bytes32, er // To prepare for the enqueued retry event, we burn gas here, adding it back to the pool right before retrying. // The gas payer for this tx will get a credit for the wei they paid for this gas when retrying. // We burn as much gas as we can, leaving only enough to pay for copying out the return data. 
- if err := c.Burn(gasToDonate); err != nil { + if err := c.Burn(multigas.ResourceKindComputation, gasToDonate); err != nil { return hash{}, err } @@ -164,7 +165,7 @@ func (con ArbRetryableTx) Keepalive(c ctx, evm mech, ticketId bytes32) (huge, er return nil, con.oldNotFoundError(c) } updateCost := arbmath.WordsForBytes(nbytes) * params.SstoreSetGas / 100 - if err := c.Burn(updateCost); err != nil { + if err := c.Burn(multigas.ResourceKindStorageAccess, updateCost); err != nil { return big.NewInt(0), err } diff --git a/precompiles/ArbRetryableTx_test.go b/precompiles/ArbRetryableTx_test.go index 1d951fb169..6c2ba3ff3a 100644 --- a/precompiles/ArbRetryableTx_test.go +++ b/precompiles/ArbRetryableTx_test.go @@ -69,7 +69,7 @@ func TestRetryableRedeem(t *testing.T) { Require(t, err) retryAddress := common.HexToAddress("6e") - _, gasLeft, err := Precompiles()[retryAddress].Call( + _, gasLeft, _, err := Precompiles()[retryAddress].Call( redeemCalldata, retryAddress, retryAddress, diff --git a/precompiles/ArbWasm.go b/precompiles/ArbWasm.go index e864e49059..dc7b11a032 100644 --- a/precompiles/ArbWasm.go +++ b/precompiles/ArbWasm.go @@ -4,6 +4,7 @@ package precompiles import ( + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/vm" @@ -38,7 +39,7 @@ func (con ArbWasm) ActivateProgram(c ctx, evm mech, value huge, program addr) (u programs := c.State.Programs() // charge a fixed cost up front to begin activation - if err := c.Burn(1659168); err != nil { + if err := c.Burn(multigas.ResourceKindComputation, 1659168); err != nil { return 0, nil, err } version, codeHash, moduleHash, dataFee, takeAllGas, err := programs.ActivateProgram(evm, program, runCtx, debug) diff --git a/precompiles/ArbosTest.go b/precompiles/ArbosTest.go index 08ba9cb1e3..5bbe7c5ded 100644 --- a/precompiles/ArbosTest.go +++ b/precompiles/ArbosTest.go @@ -5,6 +5,8 @@ package 
precompiles import ( "errors" + + "github.com/ethereum/go-ethereum/arbitrum/multigas" ) // ArbosTest provides a method of burning arbitrary amounts of gas, which exists for historical reasons. @@ -18,6 +20,6 @@ func (con ArbosTest) BurnArbGas(c ctx, gasAmount huge) error { return errors.New("not a uint64") } //nolint:errcheck - c.Burn(gasAmount.Uint64()) // burn the amount, even if it's more than the user has + c.Burn(multigas.ResourceKindComputation, gasAmount.Uint64()) // burn the amount, even if it's more than the user has return nil } diff --git a/precompiles/context.go b/precompiles/context.go index 6dd149f002..9cc3b12a38 100644 --- a/precompiles/context.go +++ b/precompiles/context.go @@ -6,6 +6,7 @@ package precompiles import ( "math/big" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -27,33 +28,33 @@ type ctx = *Context type Context struct { caller addr gasSupplied uint64 - gasLeft uint64 + gasUsed multigas.MultiGas txProcessor *arbos.TxProcessor State *arbosState.ArbosState tracingInfo *util.TracingInfo readOnly bool } -func (c *Context) Burn(amount uint64) error { - if c.gasLeft < amount { +func (c *Context) Burn(kind multigas.ResourceKind, amount uint64) error { + if c.GasLeft() < amount { return c.BurnOut() } - c.gasLeft -= amount + c.gasUsed.SaturatingIncrementInto(kind, amount) return nil } //nolint:unused func (c *Context) Burned() uint64 { - return c.gasSupplied - c.gasLeft + return c.gasUsed.SingleGas() } func (c *Context) BurnOut() error { - c.gasLeft = 0 + c.gasUsed.SaturatingIncrementInto(multigas.ResourceKindComputation, c.GasLeft()) return vm.ErrOutOfGas } -func (c *Context) GasLeft() *uint64 { - return &c.gasLeft +func (c *Context) GasLeft() uint64 { + return c.gasSupplied - c.gasUsed.SingleGas() } func (c *Context) Restrict(err error) { @@ -81,7 +82,7 @@ func testContext(caller addr, evm mech) *Context { 
ctx := &Context{ caller: caller, gasSupplied: ^uint64(0), - gasLeft: ^uint64(0), + gasUsed: multigas.ZeroGas(), tracingInfo: tracingInfo, readOnly: false, } diff --git a/precompiles/context_test.go b/precompiles/context_test.go new file mode 100644 index 0000000000..63a06ba990 --- /dev/null +++ b/precompiles/context_test.go @@ -0,0 +1,70 @@ +// Copyright 2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package precompiles + +import ( + "errors" + "testing" + + "github.com/ethereum/go-ethereum/arbitrum/multigas" + "github.com/ethereum/go-ethereum/core/vm" +) + +func TestContextBurn(t *testing.T) { + // Start with 1000 gas available + ctx := Context{ + gasSupplied: 1_000, + gasUsed: multigas.ZeroGas(), + } + if got, want := ctx.GasLeft(), uint64(1000); got != want { + t.Errorf("wrong gas left: got %v, want %v", got, want) + } + if got, want := ctx.Burned(), uint64(0); got != want { + t.Errorf("wrong gas burned: got %v, want %v", got, want) + } + + // Burn 700 storage access + if err := ctx.Burn(multigas.ResourceKindStorageAccess, 700); err != nil { + t.Errorf("unexpected error from burn: %v", err) + } + if got, want := ctx.GasLeft(), uint64(300); got != want { + t.Errorf("wrong gas left: got %v, want %v", got, want) + } + if got, want := ctx.Burned(), uint64(700); got != want { + t.Errorf("wrong gas burned: got %v, want %v", got, want) + } + + // Burn 200 storage growth + if err := ctx.Burn(multigas.ResourceKindStorageGrowth, 200); err != nil { + t.Errorf("unexpected error from burn: %v", err) + } + if got, want := ctx.GasLeft(), uint64(100); got != want { + t.Errorf("wrong gas left: got %v, want %v", got, want) + } + if got, want := ctx.Burned(), uint64(900); got != want { + t.Errorf("wrong gas burned: got %v, want %v", got, want) + } + + // Burn 200 more storage growth, which should error with out of gas + if err := ctx.Burn(multigas.ResourceKindStorageGrowth, 200); !errors.Is(err, 
vm.ErrOutOfGas) { + t.Errorf("wrong error from burn: got %v, want %v", err, vm.ErrOutOfGas) + } + if got, want := ctx.GasLeft(), uint64(0); got != want { + t.Errorf("wrong gas left: got %v, want %v", got, want) + } + if got, want := ctx.Burned(), uint64(1000); got != want { + t.Errorf("wrong gas burned: got %v, want %v", got, want) + } + + // Check the multigas dimensions + if got, want := ctx.gasUsed.Get(multigas.ResourceKindStorageAccess), uint64(700); got != want { + t.Errorf("wrong storage access: got %v, want %v", got, want) + } + if got, want := ctx.gasUsed.Get(multigas.ResourceKindStorageGrowth), uint64(200); got != want { + t.Errorf("wrong storage growth: got %v, want %v", got, want) + } + if got, want := ctx.gasUsed.Get(multigas.ResourceKindComputation), uint64(100); got != want { + t.Errorf("wrong computation: got %v, want %v", got, want) + } +} diff --git a/precompiles/precompile.go b/precompiles/precompile.go index af0591f352..46e42a719d 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -45,7 +46,7 @@ type ArbosPrecompile interface { readOnly bool, gasSupplied uint64, evm *vm.EVM, - ) (output []byte, gasLeft uint64, err error) + ) (output []byte, gasLeft uint64, usedMultiGas multigas.MultiGas, err error) Precompile() *Precompile Name() string @@ -367,7 +368,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr // an error occurred during gascost() return []reflect.Value{emitCost[1]} } - if err := callerCtx.Burn(cost); err != nil { + if err := callerCtx.Burn(multigas.ResourceKindHistoryGrowth, cost); err != nil { // the user has run out of gas return []reflect.Value{reflect.ValueOf(vm.ErrOutOfGas)}
} @@ -544,7 +545,7 @@ func Precompiles() map[addr]ArbosPrecompile { } return &Context{ gasSupplied: gasLimit, - gasLeft: gasLimit, + gasUsed: multigas.ZeroGas(), } } @@ -692,44 +693,44 @@ func (p *Precompile) Call( readOnly bool, gasSupplied uint64, evm *vm.EVM, -) (output []byte, gasLeft uint64, err error) { +) (output []byte, gasLeft uint64, multiGasUsed multigas.MultiGas, err error) { arbosVersion := arbosState.ArbOSVersion(evm.StateDB) if arbosVersion < p.arbosVersion { // the precompile isn't yet active, so treat this call as if it were to a contract that doesn't exist - return []byte{}, gasSupplied, nil + return []byte{}, gasSupplied, multigas.ZeroGas(), nil } if len(input) < 4 { // ArbOS precompiles always have canonical method selectors - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } id := *(*[4]byte)(input) method, ok := p.methods[id] if !ok || arbosVersion < method.arbosVersion || (method.maxArbosVersion > 0 && arbosVersion > method.maxArbosVersion) { // method does not exist or hasn't yet been activated - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } if method.purity >= view && actingAsAddress != precompileAddress { // should not access precompile superpowers when not acting as the precompile - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } if method.purity >= write && readOnly { // tried to write to global state in read-only mode - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } if method.purity < payable && value.Sign() != 0 { // tried to pay something that's non-payable - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } callerCtx := &Context{ caller: caller, gasSupplied: gasSupplied, - gasLeft: 
gasSupplied, + gasUsed: multigas.ZeroGas(), readOnly: method.purity <= view, tracingInfo: util.NewTracingInfo(evm, caller, precompileAddress, util.TracingDuringEVM), } @@ -737,16 +738,16 @@ func (p *Precompile) Call( // len(input) must be at least 4 because of the check near the start of this function // #nosec G115 argsCost := params.CopyGas * arbmath.WordsForBytes(uint64(len(input)-4)) - if err := callerCtx.Burn(argsCost); err != nil { + if err := callerCtx.Burn(multigas.ResourceKindL2Calldata, argsCost); err != nil { // user cannot afford the argument data supplied - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } if method.purity != pure { // impure methods may need the ArbOS state, so open & update the call context now state, err := arbosState.OpenArbosState(evm.StateDB, callerCtx) if err != nil { - return nil, 0, err + return nil, 0, multigas.ComputationGas(gasSupplied), err } callerCtx.State = state } @@ -756,10 +757,10 @@ func (p *Precompile) Call( callerCtx.txProcessor = txProcessor case *vm.DefaultTxProcessor: log.Error("processing hook not set") - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted default: log.Error("unknown processing hook") - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } reflectArgs := []reflect.Value{ @@ -783,7 +784,7 @@ func (p *Precompile) Call( args, err := method.template.Inputs.Unpack(input[4:]) if err != nil { // calldata does not match the method's signature - return nil, 0, vm.ErrExecutionReverted + return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } for _, arg := range args { converted := reflect.ValueOf(arg).Convert(method.handler.Type.In(len(reflectArgs))) @@ -797,20 +798,22 @@ func (p *Precompile) Call( errRet, ok := reflectResult[resultCount].Interface().(error) if !ok { 
log.Error("final precompile return value must be error") - return nil, callerCtx.gasLeft, vm.ErrExecutionReverted + return nil, callerCtx.GasLeft(), callerCtx.gasUsed, vm.ErrExecutionReverted } var solErr *SolError isSolErr := errors.As(errRet, &solErr) if isSolErr { resultCost := params.CopyGas * arbmath.WordsForBytes(uint64(len(solErr.data))) - if err := callerCtx.Burn(resultCost); err != nil { + if err := callerCtx.Burn(multigas.ResourceKindComputation, resultCost); err != nil { // user cannot afford the result data returned - return nil, 0, vm.ErrExecutionReverted + return nil, 0, callerCtx.gasUsed, vm.ErrExecutionReverted } - return solErr.data, callerCtx.gasLeft, vm.ErrExecutionReverted + return solErr.data, callerCtx.GasLeft(), callerCtx.gasUsed, vm.ErrExecutionReverted } if errors.Is(errRet, programs.ErrProgramActivation) { - return nil, 0, errRet + // Ensure we burn all remaining gas + callerCtx.BurnOut() //nolint:errcheck + return nil, 0, callerCtx.gasUsed, errRet } if !errors.Is(errRet, vm.ErrOutOfGas) { log.Debug( @@ -820,10 +823,11 @@ func (p *Precompile) Call( } // nolint:errorlint if arbosVersion >= params.ArbosVersion_11 || errRet == vm.ErrExecutionReverted { - return nil, callerCtx.gasLeft, vm.ErrExecutionReverted + return nil, callerCtx.GasLeft(), callerCtx.gasUsed, vm.ErrExecutionReverted } // Preserve behavior with old versions which would zero out gas on this type of error - return nil, 0, errRet + callerCtx.BurnOut() //nolint:errcheck + return nil, 0, callerCtx.gasUsed, errRet } result := make([]interface{}, resultCount) for i := 0; i < resultCount; i++ { @@ -833,16 +837,16 @@ func (p *Precompile) Call( encoded, err := method.template.Outputs.PackValues(result) if err != nil { log.Error("could not encode precompile result", "err", err) - return nil, callerCtx.gasLeft, vm.ErrExecutionReverted + return nil, callerCtx.GasLeft(), callerCtx.gasUsed, vm.ErrExecutionReverted } resultCost := params.CopyGas * arbmath.WordsForBytes(uint64(len(encoded))) 
- if err := callerCtx.Burn(resultCost); err != nil { + if err := callerCtx.Burn(multigas.ResourceKindComputation, resultCost); err != nil { // user cannot afford the result data returned - return nil, 0, vm.ErrExecutionReverted + return nil, 0, callerCtx.gasUsed, vm.ErrExecutionReverted } - return encoded, callerCtx.gasLeft, nil + return encoded, callerCtx.GasLeft(), callerCtx.gasUsed, nil } func (p *Precompile) Precompile() *Precompile { diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index b653b141a6..abec30b044 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -51,7 +51,7 @@ func TestEvents(t *testing.T) { caller := common.HexToAddress("aaaaaaaabbbbbbbbccccccccdddddddd") number := big.NewInt(0x9364) - output, gasLeft, err := contract.Call( + output, gasLeft, _, err := contract.Call( data, debugContractAddr, debugContractAddr, diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index 736c7c1aec..861ab6c699 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -7,6 +7,7 @@ import ( "errors" "math/big" + "github.com/ethereum/go-ethereum/arbitrum/multigas" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" @@ -35,7 +36,7 @@ func (wrapper *DebugPrecompile) Call( readOnly bool, gasSupplied uint64, evm *vm.EVM, -) ([]byte, uint64, error) { +) ([]byte, uint64, multigas.MultiGas, error) { debugMode := evm.ChainConfig().DebugMode() @@ -44,7 +45,7 @@ func (wrapper *DebugPrecompile) Call( return con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) } // Take all gas. 
- return nil, 0, errors.New("debug precompiles are disabled") + return nil, 0, multigas.ComputationGas(gasSupplied), errors.New("debug precompiles are disabled") } func (wrapper *DebugPrecompile) Precompile() *Precompile { @@ -77,33 +78,33 @@ func (wrapper *OwnerPrecompile) Call( readOnly bool, gasSupplied uint64, evm *vm.EVM, -) ([]byte, uint64, error) { +) ([]byte, uint64, multigas.MultiGas, error) { con := wrapper.precompile burner := &Context{ gasSupplied: gasSupplied, - gasLeft: gasSupplied, + gasUsed: multigas.ZeroGas(), tracingInfo: util.NewTracingInfo(evm, caller, precompileAddress, util.TracingDuringEVM), } state, err := arbosState.OpenArbosState(evm.StateDB, burner) if err != nil { - return nil, burner.gasLeft, err + return nil, burner.GasLeft(), burner.gasUsed, err } owners := state.ChainOwners() isOwner, err := owners.IsMember(caller) if err != nil { - return nil, burner.gasLeft, err + return nil, burner.GasLeft(), burner.gasUsed, err } if !isOwner { - return nil, burner.gasLeft, errors.New("unauthorized caller to access-controlled method") + return nil, burner.GasLeft(), burner.gasUsed, errors.New("unauthorized caller to access-controlled method") } - output, _, err := con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) + output, _, _, err := con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) if err != nil { - return output, gasSupplied, err // we don't deduct gas since we don't want to charge the owner + return output, gasSupplied, multigas.ZeroGas(), err // we don't deduct gas since we don't want to charge the owner } version := arbosState.ArbOSVersion(evm.StateDB) @@ -114,7 +115,7 @@ func (wrapper *OwnerPrecompile) Call( } } - return output, gasSupplied, err // we don't deduct gas since we don't want to charge the owner + return output, gasSupplied, multigas.ZeroGas(), err // we don't deduct gas since we don't want to charge the owner } func (wrapper 
*OwnerPrecompile) Precompile() *Precompile { From 70556deb38338fb7ef499ad519986550d9a363d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Tue, 30 Sep 2025 19:33:13 +0200 Subject: [PATCH 47/56] Enable CodeCov analysis (#3714) * Reduce CI * Run basic test results action * Parse junitfile argument for gotestsum * Add permissions: pull-requests: write * ... in a right place. Also fail if tests fail * Fail test intentionally * Inspect the problem * parse single file * fail another test * print why 22 tests failed * single rerun * run for defaults * fix l3challenge test mode * revert geth update * shortening script * run script in CI * keep last 20 lines instead of first 2048 characters * move codecov steps just after testing * revert changes to ci.yml * Try nextest * Upload Rust results * Upload go test results * revert changes to docker * merge results from different workflows * needs * line breaks * line breaks * rm icon * minor fixes * restore CI * needs fix * permissions: pull-requests: write * revert * whitechars --- .github/workflows/_arbitrator.yml | 32 ++++++++++++++- .github/workflows/_codecov.yml | 34 ++++++++++++++++ .github/workflows/_go-tests.yml | 50 ++++++++++++++++++----- .github/workflows/ci.yml | 11 +++++ .github/workflows/gotestsum.sh | 40 +++++++++++++----- .github/workflows/process_junit.py | 65 ++++++++++++++++++++++++++++++ 6 files changed, 211 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/_codecov.yml create mode 100644 .github/workflows/process_junit.py diff --git a/.github/workflows/_arbitrator.yml b/.github/workflows/_arbitrator.yml index 09c4e1dbcd..84b8941b09 100644 --- a/.github/workflows/_arbitrator.yml +++ b/.github/workflows/_arbitrator.yml @@ -37,6 +37,18 @@ jobs: # so make sure it doesn't conflict with anything! 
no-cache: true + - name: Install cargo-nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9 + + - name: Create minimal nextest.toml config file + run: | + cat << EOF > nextest.toml + [profile.ci.junit] + path = "junit.xml" + EOF + - name: Make arbitrator libraries run: make -j wasm-ci-build @@ -44,7 +56,25 @@ jobs: run: cargo clippy --all --manifest-path arbitrator/Cargo.toml -- -D warnings - name: Run rust tests - run: cargo test -p arbutil -p prover -p jit -p stylus --release --manifest-path arbitrator/prover/Cargo.toml + id: run-rust-tests + continue-on-error: true + run: >- + cargo nextest run -p arbutil -p prover -p jit -p stylus --release + --manifest-path arbitrator/prover/Cargo.toml + --profile ci --config-file nextest.toml + + - name: Upload rust test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: rust-junit-reports + path: ./arbitrator/target/nextest/ci/junit.xml + + - name: Fail if rust tests failed + if: steps.run-rust-tests.outcome == 'failure' + run: | + echo "Rust tests failed. Failing the workflow as required." 
+ exit 1 - name: Check stylus_bechmark run: cargo check --manifest-path arbitrator/tools/stylus_benchmark/Cargo.toml diff --git a/.github/workflows/_codecov.yml b/.github/workflows/_codecov.yml new file mode 100644 index 0000000000..9d5d0bccd6 --- /dev/null +++ b/.github/workflows/_codecov.yml @@ -0,0 +1,34 @@ +--- +name: codecov +on: + workflow_call: + +jobs: + report_summary: + name: Aggregate test results + runs-on: ubuntu-4 + permissions: + pull-requests: write + + steps: + - name: Checkout repository + uses: actions/checkout@v5 + + - name: Download Go JUnit reports + uses: actions/download-artifact@v4 + with: + name: go-junit-reports + path: downloaded-reports/go + + - name: Download Rust JUnit reports + uses: actions/download-artifact@v4 + continue-on-error: true # Rust pipeline might have not been run + with: + name: rust-junit-reports + path: downloaded-reports/rust + + - name: Run Basic Test Results Action (Unified Report) + uses: codecov/basic-test-results@v1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + directory: 'downloaded-reports/' diff --git a/.github/workflows/_go-tests.yml b/.github/workflows/_go-tests.yml index c65fe18e07..a9a0f06c7a 100644 --- a/.github/workflows/_go-tests.yml +++ b/.github/workflows/_go-tests.yml @@ -16,6 +16,7 @@ jobs: image: redis ports: - 6379:6379 + steps: - name: Checkout uses: actions/checkout@v5 @@ -39,17 +40,40 @@ jobs: - name: Build all lint dependencies run: make -j8 build-node-deps + # --------------------- PATHDB MODE --------------------- + - name: run tests without race detection and path state scheme if: matrix.test-mode == 'pathdb' run: >- ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 90m --cover --test_state_scheme path + # --------------------- DEFAULTS MODE --------------------- + - name: run tests without race detection and hash state scheme if: matrix.test-mode == 'defaults' + id: run-tests-defaults + continue-on-error: true run: >- ${{ github.workspace 
}}/.github/workflows/gotestsum.sh - --tags cionly --timeout 60m --test_state_scheme hash + --tags cionly --timeout 60m --test_state_scheme hash --junitfile test-results/junit.xml + + - name: Process JUnit XML logs + if: matrix.test-mode == 'defaults' && always() + run: python3 ${{ github.workspace }}/.github/workflows/process_junit.py test-results/ + + - name: Upload Go test Artifacts + if: matrix.test-mode == 'defaults' && always() + uses: actions/upload-artifact@v4 + with: + name: go-junit-reports + path: test-results/junit_*.xml + + - name: Fail if tests failed + if: matrix.test-mode == 'defaults' && steps.run-tests-defaults.outcome == 'failure' + run: | + echo "One or more tests failed." + exit 1 - name: run redis tests if: matrix.test-mode == 'defaults' @@ -57,6 +81,15 @@ jobs: gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... -- --test_redis=redis://localhost:6379/0 + - name: Upload coverage to Codecov + if: matrix.test-mode == 'defaults' + uses: codecov/codecov-action@v5 + with: + fail_ci_if_error: false + files: ./coverage.txt,./coverage-redis.txt + verbose: false + token: ${{ secrets.CODECOV_TOKEN }} + - name: create block input json file if: matrix.test-mode == 'defaults' run: >- @@ -83,6 +116,8 @@ jobs: exit 1 fi + # --------------------- CHALLENGE MODES --------------------- + - name: build challenge tests if: matrix.test-mode == 'challenge' run: go test -tags challengetest ./... 
-run=^$ -v @@ -93,23 +128,18 @@ jobs: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest --run TestL3Challenge --timeout 120m --cover + # --------------------- CHALLENGE MODES --------------------- + - name: run stylus tests if: matrix.test-mode == 'stylus' run: >- ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest --run TestProgramArbitrator --timeout 60m --cover + # --------------------- ARCHIVE LOGS FOR ALL MODES --------------------- + - name: Archive detailed run log uses: actions/upload-artifact@v4 with: name: ${{ matrix.test-mode }}-full.log path: full.log - - - name: Upload coverage to Codecov - if: matrix.test-mode == 'defaults' - uses: codecov/codecov-action@v5 - with: - fail_ci_if_error: false - files: ./coverage.txt,./coverage-redis.txt - verbose: false - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 746d4fb4c4..553662682d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,8 +36,10 @@ jobs: - 'arbitrator/**' - 'contracts/**' - 'Makefile' + - '.github/workflows/_arbitrator.yml' bold_legacy: - 'bold/legacy/**' + - '.github/workflows/_bold-legacy.yml' # --- Fast: Build + Lint only (required by can_proceed) --- fast: @@ -63,6 +65,15 @@ jobs: uses: ./.github/workflows/_go-tests.yml secrets: inherit + # --- Summarize test results (not required by can_proceed) --- + codecov: + needs: [go-tests, arbitrator] + if: always() + uses: ./.github/workflows/_codecov.yml + secrets: inherit + permissions: + pull-requests: write + can_proceed: name: can_proceed runs-on: ubuntu-4 diff --git a/.github/workflows/gotestsum.sh b/.github/workflows/gotestsum.sh index 787bad06ae..b29575583c 100755 --- a/.github/workflows/gotestsum.sh +++ b/.github/workflows/gotestsum.sh @@ -11,6 +11,7 @@ timeout="" tags="" run="" test_state_scheme="" +junitfile="" log=true race=false cover=false @@ -48,10 +49,16 @@ while [[ $# -gt 0 ]]; do cover=true shift ;; - 
--nolog) - log=false - shift - ;; + --nolog) + log=false + shift + ;; + --junitfile) + shift + check_missing_value $# "$1" "--junitfile" + junitfile=$1 + shift + ;; *) echo "Invalid argument: $1" exit 1 @@ -61,7 +68,20 @@ done packages=$(go list ./...) for package in $packages; do - cmd="stdbuf -oL gotestsum --format short-verbose --packages=\"$package\" --rerun-fails=3 --rerun-fails-max-failures=30 --no-color=false --" + # Add the gotestsum flags first + cmd="stdbuf -oL gotestsum --format short-verbose --packages=\"$package\" --rerun-fails=1 --rerun-fails-max-failures=30 --no-color=false" + + if [ "$junitfile" != "" ]; then + # Since we run tests package-by-package, we must make the JUnit file name unique + # to avoid overwriting. We'll append the package name (slugified) to the base file. + sanitized_package_name=$(echo "$package" | tr -c '[:alnum:]' '_') + unique_junit_file="${junitfile%.*}_${sanitized_package_name}.xml" + cmd="$cmd --junitfile \"$unique_junit_file\"" + fi + + # Append the separator and go test arguments + cmd="$cmd --" + if [ "$timeout" != "" ]; then cmd="$cmd -timeout $timeout" fi @@ -88,11 +108,11 @@ for package in $packages; do cmd="$cmd -args -- --test_loglevel=8" # Use error log level, which is the value 8 in the slog level enum for tests. 
fi - if [ "$log" == true ]; then - cmd="$cmd > >(stdbuf -oL tee -a full.log | grep -vE \"DEBUG|TRACE|INFO|seal\")" - else - cmd="$cmd | grep -vE \"DEBUG|TRACE|INFO|seal\"" - fi + if [ "$log" == true ]; then + cmd="$cmd > >(stdbuf -oL tee -a full.log | grep -vE \"DEBUG|TRACE|INFO|seal\")" + else + cmd="$cmd | grep -vE \"DEBUG|TRACE|INFO|seal\"" + fi echo "" echo running tests for "$package" diff --git a/.github/workflows/process_junit.py b/.github/workflows/process_junit.py new file mode 100644 index 0000000000..c77cfd5d02 --- /dev/null +++ b/.github/workflows/process_junit.py @@ -0,0 +1,65 @@ +import sys +import os +import glob +import xml.etree.ElementTree as ET + +LINES_TO_KEEP = 20 + +def shorten_content(element: ET.Element): + original_content = element.text + if not original_content: + return + + lines = original_content.splitlines() + + if len(lines) > LINES_TO_KEEP: + header = f"... [CONTENT TRUNCATED: Keeping last {LINES_TO_KEEP} lines]\n" + truncated_lines = lines[-LINES_TO_KEEP:] + content = header + '\n'.join(truncated_lines) + else: + content = original_content + + element.text = content + + +def process_single_file(filepath: str) -> bool: + print(f" Processing: {filepath}") + try: + tree = ET.parse(filepath) + root = tree.getroot() + + for elem in root.iter(): + if elem.tag in ['failure']: + shorten_content(elem) + + tree.write(filepath, encoding='UTF-8', xml_declaration=True) + return True + + except ET.ParseError as e: + print(f" Error parsing XML file {filepath}: {e}", file=sys.stderr) + return False + except Exception as e: + print(f" An unexpected error occurred processing {filepath}: {e}", file=sys.stderr) + return False + + +def process_junit_files(report_dir): + search_path = os.path.join(report_dir, 'junit*.xml') + file_paths = glob.glob(search_path) + + if not file_paths: + print(f"No JUnit XML files found in {report_dir} matching 'junit*.xml'. 
Exiting gracefully.") + sys.exit(0) + + print(f"Found {len(file_paths)} JUnit XML files to process.") + + success_count = 0 + for filepath in file_paths: + if process_single_file(filepath): + success_count += 1 + + print(f"\nProcessing complete: Successfully modified {success_count} of {len(file_paths)} reports.") + + +if __name__ == '__main__': + process_junit_files(sys.argv[1]) From 242220e501d0050be32a6280a3f53b881a86d0dd Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg <65945052+tsahee@users.noreply.github.com> Date: Tue, 30 Sep 2025 13:43:30 -0600 Subject: [PATCH 48/56] Refactor caller context creation and remove redundant argument (#3733) * Refactor caller context creation and remove redundant argument * fix merge conflicts --- execution/nodeInterface/virtual-contracts.go | 2 +- gethhook/geth-hook.go | 2 +- precompiles/ArbRetryableTx_test.go | 1 - precompiles/context.go | 41 ++++++++++++++++++++ precompiles/precompile.go | 41 +++++--------------- precompiles/precompile_test.go | 1 - precompiles/wrapper.go | 16 +++++--- 7 files changed, 64 insertions(+), 40 deletions(-) diff --git a/execution/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go index 92232259e3..c709cc7131 100644 --- a/execution/nodeInterface/virtual-contracts.go +++ b/execution/nodeInterface/virtual-contracts.go @@ -96,7 +96,7 @@ func init() { core.ReadyEVMForL2(evm, msg) output, _, gasUsed, err := precompile.Call( - msg.Data, address, address, msg.From, msg.Value, false, msg.GasLimit, evm, + msg.Data, address, msg.From, msg.Value, false, msg.GasLimit, evm, ) if err != nil { return msg, nil, err diff --git a/gethhook/geth-hook.go b/gethhook/geth-hook.go index b3b881729c..ab65902d94 100644 --- a/gethhook/geth-hook.go +++ b/gethhook/geth-hook.go @@ -46,7 +46,7 @@ func (p ArbosPrecompileWrapper) RunAdvanced( defer info.Evm.DecrementDepth() return p.inner.Call( - input, info.PrecompileAddress, info.ActingAsAddress, + input, info.ActingAsAddress, info.Caller,
info.Value, info.ReadOnly, gasSupplied, info.Evm, ) } diff --git a/precompiles/ArbRetryableTx_test.go b/precompiles/ArbRetryableTx_test.go index 6c2ba3ff3a..645ad19d04 100644 --- a/precompiles/ArbRetryableTx_test.go +++ b/precompiles/ArbRetryableTx_test.go @@ -72,7 +72,6 @@ func TestRetryableRedeem(t *testing.T) { _, gasLeft, _, err := Precompiles()[retryAddress].Call( redeemCalldata, retryAddress, - retryAddress, common.Address{}, big.NewInt(0), false, diff --git a/precompiles/context.go b/precompiles/context.go index 9cc3b12a38..7221c96781 100644 --- a/precompiles/context.go +++ b/precompiles/context.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -98,3 +99,43 @@ func testContext(caller addr, evm mech) *Context { } return ctx } + +func makeContext(p *Precompile, method *PrecompileMethod, caller common.Address, gas uint64, evm *vm.EVM) (*Context, error) { + txProcessor, ok := evm.ProcessingHook.(*arbos.TxProcessor) + if !ok { + log.Error("processing hook not set") + return nil, vm.ErrExecutionReverted + } + + readOnly := method.purity <= view + + callerCtx := &Context{ + caller: caller, + gasSupplied: gas, + gasUsed: multigas.ZeroGas(), + readOnly: readOnly, + txProcessor: txProcessor, + tracingInfo: util.NewTracingInfo(evm, caller, p.address, util.TracingDuringEVM), + } + + if method.purity != pure { + state, err := arbosState.OpenArbosState(evm.StateDB, callerCtx) + if err != nil { + return nil, err + } + callerCtx.State = state + } + + if method.purity >= write && evm.ReadOnly() { + toBurn, err := callerCtx.State.L2PricingState().PerTxGasLimit() + if err != nil { + return nil, err + } + err = callerCtx.Burn(multigas.ResourceKindComputation, toBurn) + if err != nil { + return nil, err + } + } + + return callerCtx, nil +} diff 
--git a/precompiles/precompile.go b/precompiles/precompile.go index 46e42a719d..1ad1e486ff 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -27,7 +27,6 @@ import ( "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/programs" - "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -39,7 +38,6 @@ type ArbosPrecompile interface { // In that case, unless this precompile is pure, it should probably revert. Call( input []byte, - precompileAddress common.Address, actingAsAddress common.Address, caller common.Address, value *big.Int, @@ -50,6 +48,7 @@ type ArbosPrecompile interface { Precompile() *Precompile Name() string + Address() common.Address } type purity uint8 @@ -683,10 +682,13 @@ func (p *Precompile) ArbosVersion() uint64 { return p.arbosVersion } +func (p *Precompile) Address() common.Address { + return p.address +} + // Call a precompile in typed form, deserializing its inputs and serializing its outputs func (p *Precompile) Call( input []byte, - precompileAddress common.Address, actingAsAddress common.Address, caller common.Address, value *big.Int, @@ -712,7 +714,7 @@ func (p *Precompile) Call( return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } - if method.purity >= view && actingAsAddress != precompileAddress { + if method.purity >= view && actingAsAddress != p.address { // should not access precompile superpowers when not acting as the precompile return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } @@ -727,12 +729,9 @@ func (p *Precompile) Call( return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } - callerCtx := &Context{ - caller: caller, - gasSupplied: gasSupplied, - gasUsed: multigas.ZeroGas(), - readOnly: method.purity <= view, - tracingInfo: util.NewTracingInfo(evm, caller, 
precompileAddress, util.TracingDuringEVM), + callerCtx, err := makeContext(p, method, caller, gasSupplied, evm) + if err != nil { + return nil, 0, multigas.ComputationGas(gasSupplied), err } // len(input) must be at least 4 because of the check near the start of this function @@ -743,26 +742,6 @@ func (p *Precompile) Call( return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted } - if method.purity != pure { - // impure methods may need the ArbOS state, so open & update the call context now - state, err := arbosState.OpenArbosState(evm.StateDB, callerCtx) - if err != nil { - return nil, 0, multigas.ComputationGas(gasSupplied), err - } - callerCtx.State = state - } - - switch txProcessor := evm.ProcessingHook.(type) { - case *arbos.TxProcessor: - callerCtx.txProcessor = txProcessor - case *vm.DefaultTxProcessor: - log.Error("processing hook not set") - return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted - default: - log.Error("unknown processing hook") - return nil, 0, multigas.ComputationGas(gasSupplied), vm.ErrExecutionReverted - } - reflectArgs := []reflect.Value{ p.implementer, reflect.ValueOf(callerCtx), @@ -818,7 +797,7 @@ func (p *Precompile) Call( if !errors.Is(errRet, vm.ErrOutOfGas) { log.Debug( "precompile reverted with non-solidity error", - "precompile", precompileAddress, "input", input, "err", errRet, + "precompile", p.address, "input", input, "err", errRet, ) } // nolint:errorlint diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index abec30b044..0612b9c994 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -54,7 +54,6 @@ func TestEvents(t *testing.T) { output, gasLeft, _, err := contract.Call( data, debugContractAddr, - debugContractAddr, caller, number, false, diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index 861ab6c699..17ef00cd0f 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -27,9 +27,12 @@ func 
debugOnly(address addr, impl ArbosPrecompile) (addr, ArbosPrecompile) { return address, &DebugPrecompile{impl} } +func (wrapper *DebugPrecompile) Address() common.Address { + return wrapper.precompile.Address() +} + func (wrapper *DebugPrecompile) Call( input []byte, - precompileAddress common.Address, actingAsAddress common.Address, caller common.Address, value *big.Int, @@ -42,7 +45,7 @@ func (wrapper *DebugPrecompile) Call( if debugMode { con := wrapper.precompile - return con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) + return con.Call(input, actingAsAddress, caller, value, readOnly, gasSupplied, evm) } // Take all gas. return nil, 0, multigas.ComputationGas(gasSupplied), errors.New("debug precompiles are disabled") @@ -69,9 +72,12 @@ func ownerOnly(address addr, impl ArbosPrecompile, emit func(mech, bytes4, addr, } } +func (wrapper *OwnerPrecompile) Address() common.Address { + return wrapper.precompile.Address() +} + func (wrapper *OwnerPrecompile) Call( input []byte, - precompileAddress common.Address, actingAsAddress common.Address, caller common.Address, value *big.Int, @@ -84,7 +90,7 @@ func (wrapper *OwnerPrecompile) Call( burner := &Context{ gasSupplied: gasSupplied, gasUsed: multigas.ZeroGas(), - tracingInfo: util.NewTracingInfo(evm, caller, precompileAddress, util.TracingDuringEVM), + tracingInfo: util.NewTracingInfo(evm, caller, wrapper.precompile.Address(), util.TracingDuringEVM), } state, err := arbosState.OpenArbosState(evm.StateDB, burner) if err != nil { @@ -101,7 +107,7 @@ func (wrapper *OwnerPrecompile) Call( return nil, burner.GasLeft(), burner.gasUsed, errors.New("unauthorized caller to access-controlled method") } - output, _, _, err := con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) + output, _, _, err := con.Call(input, actingAsAddress, caller, value, readOnly, gasSupplied, evm) if err != nil { return output, gasSupplied, multigas.ZeroGas(), err 
// we don't deduct gas since we don't want to charge the owner From 56547acccaa25d007a4cf3486b23bb05d4f143a9 Mon Sep 17 00:00:00 2001 From: Aman Sanghi <102982411+amsanghi@users.noreply.github.com> Date: Wed, 1 Oct 2025 05:39:18 +0530 Subject: [PATCH 49/56] Merge v1.16.4 (#3734) * Instrument multi-gas in precompiles Close NIT-1557 * Bump go-ethereum * Change balance multi-gas dimension * Test for context burn, burned and gas left * Fix lint warning * Merge v1.16.4 --------- Co-authored-by: Gabriel de Quadros Ligneul Co-authored-by: Pepper Lebeck-Jobe --- arbos/arbosState/arbosstate.go | 6 +++--- arbos/arbosState/initialize.go | 2 +- execution/gethexec/contract_adapter.go | 26 ++++++++++++------------ execution/nodeInterface/NodeInterface.go | 4 ++-- go-ethereum | 2 +- go.mod | 11 +++++----- go.sum | 22 +++++++++++--------- 7 files changed, 38 insertions(+), 35 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 238e14ce52..7338dc8485 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -210,7 +210,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p // To work around this, we give precompiles fake code. for addr, version := range PrecompileMinArbOSVersions { if version == 0 { - stateDB.SetCode(addr, []byte{byte(vm.INVALID)}) + stateDB.SetCode(addr, []byte{byte(vm.INVALID)}, tracing.CodeChangeUnspecified) } } @@ -371,7 +371,7 @@ func (state *ArbosState) UpgradeArbosVersion( case params.ArbosVersion_40: // EIP-2935: Add support for historical block hashes. stateDB.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) - stateDB.SetCode(params.HistoryStorageAddress, params.HistoryStorageCodeArbitrum) + stateDB.SetCode(params.HistoryStorageAddress, params.HistoryStorageCodeArbitrum, tracing.CodeChangeUnspecified) // The MaxWasmSize was a constant before arbos version 40, and can // be read as a parameter after arbos version 40. 
params, err := state.Programs().Params() @@ -402,7 +402,7 @@ func (state *ArbosState) UpgradeArbosVersion( // install any new precompiles for addr, version := range PrecompileMinArbOSVersions { if version == nextArbosVersion { - stateDB.SetCode(addr, []byte{byte(vm.INVALID)}) + stateDB.SetCode(addr, []byte{byte(vm.INVALID)}, tracing.CodeChangeUnspecified) } } diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index f050b5fcc8..e3671c2290 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -177,7 +177,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.BlockChainCo statedb.SetBalance(account.Addr, uint256.MustFromBig(account.EthBalance), tracing.BalanceChangeUnspecified) statedb.SetNonce(account.Addr, account.Nonce, tracing.NonceChangeUnspecified) if account.ContractInfo != nil { - statedb.SetCode(account.Addr, account.ContractInfo.Code) + statedb.SetCode(account.Addr, account.ContractInfo.Code, tracing.CodeChangeUnspecified) for k, v := range account.ContractInfo.ContractStorage { statedb.SetState(account.Addr, k, v) } diff --git a/execution/gethexec/contract_adapter.go b/execution/gethexec/contract_adapter.go index 6e182c3844..aa136b8ebf 100644 --- a/execution/gethexec/contract_adapter.go +++ b/execution/gethexec/contract_adapter.go @@ -74,19 +74,19 @@ func (a *contractAdapter) CallContract(ctx context.Context, call ethereum.CallMs } msg := &core.Message{ - From: call.From, - To: call.To, - Value: big.NewInt(0), - GasLimit: math.MaxUint64, - GasPrice: big.NewInt(0), - GasFeeCap: big.NewInt(0), - GasTipCap: big.NewInt(0), - Data: call.Data, - AccessList: call.AccessList, - SkipNonceChecks: true, - SkipFromEOACheck: true, - TxRunContext: core.NewMessageEthcallContext(), // Indicate this is an eth_call - SkipL1Charging: true, // Skip L1 data fees + From: call.From, + To: call.To, + Value: big.NewInt(0), + GasLimit: math.MaxUint64, + GasPrice: big.NewInt(0), + GasFeeCap: big.NewInt(0), + 
GasTipCap: big.NewInt(0), + Data: call.Data, + AccessList: call.AccessList, + SkipNonceChecks: true, + SkipTransactionChecks: true, + TxRunContext: core.NewMessageEthcallContext(), // Indicate this is an eth_call + SkipL1Charging: true, // Skip L1 data fees } evm := a.apiBackend.GetEVM(ctx, state, header, &vm.Config{NoBaseFee: true}, nil) diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go index 9fe520e84d..3169cd9a07 100644 --- a/execution/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -532,7 +532,7 @@ func (n NodeInterface) GasEstimateL1Component( if !ok { return 0, nil, nil, errors.New("failed to cast to stateDB") } - msg := args.ToMessage(evm.Context.BaseFee, randomGas, n.header, sdb, core.NewMessageEthcallContext(), true, true) + msg := args.ToMessage(evm.Context.BaseFee, randomGas, n.header, sdb, core.NewMessageEthcallContext(), true) pricing := c.State.L1PricingState() l1BaseFeeEstimate, err := pricing.PricePerUnit() @@ -592,7 +592,7 @@ func (n NodeInterface) GasEstimateComponents( if !ok { return 0, 0, nil, nil, errors.New("failed to cast to stateDB") } - msg := args.ToMessage(evm.Context.BaseFee, gasCap, n.header, sdb, core.NewMessageGasEstimationContext(), true, true) + msg := args.ToMessage(evm.Context.BaseFee, gasCap, n.header, sdb, core.NewMessageGasEstimationContext(), true) brotliCompressionLevel, err := c.State.BrotliCompressionLevel() if err != nil { return 0, 0, nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) diff --git a/go-ethereum b/go-ethereum index 914f7a78ea..80a4f7e202 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 914f7a78ead98713b4d1e6c82d3f6464d18d0701 +Subproject commit 80a4f7e2022ec2b37ddd4bb706af1c855a9460f6 diff --git a/go.mod b/go.mod index 03c9eec434..dffb0c7316 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.36.0 golang.org/x/sync 
v0.12.0 - golang.org/x/sys v0.34.0 + golang.org/x/sys v0.36.0 golang.org/x/term v0.30.0 golang.org/x/tools v0.29.0 google.golang.org/api v0.187.0 @@ -63,10 +63,11 @@ require ( cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect - github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/dchest/siphash v1.2.3 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect + github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect @@ -155,7 +156,7 @@ require ( github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/h2non/filetype v1.0.6 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect - github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect + github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect @@ -185,7 +186,7 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/supranational/blst v0.3.14 // indirect + github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/urfave/cli/v2 v2.27.5 // indirect diff --git a/go.sum b/go.sum index 663b2eefd1..f89ef510c2 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/consensys/gnark-crypto v0.18.0 
h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEf github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= -github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= +github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -170,8 +170,10 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= -github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= +github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= +github.com/ethereum/go-bigmodexpfix 
v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -315,8 +317,8 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= @@ -508,8 +510,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= -github.com/supranational/blst v0.3.14/go.mod 
h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -639,8 +641,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= From d5417e1c7107b13daec912286daca3f1380c734f Mon Sep 17 00:00:00 2001 From: viktorking7 <140458814+viktorking7@users.noreply.github.com> Date: Wed, 1 Oct 2025 08:54:46 +0200 Subject: [PATCH 50/56] Update ticker.go (#3728) Co-authored-by: Pepper Lebeck-Jobe --- timeboost/ticker.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/timeboost/ticker.go b/timeboost/ticker.go index f9bfc18ed4..1244377160 100644 --- 
a/timeboost/ticker.go +++ b/timeboost/ticker.go @@ -33,10 +33,15 @@ func (t *roundTicker) start(timeBeforeRoundStart time.Duration) { nextTick += t.roundTimingInfo.Round } + // Use NewTimer instead of time.After to allow cancellation and avoid leaking timers + timer := time.NewTimer(nextTick) select { - case <-time.After(nextTick): + case <-timer.C: t.c <- time.Now() case <-t.done: + if !timer.Stop() { + <-timer.C + } close(t.c) return } From c46e1e91b1aaf75b8465313deb354cc401ecee03 Mon Sep 17 00:00:00 2001 From: Forostovec Date: Wed, 1 Oct 2025 09:59:54 +0300 Subject: [PATCH 51/56] feat(nitro-val): implement ValidationNodeConfig.Validate with logging and persistent checks (#3735) Co-authored-by: Pepper Lebeck-Jobe --- cmd/nitro-val/config.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 89269203ef..fdabdd0f12 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "io" "reflect" "time" @@ -145,8 +146,13 @@ func (c *ValidationNodeConfig) GetReloadInterval() time.Duration { } func (c *ValidationNodeConfig) Validate() error { - // TODO - return nil + if _, err := genericconf.HandlerFromLogType(c.LogType, io.Discard); err != nil { + return fmt.Errorf("invalid log-type: %w", err) + } + if _, err := genericconf.ToSlogLevel(c.LogLevel); err != nil { + return fmt.Errorf("invalid log-level: %w", err) + } + return c.Persistent.Validate() } var DefaultValidationNodeStackConfig = node.Config{ From 16f5868cbf9dba452295a7b0f0990cab6b8beb9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Wed, 1 Oct 2025 14:11:33 +0200 Subject: [PATCH 52/56] Use also head_ref for limiting CI concurrency (#3741) * s/github.ref/github.head_ref * actually... 
both * add fallback value --- .github/workflows/ci.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker.yml | 2 +- .github/workflows/submodule-pin-check.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 553662682d..774beb690e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,7 @@ name: CI run-name: CI triggered from @${{ github.actor }} of ${{ github.head_ref }} concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ github.ref }} cancel-in-progress: true on: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8fb52c58f2..512abd7fd8 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -2,7 +2,7 @@ name: CodeQL concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ github.ref }} cancel-in-progress: true on: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c4bc249b93..e4fba9a903 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,7 +3,7 @@ name: Docker build CI run-name: Docker build CI triggered from @${{ github.actor }} of ${{ github.head_ref }} concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ github.ref }} cancel-in-progress: true on: diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 4a1c3b4db6..fcda0c146c 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -2,7 +2,7 @@ name: Check Submodule Pins concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || 
github.run_id }}-${{ github.ref }} cancel-in-progress: true on: From 25a3d928250322f9f5f00428abdd289d25a13f36 Mon Sep 17 00:00:00 2001 From: futreall <86553580+futreall@users.noreply.github.com> Date: Wed, 1 Oct 2025 14:28:11 +0200 Subject: [PATCH 53/56] remove: delete unused HashPlusInt function from arbos/util (#3730) * remove: delete unused HashPlusInt function from arbos/util * Update util.go --------- Co-authored-by: Pepper Lebeck-Jobe --- arbos/util/util.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arbos/util/util.go b/arbos/util/util.go index d620983933..30bd166a21 100644 --- a/arbos/util/util.go +++ b/arbos/util/util.go @@ -200,10 +200,6 @@ func UintToHash(val uint64) common.Hash { return common.BigToHash(new(big.Int).SetUint64(val)) } -func HashPlusInt(x common.Hash, y int64) common.Hash { - return common.BigToHash(new(big.Int).Add(x.Big(), big.NewInt(y))) // BUGBUG: BigToHash(x) converts abs(x) to a Hash -} - func RemapL1Address(l1Addr common.Address) common.Address { sumBytes := new(big.Int).Add(new(big.Int).SetBytes(l1Addr.Bytes()), AddressAliasOffset).Bytes() if len(sumBytes) > 20 { From 6c74ee241db6586e6296a36aa250f4f02a02c1de Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Wed, 1 Oct 2025 19:30:00 +0100 Subject: [PATCH 54/56] Add support for consensus-v50-rc.5 to Dockerfile (#3744) --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d15a959ea6..f2b15f7dd5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -241,8 +241,9 @@ RUN ./download-machine.sh consensus-v41 0xa18d6266cef250802c3cb2bfefe947ea1aa9a3 #RUN ./download-machine.sh consensus-v50-alpha.1 0x28cfd8d81613ce4ebe750e77bfd95d6d95d4f53240488095a11c1ad3a494fa82 #RUN ./download-machine.sh consensus-v50-rc.1 0x8fd725477d8ef58183a1a943c375a8495a22cd2d7d701ac917fe20d69993e88e #RUN ./download-machine.sh consensus-v50-rc.2 0xc1ea4d6d2791bf5bdf6de3c2166ce4aab8fe16ca4ad5c226e8ae31a8b77f1a08 -RUN ./download-machine.sh 
consensus-v50-rc.3 0x385fa2524d86d4ebc340988224f8686b3f485c7c9f7bc1015a64c85a9c76a6b0 +#RUN ./download-machine.sh consensus-v50-rc.3 0x385fa2524d86d4ebc340988224f8686b3f485c7c9f7bc1015a64c85a9c76a6b0 RUN ./download-machine.sh consensus-v50-rc.4 0x393be710f252e8217d66fe179739eba1ed471f0d5a847b5905c30926d853241a +RUN ./download-machine.sh consensus-v50-rc.5 0xb90895a56a59c0267c2004a0e103ad725bd98d5a05c3262806ab4ccb3f997558 RUN ./download-machine.sh consensus-v40 0xdb698a2576298f25448bc092e52cf13b1e24141c997135d70f217d674bbeb69a FROM golang:1.25-bookworm AS node-builder From d432fb008c04d86cc0bcb1c16c93be274e76eaa5 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 1 Oct 2025 14:56:36 -0500 Subject: [PATCH 55/56] remove account recreation (#3746) Co-authored-by: Joshua Colvin --- system_tests/bold_challenge_protocol_test.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go index 9525ae0123..d528eadcf3 100644 --- a/system_tests/bold_challenge_protocol_test.go +++ b/system_tests/bold_challenge_protocol_test.go @@ -37,12 +37,12 @@ import ( "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/bold/chain-abstraction" - "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation" - "github.com/offchainlabs/nitro/bold/challenge-manager" + protocol "github.com/offchainlabs/nitro/bold/chain-abstraction" + solimpl "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation" + challengemanager "github.com/offchainlabs/nitro/bold/challenge-manager" modes "github.com/offchainlabs/nitro/bold/challenge-manager/types" - "github.com/offchainlabs/nitro/bold/layer2-state-provider" - "github.com/offchainlabs/nitro/bold/testing" + l2stateprovider "github.com/offchainlabs/nitro/bold/layer2-state-provider" + challenge_testing 
"github.com/offchainlabs/nitro/bold/testing" "github.com/offchainlabs/nitro/bold/testing/setup" butil "github.com/offchainlabs/nitro/bold/util" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -557,9 +557,6 @@ func createTestNodeOnL1ForBoldProtocol( l2info = NewArbTestInfo(t, chainConfig.ChainID) } - l1info.GenerateAccount("RollupOwner") - l1info.GenerateAccount("Sequencer") - l1info.GenerateAccount("User") l1info.GenerateAccount("Asserter") l1info.GenerateAccount("EvilAsserter") From 949806aefdd7bf3b1359069b3d2443baaa047bf3 Mon Sep 17 00:00:00 2001 From: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> Date: Thu, 2 Oct 2025 10:54:36 +0200 Subject: [PATCH 56/56] Add DA proof support to daprovider interface (#3600) * Add DA proof support to daprovider interface This is part of a series of changes for the Custom DA project. Summary: - Added a new daprovider.Validator interface with methods for generating proofs for Custom DA systems. - Added a reference implementation of a Custom DA provider. - Added a daprovider factory that supports anytrust and referenceda modes. In a follow-up PR the nitro node startup sequence will be modified to use this. Currently only the separate daprovider server uses this. - Replaced the AnyTrust-specific (aka das) provider server with a unified provider server that works with anytrust or referenceda modes. - Extended the DA Client with new RPC methods for generating proofs. Notes: The separate provider server executable is a thin RPC server wrapper around the anytrust and referenceda implementations. The idea is that people wanting to integrate their own DA system can use this as a guide for how to implement their own RPC service that lives outside the nitro codebase; we won't be including support for any additional DA implementations in provider server executable that we distribute. 
For legacy AnyTrust deployments we will most likely continue to have nitro spawn the daprovider server in-process to avoid needing to run an extra service, but by channeling everything through the JSON-RPC interface it reduces surface area of what we have to support. The Reference DA (referenceda) implementation is a minimal working example of how one could implement a daprovider, including support for validating the certificate against trusted signers. It uses an in-memory data storage backend. In various places there is commented out code related to the osp contract bindings that haven't yet been committed to a nitro-contracts branch that we want to use in nitro master. This PR shouldn't change any existing functionality or behavior, except the daprovider executable (which isn't used in production) has some new configuration options: ``` --mode must be "anytrust" or "referenceda" --provider-server.* server config eg address, port, etc --anytrust.* was previously called das-server --referenceda.* referenceda specific options ``` As much as possible we will try to rename references to "das" to "anytrust". When we launched Anytrust, we only had one offchain data availability mode so we just called it "das" at the time. This PR doesn't include new test code, but testing was done with the end-to-end block validator and challenge system tests on the custom-da branch. * Reduce code duplication in factory, more cert validation * Fix for interface change on master * Remove DACert flag from KnownHeaderBits for now We will add DACertificateMessageHeaderFlag back to KnownHeaderBits in a future PR; removing it for now avoids changing the replay binary unnecessarily when merging in this PR. * Remove preimageType from daprovider.Validator * Remove IsValidHeaderByte from RPC API daprovider.Readers are now registered with their respective header bytes at initialization.
For external DA providers where the RPC client is used this becomes single call at init to daprovider_getSupportedHeaderBytes. Where previously in the inbox and block validator components we had loops over each provider, checking each provider if they handle that type of message, we now just check if we have a provider registered for that message. This means less RPC calls and makes the interface that providers need to implement a bit simpler, with room for taking it out completely for CustomDA if we can detect from other config that the external provider is expected to be a CustomDA provider. * Move daprovider api types to own go pkg * Use Promises with daprovider.Reader iface * dasserver migration to provider_server We moved the contents of dasserver to provider_server on the custom-da branch and I accidentally left dasserver in place on this branch when moving things across. I don't want to bring over the changes to the daprovider construction logic in node.go yet so I just made the das_migration.go file which we'll delete later. * Split rpc payload and preimage methods In normal execution we only care about the payload, and in validation we only care about the preimages (we will reconstruct the payload using the preimages). This change separates these concerns and simplifies the signatures of these methods. * Removed the validateSeqMsg param from Reader For DAS and ReferenceDA we almost always want to validate the certificate. The only time we don't is in the replay binary when we want to panic instead if it's invalid on the first read of the message, otherwise we want to ignore it and assume it's already valid. So this means that it doesn't make sense for validateSeqMsg to be part of the reader API at all, which is great because it means we can simplify the API. We can pass into the reader at construction time how to handle this because in replay it always constructs a new (fake, non-network calling) reader for each invocation. 
* Use Promises with Validator interface * Add missing import * Resgister DAS reader with both flag variants * Use cancelable promises * Fix reader registration for batch correctness checking * Use promise for GetSupportedHeaderBytes too --- arbnode/batch_poster.go | 35 ++- arbnode/inbox_tracker.go | 4 +- .../extraction/message_extraction_function.go | 4 +- .../message_extraction_function_test.go | 6 +- arbnode/mel/extraction/types.go | 2 +- arbnode/mel/runner/mel.go | 4 +- arbnode/mel/runner/mel_test.go | 2 +- arbnode/node.go | 33 ++- arbstate/inbox.go | 63 ++--- cmd/daprovider/daprovider.go | 156 ++++++++--- cmd/replay/main.go | 12 +- daprovider/daclient/daclient.go | 113 +++++--- daprovider/das/dasutil/dasutil.go | 113 ++++++-- daprovider/factory/factory.go | 242 ++++++++++++++++++ daprovider/reader.go | 110 ++++++-- daprovider/referenceda/certificate.go | 117 +++++++++ daprovider/referenceda/config.go | 41 +++ daprovider/referenceda/reference_reader.go | 154 +++++++++++ daprovider/referenceda/reference_validator.go | 153 +++++++++++ daprovider/referenceda/reference_writer.go | 63 +++++ daprovider/referenceda/storage.go | 54 ++++ daprovider/registry.go | 77 ++++++ daprovider/server/das_migration.go | 162 ++++++++++++ .../provider_server.go} | 142 +++++----- daprovider/server_api/types.go | 29 +++ daprovider/util.go | 10 +- daprovider/validator.go | 34 +++ daprovider/writer.go | 2 +- staker/stateless_block_validator.go | 47 ++-- util/containers/promise.go | 6 + 30 files changed, 1735 insertions(+), 255 deletions(-) create mode 100644 daprovider/factory/factory.go create mode 100644 daprovider/referenceda/certificate.go create mode 100644 daprovider/referenceda/config.go create mode 100644 daprovider/referenceda/reference_reader.go create mode 100644 daprovider/referenceda/reference_validator.go create mode 100644 daprovider/referenceda/reference_writer.go create mode 100644 daprovider/referenceda/storage.go create mode 100644 daprovider/registry.go create mode 
100644 daprovider/server/das_migration.go rename daprovider/{das/dasserver/dasserver.go => server/provider_server.go} (56%) create mode 100644 daprovider/server_api/types.go create mode 100644 daprovider/validator.go diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index cfc72bfece..10d5169a7b 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -107,7 +107,7 @@ type BatchPoster struct { gasRefunderAddr common.Address building *buildingBatch dapWriter daprovider.Writer - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -330,7 +330,7 @@ type BatchPosterOpts struct { TransactOpts *bind.TransactOpts DAPWriter daprovider.Writer ParentChainID *big.Int - DAPReaders []daprovider.Reader + DAPReaders *daprovider.ReaderRegistry } func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { @@ -1793,9 +1793,36 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error) } if config.CheckBatchCorrectness { - dapReaders := b.dapReaders + // Create a new registry for checking batch correctness + // We need to copy existing readers and potentially add a simulated blob reader + dapReaders := daprovider.NewReaderRegistry() + + // Copy all existing readers from the batch poster's registry + // These readers can fetch data that was already posted to + // external DA systems (eg AnyTrust) before this batch transaction + if b.dapReaders != nil { + for _, headerByte := range b.dapReaders.SupportedHeaderBytes() { + // Skip blob reader, we'll add simulated reader instead after this loop + if headerByte == daprovider.BlobHashesHeaderFlag { + continue + } + if reader, found := b.dapReaders.GetByHeaderByte(headerByte); found { + if err := dapReaders.Register(headerByte, reader); err != nil { + return false, fmt.Errorf("failed to register reader for header byte 0x%02x: %w", 
headerByte, err) + } + } + } + } + + // For EIP-4844 blob transactions, the blobs are created locally and will be + // included with the L1 transaction itself (as blob sidecars). Since these blobs + // don't exist on L1 yet, we need a simulated reader that can "read" from the + // local kzgBlobs we just created. This is different from other DA systems where + // data is posted externally first and only a reference is included in the L1 tx. if b.building.use4844 { - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&simulatedBlobReader{kzgBlobs})) + if err := dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&simulatedBlobReader{kzgBlobs})); err != nil { + return false, fmt.Errorf("failed to register simulated blob reader: %w", err) + } } seqMsg := binary.BigEndian.AppendUint64([]byte{}, l1BoundMinTimestamp) seqMsg = binary.BigEndian.AppendUint64(seqMsg, l1BoundMaxTimestamp) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 991d443805..7aa56b7db2 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -39,14 +39,14 @@ type InboxTracker struct { txStreamer *TransactionStreamer mutex sync.Mutex validator *staker.BlockValidator - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry snapSyncConfig SnapSyncConfig batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader, snapSyncConfig SnapSyncConfig) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders *daprovider.ReaderRegistry, snapSyncConfig SnapSyncConfig) (*InboxTracker, error) { tracker := &InboxTracker{ db: db, txStreamer: txStreamer, diff --git a/arbnode/mel/extraction/message_extraction_function.go b/arbnode/mel/extraction/message_extraction_function.go index a194ec6205..21e6aee193 100644 --- a/arbnode/mel/extraction/message_extraction_function.go 
+++ b/arbnode/mel/extraction/message_extraction_function.go @@ -51,7 +51,7 @@ func ExtractMessages( ctx context.Context, inputState *mel.State, parentChainHeader *types.Header, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, delayedMsgDatabase DelayedMessageDatabase, receiptFetcher ReceiptFetcher, txsFetcher TransactionsFetcher, @@ -81,7 +81,7 @@ func extractMessagesImpl( ctx context.Context, inputState *mel.State, parentChainHeader *types.Header, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, delayedMsgDatabase DelayedMessageDatabase, txsFetcher TransactionsFetcher, receiptFetcher ReceiptFetcher, diff --git a/arbnode/mel/extraction/message_extraction_function_test.go b/arbnode/mel/extraction/message_extraction_function_test.go index 97b82b4210..2404b947b8 100644 --- a/arbnode/mel/extraction/message_extraction_function_test.go +++ b/arbnode/mel/extraction/message_extraction_function_test.go @@ -31,7 +31,7 @@ func TestExtractMessages(t *testing.T) { lookupDelayedMsgs func(context.Context, *mel.State, *types.Header, ReceiptFetcher, TransactionsFetcher) ([]*mel.DelayedInboxMessage, error) serializer func(context.Context, *mel.SequencerInboxBatch, *types.Transaction, uint, ReceiptFetcher) ([]byte, error) parseReport func(io.Reader) (*big.Int, common.Address, common.Hash, uint64, *big.Int, uint64, error) - parseSequencerMsg func(context.Context, uint64, common.Hash, []byte, []daprovider.Reader, daprovider.KeysetValidationMode) (*arbstate.SequencerMessage, error) + parseSequencerMsg func(context.Context, uint64, common.Hash, []byte, *daprovider.ReaderRegistry, daprovider.KeysetValidationMode) (*arbstate.SequencerMessage, error) extractBatchMessages func(context.Context, *mel.State, *arbstate.SequencerMessage, DelayedMessageDatabase) ([]*arbostypes.MessageWithMetadata, error) expectedError string expectedMsgCount uint64 @@ -319,7 +319,7 @@ func successfulParseSequencerMsg( batchNum uint64, batchBlockHash 
common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) { return nil, nil @@ -330,7 +330,7 @@ func failingParseSequencerMsg( batchNum uint64, batchBlockHash common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) { return nil, errors.New("failed to parse sequencer message") diff --git a/arbnode/mel/extraction/types.go b/arbnode/mel/extraction/types.go index 1b1de95931..a628e1b9e5 100644 --- a/arbnode/mel/extraction/types.go +++ b/arbnode/mel/extraction/types.go @@ -62,7 +62,7 @@ type sequencerMessageParserFunc func( batchNum uint64, batchBlockHash common.Hash, data []byte, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode, ) (*arbstate.SequencerMessage, error) diff --git a/arbnode/mel/runner/mel.go b/arbnode/mel/runner/mel.go index 9e7134c053..ddbed50c79 100644 --- a/arbnode/mel/runner/mel.go +++ b/arbnode/mel/runner/mel.go @@ -40,7 +40,7 @@ type MessageExtractor struct { addrs *chaininfo.RollupAddresses melDB *Database msgConsumer mel.MessageConsumer - dataProviders []daprovider.Reader + dataProviders *daprovider.ReaderRegistry startParentChainBlockHash common.Hash fsm *fsm.Fsm[action, FSMState] retryInterval time.Duration @@ -54,7 +54,7 @@ func NewMessageExtractor( rollupAddrs *chaininfo.RollupAddresses, melDB *Database, msgConsumer mel.MessageConsumer, - dataProviders []daprovider.Reader, + dataProviders *daprovider.ReaderRegistry, startParentChainBlockHash common.Hash, retryInterval time.Duration, ) (*MessageExtractor, error) { diff --git a/arbnode/mel/runner/mel_test.go b/arbnode/mel/runner/mel_test.go index 89b19cf502..8823576a06 100644 --- a/arbnode/mel/runner/mel_test.go +++ b/arbnode/mel/runner/mel_test.go @@ 
-42,7 +42,7 @@ func TestMessageExtractor(t *testing.T) { &chaininfo.RollupAddresses{}, melDb, messageConsumer, - []daprovider.Reader{}, + daprovider.NewReaderRegistry(), common.Hash{}, 0, ) diff --git a/arbnode/node.go b/arbnode/node.go index 78e1c46ada..125a94ca73 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,7 +40,7 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/daclient" "github.com/offchainlabs/nitro/daprovider/das" - "github.com/offchainlabs/nitro/daprovider/das/dasserver" + dapserver "github.com/offchainlabs/nitro/daprovider/server" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -560,7 +560,7 @@ func getDAS( dataSigner signature.DataSignerFunc, l1client *ethclient.Client, stack *node.Node, -) (daprovider.Writer, func(), []daprovider.Reader, error) { +) (daprovider.Writer, func(), *daprovider.ReaderRegistry, error) { if config.DAProvider.Enable && config.DataAvailability.Enable { return nil, nil, nil, errors.New("da-provider and data-availability cannot be enabled together") } @@ -589,13 +589,13 @@ func getDAS( } }() - serverConfig := dasserver.DefaultServerConfig + serverConfig := dapserver.DefaultDASServerConfig serverConfig.Port = 0 // Initializes server at a random available port serverConfig.DataAvailability = config.DataAvailability serverConfig.EnableDAWriter = config.BatchPoster.Enable serverConfig.JWTSecret = jwtPath withDAWriter = config.BatchPoster.Enable - dasServer, closeFn, err := dasserver.NewServer(ctx, &serverConfig, dataSigner, l1client, l1Reader, deployInfo.SequencerInbox) + dasServer, closeFn, err := dapserver.NewServerForDAS(ctx, &serverConfig, dataSigner, l1client, l1Reader, deployInfo.SequencerInbox) if err != nil { return nil, nil, nil, err } @@ -620,13 +620,26 @@ func getDAS( if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && 
daClient == nil { return nil, nil, nil, errors.New("data availability service required but unconfigured") } - var dapReaders []daprovider.Reader + + dapReaders := daprovider.NewReaderRegistry() if daClient != nil { - dapReaders = append(dapReaders, daClient) + promise := daClient.GetSupportedHeaderBytes() + result, err := promise.Await(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get supported header bytes from DA client: %w", err) + } + if err := dapReaders.RegisterAll(result.HeaderBytes, daClient); err != nil { + return nil, nil, nil, fmt.Errorf("failed to register DA client: %w", err) + } } if blobReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + if err := dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(blobReader)); err != nil { + return nil, nil, nil, fmt.Errorf("failed to register blob reader: %w", err) + } } + // AnyTrust now always uses the daClient, which is already registered, + // so we don't need to register it separately here. 
+ if withDAWriter { return daClient, dasServerCloseFn, dapReaders, nil } @@ -637,7 +650,7 @@ func getInboxTrackerAndReader( ctx context.Context, arbDb ethdb.Database, txStreamer *TransactionStreamer, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, config *Config, configFetcher ConfigFetcher, l1client *ethclient.Client, @@ -849,7 +862,7 @@ func getStatelessBlockValidator( txStreamer *TransactionStreamer, exec execution.ExecutionRecorder, arbDb ethdb.Database, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, stack *node.Node, latestWasmModuleRoot common.Hash, ) (*staker.StatelessBlockValidator, error) { @@ -899,7 +912,7 @@ func getBatchPoster( syncMonitor *SyncMonitor, deployInfo *chaininfo.RollupAddresses, parentChainID *big.Int, - dapReaders []daprovider.Reader, + dapReaders *daprovider.ReaderRegistry, stakerAddr common.Address, ) (*BatchPoster, error) { var batchPoster *BatchPoster diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 022b99e419..a0c308e626 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -51,7 +51,7 @@ const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*SequencerMessage, error) { +func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode) (*SequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -76,40 +76,39 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Stage 1: Extract the payload from any data availability header. 
// It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. - // We try to extract payload from the first occurring valid DA reader in the dapReaders list - if len(payload) > 0 { - foundDA := false - var err error - for _, dapReader := range dapReaders { - if dapReader != nil && dapReader.IsValidHeaderByte(ctx, payload[0]) { - payload, _, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate) - if err != nil { - // Matches the way keyset validation was done inside DAS readers i.e logging the error - // But other daproviders might just want to return the error - if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(payload[0]) { - if keysetValidationMode == daprovider.KeysetPanicIfInvalid { - panic(err.Error()) - } else { - log.Error(err.Error()) - } + // Use the registry to find the appropriate reader for the header byte + if len(payload) > 0 && dapReaders != nil { + if dapReader, found := dapReaders.GetByHeaderByte(payload[0]); found { + promise := dapReader.RecoverPayload(batchNum, batchBlockHash, data) + result, err := promise.Await(ctx) + if err != nil { + // Matches the way keyset validation was done inside DAS readers i.e logging the error + // But other daproviders might just want to return the error + if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(payload[0]) { + if keysetValidationMode == daprovider.KeysetPanicIfInvalid { + panic(err.Error()) } else { - return nil, err + log.Error(err.Error()) } + } else { + return nil, err } - if payload == nil { - return parsedMsg, nil - } - foundDA = true - break + } else { + payload = result.Payload } - } - - if !foundDA { + if payload == nil { + return parsedMsg, nil + } + } else { + // No reader found for this header 
byte - check if it's a known type if daprovider.IsDASMessageHeaderByte(payload[0]) { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") + return nil, fmt.Errorf("no DAS reader configured for DAS message (header byte 0x%02x)", payload[0]) } else if daprovider.IsBlobHashesHeaderByte(payload[0]) { return nil, daprovider.ErrNoBlobReader + } else if daprovider.IsDACertificateMessageHeaderByte(payload[0]) { + return nil, fmt.Errorf("no DACertificate reader configured for certificate message (header byte 0x%02x)", payload[0]) } + // Otherwise it's not a DA message, continue processing } } @@ -167,17 +166,21 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - dapReaders []daprovider.Reader + dapReaders *daprovider.ReaderRegistry cachedSequencerMessage *SequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 cachedSegmentTimestamp uint64 cachedSegmentBlockNumber uint64 cachedSubMessageNumber uint64 - keysetValidationMode daprovider.KeysetValidationMode + // keysetValidationMode is used for error handling in ParseSequencerMessage. + // Note: DAS readers now handle validation internally based on their construction-time mode, + // but ParseSequencerMessage still needs this to decide whether to panic or log on validation errors. + // In replay mode, this allows proper error handling based on the position within the message. 
+ keysetValidationMode daprovider.KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders *daprovider.ReaderRegistry, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, diff --git a/cmd/daprovider/daprovider.go b/cmd/daprovider/daprovider.go index c11658c283..c98bc248bb 100644 --- a/cmd/daprovider/daprovider.go +++ b/cmd/daprovider/daprovider.go @@ -12,25 +12,35 @@ import ( "github.com/knadh/koanf/parsers/json" "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" + "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das" - "github.com/offchainlabs/nitro/daprovider/das/dasserver" + "github.com/offchainlabs/nitro/daprovider/factory" + "github.com/offchainlabs/nitro/daprovider/referenceda" + dapserver "github.com/offchainlabs/nitro/daprovider/server" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) type Config struct { - DASServer dasserver.ServerConfig `koanf:"das-server"` + Mode factory.DAProviderMode `koanf:"mode"` + ProviderServer dapserver.ServerConfig `koanf:"provider-server"` WithDataSigner bool `koanf:"with-data-signer"` DataSignerWallet genericconf.WalletConfig `koanf:"data-signer-wallet"` + // Mode-specific configs + Anytrust 
das.DataAvailabilityConfig `koanf:"anytrust"` + ReferenceDA referenceda.Config `koanf:"referenceda"` + Conf genericconf.ConfConfig `koanf:"conf"` LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` @@ -42,9 +52,12 @@ type Config struct { } var DefaultConfig = Config{ - DASServer: dasserver.DefaultServerConfig, + Mode: "", // Must be explicitly set + ProviderServer: dapserver.DefaultServerConfig, WithDataSigner: false, DataSignerWallet: arbnode.DefaultBatchPosterL1WalletConfig, + Anytrust: das.DefaultDataAvailabilityConfig, + ReferenceDA: referenceda.DefaultConfig, Conf: genericconf.ConfConfigDefault, LogLevel: "INFO", LogType: "plaintext", @@ -61,6 +74,7 @@ func printSampleUsage(progname string) { func parseDAProvider(args []string) (*Config, error) { f := pflag.NewFlagSet("daprovider", pflag.ContinueOnError) + f.String("mode", string(DefaultConfig.Mode), "DA provider mode (anytrust or referenceda) - REQUIRED") f.Bool("with-data-signer", DefaultConfig.WithDataSigner, "set to enable data signing when processing store requests. 
If enabled requires data-signer-wallet config") genericconf.WalletConfigAddOptions("data-signer-wallet", f, DefaultConfig.DataSignerWallet.Pathname) @@ -73,7 +87,12 @@ func parseDAProvider(args []string) (*Config, error) { f.String("log-level", DefaultConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", DefaultConfig.LogType, "log type (plaintext or json)") - dasserver.ServerConfigAddOptions("das-server", f) + dapserver.ServerConfigAddOptions("provider-server", f) + + // Add mode-specific options + das.DataAvailabilityConfigAddDaserverOptions("anytrust", f) + referenceda.ConfigAddOptions("referenceda", f) + genericconf.ConfConfigAddOptions("conf", f) k, err := confighelpers.BeginCommonParse(f, args) @@ -81,7 +100,7 @@ func parseDAProvider(args []string) (*Config, error) { return nil, err } - if err = das.FixKeysetCLIParsing("das-server.data-availability.rpc-aggregator.backends", k); err != nil { + if err = das.FixKeysetCLIParsing("anytrust.rpc-aggregator.backends", k); err != nil { return nil, err } @@ -92,7 +111,7 @@ func parseDAProvider(args []string) (*Config, error) { if config.Conf.Dump { err = confighelpers.DumpConfig(k, map[string]interface{}{ - "das-server.data-availability.key.priv-key": "", + "anytrust.key.priv-key": "", }) if err != nil { return nil, fmt.Errorf("error removing extra parameters before dump: %w", err) @@ -124,6 +143,12 @@ func startup() error { if err != nil { confighelpers.PrintErrorAndExit(err, printSampleUsage) } + + // Validate mode + if config.Mode == "" { + return errors.New("--mode must be explicitly specified (anytrust or referenceda)") + } + logLevel, err := genericconf.ToSlogLevel(config.LogLevel) if err != nil { confighelpers.PrintErrorAndExit(err, printSampleUsage) @@ -154,50 +179,114 @@ func startup() error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if !config.DASServer.DataAvailability.Enable { - return 
errors.New("--das-server.data-availability.enable is a required to start a das-server") - } + // Mode-specific validation and setup + var l1Client *ethclient.Client + var l1Reader *headerreader.HeaderReader + var seqInboxAddr common.Address + var dataSigner signature.DataSignerFunc - if config.DASServer.DataAvailability.ParentChainNodeURL == "" || config.DASServer.DataAvailability.ParentChainNodeURL == "none" { - return errors.New("--das-server.data-availability.parent-chain-node-url is a required to start a das-server") - } + if config.Mode == factory.ModeAnyTrust { + if !config.Anytrust.Enable { + return errors.New("--anytrust.enable is required to start an AnyTrust provider server") + } + + if config.Anytrust.ParentChainNodeURL == "" || config.Anytrust.ParentChainNodeURL == "none" { + return errors.New("--anytrust.parent-chain-node-url is required to start an AnyTrust provider server") + } + + if config.Anytrust.SequencerInboxAddress == "" || config.Anytrust.SequencerInboxAddress == "none" { + return errors.New("--anytrust.sequencer-inbox-address must be set to a valid L1 contract address") + } + + l1Client, err = das.GetL1Client(ctx, config.Anytrust.ParentChainConnectionAttempts, config.Anytrust.ParentChainNodeURL) + if err != nil { + return err + } + + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) + if err != nil { + return err + } - if config.DASServer.DataAvailability.SequencerInboxAddress == "" || config.DASServer.DataAvailability.SequencerInboxAddress == "none" { - return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address") + seqInboxAddrPtr, err := das.OptionalAddressFromString(config.Anytrust.SequencerInboxAddress) + if err != nil { + return err + } + if seqInboxAddrPtr == nil { + return errors.New("must provide --anytrust.sequencer-inbox-address set to a valid contract 
address") + } + seqInboxAddr = *seqInboxAddrPtr + + if config.WithDataSigner && config.ProviderServer.EnableDAWriter { + l1ChainId, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("couldn't read L1 chainid: %w", err) + } + if _, dataSigner, err = util.OpenWallet("data-signer", &config.DataSignerWallet, l1ChainId); err != nil { + return err + } + } + } else if config.Mode == factory.ModeReferenceDA { + if !config.ReferenceDA.Enable { + return errors.New("--referenceda.enable is required to start a ReferenceDA provider server") + } } - l1Client, err := das.GetL1Client(ctx, config.DASServer.DataAvailability.ParentChainConnectionAttempts, config.DASServer.DataAvailability.ParentChainNodeURL) + // Create DA provider factory based on mode + providerFactory, err := factory.NewDAProviderFactory( + config.Mode, + &config.Anytrust, + &config.ReferenceDA, + dataSigner, + l1Client, + l1Reader, + seqInboxAddr, + config.ProviderServer.EnableDAWriter, + ) if err != nil { return err } - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) - if err != nil { + if err := providerFactory.ValidateConfig(); err != nil { return err } - seqInboxAddr, err := das.OptionalAddressFromString(config.DASServer.DataAvailability.SequencerInboxAddress) + // Create reader/writer/validator using factory + var cleanupFuncs []func() + + reader, readerCleanup, err := providerFactory.CreateReader(ctx) if err != nil { return err } - if seqInboxAddr == nil { - return errors.New("must provide --das-server.data-availability.sequencer-inbox-address set to a valid contract address or 'none'") + if readerCleanup != nil { + cleanupFuncs = append(cleanupFuncs, readerCleanup) } - var dataSigner signature.DataSignerFunc - if config.WithDataSigner && config.DASServer.EnableDAWriter { - l1ChainId, err := l1Client.ChainID(ctx) + var writer daprovider.Writer + 
if config.ProviderServer.EnableDAWriter { + var writerCleanup func() + writer, writerCleanup, err = providerFactory.CreateWriter(ctx) if err != nil { - return fmt.Errorf("couldn't read L1 chainid: %w", err) - } - if _, dataSigner, err = util.OpenWallet("data-signer", &config.DataSignerWallet, l1ChainId); err != nil { return err } + if writerCleanup != nil { + cleanupFuncs = append(cleanupFuncs, writerCleanup) + } + } + + // Create validator (may be nil for AnyTrust mode) + validator, validatorCleanup, err := providerFactory.CreateValidator(ctx) + if err != nil { + return err + } + if validatorCleanup != nil { + cleanupFuncs = append(cleanupFuncs, validatorCleanup) } - log.Info("Starting json rpc server", "addr", config.DASServer.Addr, "port", config.DASServer.Port) - dasServer, closeFn, err := dasserver.NewServer(ctx, &config.DASServer, dataSigner, l1Client, l1Reader, *seqInboxAddr) + log.Info("Starting json rpc server", "mode", config.Mode, "addr", config.ProviderServer.Addr, "port", config.ProviderServer.Port) + headerBytes := providerFactory.GetSupportedHeaderBytes() + providerServer, err := dapserver.NewServerWithDAPProvider(ctx, &config.ProviderServer, reader, writer, validator, headerBytes) if err != nil { return err } @@ -208,12 +297,15 @@ func startup() error { <-sigint - if err = dasServer.Shutdown(ctx); err != nil { + if err = providerServer.Shutdown(ctx); err != nil { return err } - if closeFn != nil { - closeFn() + + // Call all cleanup functions + for _, cleanup := range cleanupFuncs { + cleanup() } + if l1Reader != nil && l1Reader.Started() { l1Reader.StopAndWait() } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 15a56e8858..69d93a192d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -240,11 +240,17 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = daprovider.KeysetDontValidate } - var dapReaders []daprovider.Reader + dapReaders := daprovider.NewReaderRegistry() if dasReader != nil { - 
dapReaders = append(dapReaders, dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher)) + err = dapReaders.SetupDASReader(dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher, keysetValidationMode)) + if err != nil { + panic(fmt.Sprintf("Failed to register DAS reader: %v", err)) + } + } + err = dapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) + if err != nil { + panic(fmt.Sprintf("Failed to register blob reader: %v", err)) } - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) diff --git a/daprovider/daclient/daclient.go b/daprovider/daclient/daclient.go index 47f8b342c1..1782e79740 100644 --- a/daprovider/daclient/daclient.go +++ b/daprovider/daclient/daclient.go @@ -1,3 +1,6 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + package daclient import ( @@ -8,9 +11,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/server_api" + "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" ) @@ -49,44 +53,57 @@ func NewClient(ctx context.Context, config rpcclient.ClientConfigFetcher) (*Clie return client, nil } -// IsValidHeaderByteResult is the result struct that data availability providers should use to respond if the given headerByte corresponds to their DA service -type IsValidHeaderByteResult struct { - IsValid bool `json:"is-valid,omitempty"` -} - -func (c *Client) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - var isValidHeaderByteResult IsValidHeaderByteResult - if err := 
c.CallContext(ctx, &isValidHeaderByteResult, "daprovider_isValidHeaderByte", headerByte); err != nil { - log.Error("Error returned from daprovider_isValidHeaderByte rpc method, defaulting to result as false", "err", err) - return false - } - return isValidHeaderByteResult.IsValid +type SupportedHeaderBytesResult struct { + HeaderBytes []byte } -// RecoverPayloadFromBatchResult is the result struct that data availability providers should use to respond with underlying payload and updated preimages map to a RecoverPayloadFromBatch fetch request -type RecoverPayloadFromBatchResult struct { - Payload hexutil.Bytes `json:"payload,omitempty"` - Preimages daprovider.PreimagesMap `json:"preimages,omitempty"` +func (c *Client) GetSupportedHeaderBytes() containers.PromiseInterface[SupportedHeaderBytesResult] { + promise, ctx := containers.NewPromiseWithContext[SupportedHeaderBytesResult](context.Background()) + go func() { + var result server_api.SupportedHeaderBytesResult + if err := c.CallContext(ctx, &result, "daprovider_getSupportedHeaderBytes"); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_getSupportedHeaderBytes rpc method: %w", err)) + } else { + promise.Produce(SupportedHeaderBytesResult{HeaderBytes: result.HeaderBytes}) + } + }() + return promise } -func (c *Client) RecoverPayloadFromBatch( - ctx context.Context, +// RecoverPayload fetches the underlying payload from the DA provider +func (c *Client) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, - validateSeqMsg bool, -) ([]byte, daprovider.PreimagesMap, error) { - var recoverPayloadFromBatchResult RecoverPayloadFromBatchResult - if err := c.CallContext(ctx, &recoverPayloadFromBatchResult, "daprovider_recoverPayloadFromBatch", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg), preimages, validateSeqMsg); err != nil { - return nil, nil, fmt.Errorf("error returned from 
daprovider_recoverPayloadFromBatch rpc method, err: %w", err) - } - return recoverPayloadFromBatchResult.Payload, recoverPayloadFromBatchResult.Preimages, nil +) containers.PromiseInterface[daprovider.PayloadResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PayloadResult](context.Background()) + go func() { + var result daprovider.PayloadResult + if err := c.CallContext(ctx, &result, "daprovider_recoverPayload", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_recoverPayload rpc method, err: %w", err)) + } else { + promise.Produce(result) + } + }() + return promise } -// StoreResult is the result struct that data availability providers should use to respond with a commitment to a Store request for posting batch data to their DA service -type StoreResult struct { - SerializedDACert hexutil.Bytes `json:"serialized-da-cert,omitempty"` +// CollectPreimages collects preimages from the DA provider +func (c *Client) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PreimagesResult](context.Background()) + go func() { + var result daprovider.PreimagesResult + if err := c.CallContext(ctx, &result, "daprovider_collectPreimages", hexutil.Uint64(batchNum), batchBlockHash, hexutil.Bytes(sequencerMsg)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_collectPreimages rpc method, err: %w", err)) + } else { + promise.Produce(result) + } + }() + return promise } func (c *Client) Store( @@ -95,9 +112,43 @@ func (c *Client) Store( timeout uint64, disableFallbackStoreDataOnChain bool, ) ([]byte, error) { - var storeResult StoreResult + var storeResult server_api.StoreResult if err := c.CallContext(ctx, &storeResult, "daprovider_store", hexutil.Bytes(message), 
hexutil.Uint64(timeout), disableFallbackStoreDataOnChain); err != nil { return nil, fmt.Errorf("error returned from daprovider_store rpc method, err: %w", err) } return storeResult.SerializedDACert, nil } + +// GenerateReadPreimageProof generates a proof for a specific preimage at a given offset +// This method calls the external DA provider's RPC endpoint to generate the proof +func (c *Client) GenerateReadPreimageProof( + certHash common.Hash, + offset uint64, + certificate []byte, +) containers.PromiseInterface[daprovider.PreimageProofResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PreimageProofResult](context.Background()) + go func() { + var generateProofResult server_api.GenerateReadPreimageProofResult + if err := c.CallContext(ctx, &generateProofResult, "daprovider_generateReadPreimageProof", certHash, hexutil.Uint64(offset), hexutil.Bytes(certificate)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_generateReadPreimageProof rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.PreimageProofResult{Proof: generateProofResult.Proof}) + } + }() + return promise +} + +func (c *Client) GenerateCertificateValidityProof( + certificate []byte, +) containers.PromiseInterface[daprovider.ValidityProofResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.ValidityProofResult](context.Background()) + go func() { + var generateCertificateValidityProofResult server_api.GenerateCertificateValidityProofResult + if err := c.CallContext(ctx, &generateCertificateValidityProofResult, "daprovider_generateCertificateValidityProof", hexutil.Bytes(certificate)); err != nil { + promise.ProduceError(fmt.Errorf("error returned from daprovider_generateCertificateValidityProof rpc method, err: %w", err)) + } else { + promise.Produce(daprovider.ValidityProofResult{Proof: generateCertificateValidityProofResult.Proof}) + } + }() + return promise +} diff --git a/daprovider/das/dasutil/dasutil.go 
b/daprovider/das/dasutil/dasutil.go index 7c24508768..286fb8e026 100644 --- a/daprovider/das/dasutil/dasutil.go +++ b/daprovider/das/dasutil/dasutil.go @@ -20,6 +20,7 @@ import ( "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/daprovider/das/dastree" + "github.com/offchainlabs/nitro/util/containers" ) type DASReader interface { @@ -40,31 +41,67 @@ type DASKeysetFetcher interface { // NewReaderForDAS is generally meant to be only used by nitro. // DA Providers should implement methods in the Reader interface independently -func NewReaderForDAS(dasReader DASReader, keysetFetcher DASKeysetFetcher) *readerForDAS { +func NewReaderForDAS(dasReader DASReader, keysetFetcher DASKeysetFetcher, validationMode daprovider.KeysetValidationMode) *readerForDAS { return &readerForDAS{ - dasReader: dasReader, - keysetFetcher: keysetFetcher, + dasReader: dasReader, + keysetFetcher: keysetFetcher, + validationMode: validationMode, } } type readerForDAS struct { - dasReader DASReader - keysetFetcher DASKeysetFetcher + dasReader DASReader + keysetFetcher DASKeysetFetcher + validationMode daprovider.KeysetValidationMode } -func (d *readerForDAS) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return daprovider.IsDASMessageHeaderByte(headerByte) +// recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages +func (d *readerForDAS) recoverInternal( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + needPayload bool, + needPreimages bool, +) ([]byte, daprovider.PreimagesMap, error) { + // Convert validation mode to boolean for the internal function + validateSeqMsg := d.validationMode != daprovider.KeysetDontValidate + return recoverPayloadFromDasBatchInternal(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, validateSeqMsg, needPayload, needPreimages) } -func (d *readerForDAS) RecoverPayloadFromBatch( - ctx context.Context, +// RecoverPayload 
fetches the underlying payload from the DA provider +func (d *readerForDAS) RecoverPayload( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages daprovider.PreimagesMap, - validateSeqMsg bool, -) ([]byte, daprovider.PreimagesMap, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, preimages, validateSeqMsg) +) containers.PromiseInterface[daprovider.PayloadResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PayloadResult](context.Background()) + go func() { + payload, _, err := d.recoverInternal(ctx, batchNum, sequencerMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PayloadResult{Payload: payload}) + } + }() + return promise +} + +// CollectPreimages collects preimages from the DA provider +func (d *readerForDAS) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PreimagesResult](context.Background()) + go func() { + _, preimages, err := d.recoverInternal(ctx, batchNum, sequencerMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PreimagesResult{Preimages: preimages}) + } + }() + return promise } // NewWriterForDAS is generally meant to be only used by nitro. 
@@ -99,6 +136,7 @@ var ( const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week +// RecoverPayloadFromDasBatch is deprecated, use recoverPayloadFromDasBatchInternal func RecoverPayloadFromDasBatch( ctx context.Context, batchNum uint64, @@ -108,8 +146,41 @@ func RecoverPayloadFromDasBatch( preimages daprovider.PreimagesMap, validateSeqMsg bool, ) ([]byte, daprovider.PreimagesMap, error) { + needPreimages := preimages != nil + payload, recoveredPreimages, err := recoverPayloadFromDasBatchInternal(ctx, batchNum, sequencerMsg, dasReader, keysetFetcher, validateSeqMsg, true, needPreimages) + if err != nil { + return nil, nil, err + } + // If preimages were passed in, copy recovered preimages into the provided map + if preimages != nil && recoveredPreimages != nil { + for piType, piMap := range recoveredPreimages { + if preimages[piType] == nil { + preimages[piType] = make(map[common.Hash][]byte) + } + for hash, preimage := range piMap { + preimages[piType][hash] = preimage + } + } + return payload, preimages, nil + } + return payload, recoveredPreimages, nil +} + +// recoverPayloadFromDasBatchInternal is the shared implementation +func recoverPayloadFromDasBatchInternal( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + dasReader DASReader, + keysetFetcher DASKeysetFetcher, + validateSeqMsg bool, + needPayload bool, + needPreimages bool, +) ([]byte, daprovider.PreimagesMap, error) { + var preimages daprovider.PreimagesMap var preimageRecorder daprovider.PreimageRecorder - if preimages != nil { + if needPreimages { + preimages = make(daprovider.PreimagesMap) preimageRecorder = daprovider.RecordPreimagesTo(preimages) } cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) @@ -178,13 +249,17 @@ func RecoverPayloadFromDasBatch( } dataHash := cert.DataHash - payload, err := getByHash(ctx, dataHash) - if err != nil { - log.Error("Couldn't fetch DAS batch contents", "err", err) - return nil, nil, err + var payload 
[]byte + // We need to fetch the payload if either we need to return it or need to record preimages + if needPayload || needPreimages { + payload, err = getByHash(ctx, dataHash) + if err != nil { + log.Error("Couldn't fetch DAS batch contents", "err", err) + return nil, nil, err + } } - if preimageRecorder != nil { + if preimageRecorder != nil && payload != nil { if version == 0 { treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) diff --git a/daprovider/factory/factory.go b/daprovider/factory/factory.go new file mode 100644 index 0000000000..c581786069 --- /dev/null +++ b/daprovider/factory/factory.go @@ -0,0 +1,242 @@ +// Copyright 2024-2025, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package factory + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/das" + "github.com/offchainlabs/nitro/daprovider/das/dasutil" + "github.com/offchainlabs/nitro/daprovider/referenceda" + "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/signature" +) + +type DAProviderMode string + +const ( + ModeAnyTrust DAProviderMode = "anytrust" + ModeReferenceDA DAProviderMode = "referenceda" +) + +type DAProviderFactory interface { + CreateReader(ctx context.Context) (daprovider.Reader, func(), error) + CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) + CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) + ValidateConfig() error + GetSupportedHeaderBytes() []byte +} + +type AnyTrustFactory struct { + config *das.DataAvailabilityConfig + dataSigner signature.DataSignerFunc + l1Client *ethclient.Client + l1Reader *headerreader.HeaderReader + 
seqInboxAddr common.Address + enableWriter bool +} + +type ReferenceDAFactory struct { + config *referenceda.Config + enableWriter bool + dataSigner signature.DataSignerFunc + l1Client *ethclient.Client +} + +func NewDAProviderFactory( + mode DAProviderMode, + anytrust *das.DataAvailabilityConfig, + referencedaCfg *referenceda.Config, + dataSigner signature.DataSignerFunc, + l1Client *ethclient.Client, + l1Reader *headerreader.HeaderReader, + seqInboxAddr common.Address, + enableWriter bool, +) (DAProviderFactory, error) { + switch mode { + case ModeAnyTrust: + return &AnyTrustFactory{ + config: anytrust, + dataSigner: dataSigner, + l1Client: l1Client, + l1Reader: l1Reader, + seqInboxAddr: seqInboxAddr, + enableWriter: enableWriter, + }, nil + case ModeReferenceDA: + factory := &ReferenceDAFactory{ + config: referencedaCfg, + enableWriter: enableWriter, + dataSigner: dataSigner, + l1Client: l1Client, + } + return factory, nil + default: + return nil, fmt.Errorf("unsupported DA provider mode: %s", mode) + } +} + +// AnyTrust Factory Implementation +func (f *AnyTrustFactory) GetSupportedHeaderBytes() []byte { + // Support both DAS without tree flag (0x80) and with tree flag (0x88) + return []byte{ + daprovider.DASMessageHeaderFlag, + daprovider.DASMessageHeaderFlag | daprovider.TreeDASMessageHeaderFlag, + } +} + +func (f *AnyTrustFactory) ValidateConfig() error { + if !f.config.Enable { + return errors.New("anytrust data availability must be enabled") + } + + if f.enableWriter { + if !f.config.RPCAggregator.Enable || !f.config.RestAggregator.Enable { + return errors.New("rpc-aggregator.enable and rest-aggregator.enable must be set when running writer mode") + } + } else { + if f.config.RPCAggregator.Enable { + return errors.New("rpc-aggregator is only for writer mode") + } + if !f.config.RestAggregator.Enable { + return errors.New("rest-aggregator.enable must be set for reader mode") + } + } + + return nil +} + +func (f *AnyTrustFactory) CreateReader(ctx 
context.Context) (daprovider.Reader, func(), error) { + var daReader dasutil.DASReader + var keysetFetcher *das.KeysetFetcher + var lifecycleManager *das.LifecycleManager + var err error + + if f.enableWriter { + _, daReader, keysetFetcher, lifecycleManager, err = das.CreateDAReaderAndWriter( + ctx, f.config, f.dataSigner, f.l1Client, f.seqInboxAddr) + } else { + daReader, keysetFetcher, lifecycleManager, err = das.CreateDAReader( + ctx, f.config, f.l1Reader, &f.seqInboxAddr) + } + + if err != nil { + return nil, nil, err + } + + daReader = das.NewReaderTimeoutWrapper(daReader, f.config.RequestTimeout) + if f.config.PanicOnError { + daReader = das.NewReaderPanicWrapper(daReader) + } + + reader := dasutil.NewReaderForDAS(daReader, keysetFetcher, daprovider.KeysetValidate) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) + } + } + return reader, cleanupFn, nil +} + +func (f *AnyTrustFactory) CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) { + if !f.enableWriter { + return nil, nil, nil + } + + daWriter, _, _, lifecycleManager, err := das.CreateDAReaderAndWriter( + ctx, f.config, f.dataSigner, f.l1Client, f.seqInboxAddr) + if err != nil { + return nil, nil, err + } + + if f.config.PanicOnError { + daWriter = das.NewWriterPanicWrapper(daWriter) + } + + writer := dasutil.NewWriterForDAS(daWriter) + cleanupFn := func() { + if lifecycleManager != nil { + lifecycleManager.StopAndWaitUntil(0) + } + } + return writer, cleanupFn, nil +} + +func (f *AnyTrustFactory) CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) { + // AnyTrust doesn't use the Validator interface + return nil, nil, nil +} + +// ReferenceDA Factory Implementation +func (f *ReferenceDAFactory) GetSupportedHeaderBytes() []byte { + return []byte{daprovider.DACertificateMessageHeaderFlag} +} + +func (f *ReferenceDAFactory) ValidateConfig() error { + if !f.config.Enable { + return errors.New("referenceda must be enabled") + 
} + return nil +} + +func (f *ReferenceDAFactory) CreateReader(ctx context.Context) (daprovider.Reader, func(), error) { + if f.config.ValidatorContract == "" { + return nil, nil, errors.New("validator-contract address not configured for reference DA reader") + } + validatorAddr := common.HexToAddress(f.config.ValidatorContract) + storage := referenceda.GetInMemoryStorage() + reader := referenceda.NewReader(storage, f.l1Client, validatorAddr) + return reader, nil, nil +} + +func (f *ReferenceDAFactory) CreateWriter(ctx context.Context) (daprovider.Writer, func(), error) { + if !f.enableWriter { + return nil, nil, nil + } + + if f.dataSigner == nil { + // Try to create signer from config + var signer signature.DataSignerFunc + if f.config.SigningKey.PrivateKey != "" { + privKey, err := crypto.HexToECDSA(f.config.SigningKey.PrivateKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid private key: %w", err) + } + signer = signature.DataSignerFromPrivateKey(privKey) + } else if f.config.SigningKey.KeyFile != "" { + keyData, err := os.ReadFile(f.config.SigningKey.KeyFile) + if err != nil { + return nil, nil, fmt.Errorf("failed to read key file: %w", err) + } + privKey, err := crypto.HexToECDSA(strings.TrimSpace(string(keyData))) + if err != nil { + return nil, nil, fmt.Errorf("invalid private key in file: %w", err) + } + signer = signature.DataSignerFromPrivateKey(privKey) + } else { + return nil, nil, errors.New("no signing key configured for reference DA writer") + } + f.dataSigner = signer + } + + writer := referenceda.NewWriter(f.dataSigner) + return writer, nil, nil +} + +func (f *ReferenceDAFactory) CreateValidator(ctx context.Context) (daprovider.Validator, func(), error) { + if f.config.ValidatorContract == "" { + return nil, nil, errors.New("validator-contract address not configured for reference DA validator") + } + validatorAddr := common.HexToAddress(f.config.ValidatorContract) + return referenceda.NewValidator(f.l1Client, validatorAddr), nil, nil +} 
diff --git a/daprovider/reader.go b/daprovider/reader.go index 5cfe6f9718..35f21b56bf 100644 --- a/daprovider/reader.go +++ b/daprovider/reader.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package daprovider @@ -6,27 +6,54 @@ package daprovider import ( "context" "fmt" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" + "github.com/offchainlabs/nitro/util/containers" ) +// CertificateValidationError represents an error in certificate validation +type CertificateValidationError struct { + Reason string +} + +func (e *CertificateValidationError) Error() string { + return e.Reason +} + +// IsCertificateValidationError checks if an error is a certificate validation error +func IsCertificateValidationError(err error) bool { + return err != nil && strings.Contains(err.Error(), "certificate validation failed") +} + +// PayloadResult contains the recovered payload data +type PayloadResult struct { + Payload []byte +} + +// PreimagesResult contains the collected preimages +type PreimagesResult struct { + Preimages PreimagesMap +} + type Reader interface { - // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider - IsValidHeaderByte(ctx context.Context, headerByte byte) bool + // RecoverPayload fetches the underlying payload from the DA provider given the batch header information + RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + ) containers.PromiseInterface[PayloadResult] - // RecoverPayloadFromBatch fetches the underlying payload and a map of preimages from the DA provider given the batch header information - RecoverPayloadFromBatch( - ctx context.Context, + // CollectPreimages collects preimages from the DA 
provider given the batch header information + CollectPreimages( batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages PreimagesMap, - validateSeqMsg bool, - ) ([]byte, PreimagesMap, error) + ) containers.PromiseInterface[PreimagesResult] } // NewReaderForBlobReader is generally meant to be only used by nitro. @@ -39,17 +66,14 @@ type readerForBlobReader struct { blobReader BlobReader } -func (b *readerForBlobReader) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { - return IsBlobHashesHeaderByte(headerByte) -} - -func (b *readerForBlobReader) RecoverPayloadFromBatch( +// recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages +func (b *readerForBlobReader) recoverInternal( ctx context.Context, batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages PreimagesMap, - validateSeqMsg bool, + needPayload bool, + needPreimages bool, ) ([]byte, PreimagesMap, error) { blobHashes := sequencerMsg[41:] if len(blobHashes)%len(common.Hash{}) != 0 { @@ -63,7 +87,10 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( if err != nil { return nil, nil, fmt.Errorf("failed to get blobs: %w", err) } - if preimages != nil { + + var preimages PreimagesMap + if needPreimages { + preimages = make(PreimagesMap) preimageRecorder := RecordPreimagesTo(preimages) for i, blob := range kzgBlobs { // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable @@ -72,10 +99,51 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( preimageRecorder(versionedHashes[i], b[:], arbutil.EthVersionedHashPreimageType) } } - payload, err := blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return nil, nil, nil + + var payload []byte + if needPayload { + payload, err = blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", 
"batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil, nil + } } + return payload, preimages, nil } + +// RecoverPayload fetches the underlying payload from the DA provider +func (b *readerForBlobReader) RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[PayloadResult] { + promise, ctx := containers.NewPromiseWithContext[PayloadResult](context.Background()) + go func() { + payload, _, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(PayloadResult{Payload: payload}) + } + }() + return promise +} + +// CollectPreimages collects preimages from the DA provider +func (b *readerForBlobReader) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[PreimagesResult] { + promise, ctx := containers.NewPromiseWithContext[PreimagesResult](context.Background()) + go func() { + _, preimages, err := b.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(PreimagesResult{Preimages: preimages}) + } + }() + return promise +} diff --git a/daprovider/referenceda/certificate.go b/daprovider/referenceda/certificate.go new file mode 100644 index 0000000000..91d05babf3 --- /dev/null +++ b/daprovider/referenceda/certificate.go @@ -0,0 +1,117 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package referenceda + +import ( + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/signature" +) + +// Certificate represents a ReferenceDA certificate with signature +type Certificate struct { + Header byte + DataHash [32]byte + V uint8 + R [32]byte + S [32]byte +} + +// NewCertificate creates a certificate from data and signs it +func NewCertificate(data []byte, signer signature.DataSignerFunc) (*Certificate, error) { + dataHash := sha256.Sum256(data) + + sig, err := signer(dataHash[:]) + if err != nil { + return nil, fmt.Errorf("failed to sign data hash: %w", err) + } + + cert := &Certificate{ + Header: daprovider.DACertificateMessageHeaderFlag, + DataHash: dataHash, + V: sig[64] + 27, + } + copy(cert.R[:], sig[0:32]) + copy(cert.S[:], sig[32:64]) + + return cert, nil +} + +// Serialize converts certificate to bytes (98 bytes total) +func (c *Certificate) Serialize() []byte { + result := make([]byte, 98) + result[0] = c.Header + copy(result[1:33], c.DataHash[:]) + result[33] = c.V + copy(result[34:66], c.R[:]) + copy(result[66:98], c.S[:]) + return result +} + +// Deserialize creates a certificate from bytes +func Deserialize(data []byte) (*Certificate, error) { + if len(data) != 98 { + return nil, fmt.Errorf("invalid certificate length: expected 98, got %d", len(data)) + } + + cert := &Certificate{ + Header: data[0], + V: data[33], + } + copy(cert.DataHash[:], data[1:33]) + copy(cert.R[:], data[34:66]) + copy(cert.S[:], data[66:98]) + + if cert.Header != daprovider.DACertificateMessageHeaderFlag { + return nil, fmt.Errorf("invalid certificate header: %x", cert.Header) + } + + return cert, nil +} + +// RecoverSigner recovers the signer address from the certificate +func (c *Certificate) RecoverSigner() (common.Address, 
error) { + if c.V < 27 { + return common.Address{}, fmt.Errorf("invalid signature V value: %d (must be >= 27)", c.V) + } + + sig := make([]byte, 65) + copy(sig[0:32], c.R[:]) + copy(sig[32:64], c.S[:]) + sig[64] = c.V - 27 + + pubKey, err := crypto.SigToPub(c.DataHash[:], sig) + if err != nil { + return common.Address{}, fmt.Errorf("failed to recover signer: %w", err) + } + + return crypto.PubkeyToAddress(*pubKey), nil +} + +// ValidateWithContract checks if the certificate is signed by a trusted signer using the contract +// TODO: Uncomment the following once we have merged customda contracts changes. +/* +func (c *Certificate) ValidateWithContract(validator *ospgen.ReferenceDAProofValidator, opts *bind.CallOpts) error { + signer, err := c.RecoverSigner() + if err != nil { + return err + } + + isTrusted, err := validator.TrustedSigners(opts, signer) + if err != nil { + return fmt.Errorf("failed to check trusted signer: %w", err) + } + + if !isTrusted { + return fmt.Errorf("certificate signed by untrusted signer: %s", signer.Hex()) + } + + return nil + } +*/ diff --git a/daprovider/referenceda/config.go b/daprovider/referenceda/config.go new file mode 100644 index 0000000000..6f0ad5c76d --- /dev/null +++ b/daprovider/referenceda/config.go @@ -0,0 +1,41 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + flag "github.com/spf13/pflag" +) + +type Config struct { + Enable bool `koanf:"enable"` + SigningKey SigningKeyConfig `koanf:"signing-key"` + ValidatorContract string `koanf:"validator-contract"` +} + +type SigningKeyConfig struct { + PrivateKey string `koanf:"private-key"` + KeyFile string `koanf:"key-file"` +} + +var DefaultSigningKeyConfig = SigningKeyConfig{ + PrivateKey: "", + KeyFile: "", +} + +var DefaultConfig = Config{ + Enable: false, + SigningKey: DefaultSigningKeyConfig, + ValidatorContract: "", +} + +func SigningKeyConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".private-key", DefaultSigningKeyConfig.PrivateKey, "hex-encoded private key for signing certificates") + f.String(prefix+".key-file", DefaultSigningKeyConfig.KeyFile, "path to file containing private key") +} + +func ConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultConfig.Enable, "enable reference DA provider implementation") + SigningKeyConfigAddOptions(prefix+".signing-key", f) + f.String(prefix+".validator-contract", DefaultConfig.ValidatorContract, "address of the ReferenceDAProofValidator contract") +} diff --git a/daprovider/referenceda/reference_reader.go b/daprovider/referenceda/reference_reader.go new file mode 100644 index 0000000000..8ebeb14a60 --- /dev/null +++ b/daprovider/referenceda/reference_reader.go @@ -0,0 +1,154 @@ +// Copyright 2021-2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package referenceda + +import ( + "context" + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/containers" +) + +// Reader implements the daprovider.Reader interface for ReferenceDA +type Reader struct { + storage *InMemoryStorage + l1Client *ethclient.Client + validatorAddr common.Address +} + +// NewReader creates a new ReferenceDA reader +func NewReader(storage *InMemoryStorage, l1Client *ethclient.Client, validatorAddr common.Address) *Reader { + return &Reader{ + storage: storage, + l1Client: l1Client, + validatorAddr: validatorAddr, + } +} + +// recoverInternal is the shared implementation for both RecoverPayload and CollectPreimages +func (r *Reader) recoverInternal( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + needPayload bool, + needPreimages bool, +) ([]byte, daprovider.PreimagesMap, error) { + if len(sequencerMsg) <= 40 { + return nil, nil, fmt.Errorf("sequencer message too small") + } + + // Skip the 40-byte L1 header and get the certificate + certBytes := sequencerMsg[40:] + + // Deserialize certificate + cert, err := Deserialize(certBytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to deserialize certificate: %w", err) + } + + // Validate certificate - always validate for ReferenceDA + // TODO: Uncomment the following once we have merged customda contracts changes. 
+ /* + // Create contract binding + validator, err := ospgen.NewReferenceDAProofValidator(r.validatorAddr, r.l1Client) + if err != nil { + return nil, nil, fmt.Errorf("failed to create validator binding: %w", err) + } + + // Validate using contract + callOpts := &bind.CallOpts{Context: ctx} + err = cert.ValidateWithContract(validator, callOpts) + if err != nil { + return nil, nil, fmt.Errorf("certificate validation failed: %w", err) + } + */ + + log.Debug("ReferenceDA reader extracting hash", + "certificateLen", len(certBytes), + "sha256Hash", common.Hash(cert.DataHash).Hex(), + "certificateHex", fmt.Sprintf("0x%x", certBytes)) + + // Retrieve the data from storage using the hash + var payload []byte + if needPayload || needPreimages { + payload, err = r.storage.GetByHash(ctx, common.BytesToHash(cert.DataHash[:])) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve data from storage: %w", err) + } + if payload == nil { + return nil, nil, fmt.Errorf("data not found in storage for hash %s", common.Hash(cert.DataHash).Hex()) + } + + // Verify data matches certificate hash (SHA256) + actualHash := sha256.Sum256(payload) + if actualHash != cert.DataHash { + return nil, nil, fmt.Errorf("data hash mismatch: expected %s, got %s", common.Hash(cert.DataHash).Hex(), common.Hash(actualHash).Hex()) + } + } + + // Record preimages if needed + var preimages daprovider.PreimagesMap + if needPreimages && payload != nil { + preimages = make(daprovider.PreimagesMap) + preimageRecorder := daprovider.RecordPreimagesTo(preimages) + + // Record the mapping from certificate hash to actual payload data + // This is what the replay binary expects: keccak256(certificate) -> payload + certHash := crypto.Keccak256Hash(certBytes) + preimageRecorder(certHash, payload, arbutil.DACertificatePreimageType) + } + + log.Debug("ReferenceDA batch recovery completed", + "batchNum", batchNum, + "blockHash", batchBlockHash, + "sha256", common.Hash(cert.DataHash).Hex(), + "payloadSize", 
len(payload)) + + return payload, preimages, nil +} + +// RecoverPayload fetches the underlying payload from the DA provider +func (r *Reader) RecoverPayload( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PayloadResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PayloadResult](context.Background()) + go func() { + payload, _, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, true, false) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PayloadResult{Payload: payload}) + } + }() + return promise +} + +// CollectPreimages collects preimages from the DA provider +func (r *Reader) CollectPreimages( + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, +) containers.PromiseInterface[daprovider.PreimagesResult] { + promise, ctx := containers.NewPromiseWithContext[daprovider.PreimagesResult](context.Background()) + go func() { + _, preimages, err := r.recoverInternal(ctx, batchNum, batchBlockHash, sequencerMsg, false, true) + if err != nil { + promise.ProduceError(err) + } else { + promise.Produce(daprovider.PreimagesResult{Preimages: preimages}) + } + }() + return promise +} diff --git a/daprovider/referenceda/reference_validator.go b/daprovider/referenceda/reference_validator.go new file mode 100644 index 0000000000..432dde47ab --- /dev/null +++ b/daprovider/referenceda/reference_validator.go @@ -0,0 +1,153 @@ +// Copyright 2025, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package referenceda
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
+
+	"github.com/offchainlabs/nitro/daprovider"
+	"github.com/offchainlabs/nitro/util/containers"
+)
+
+type Validator struct {
+	storage       *InMemoryStorage
+	l1Client      *ethclient.Client
+	validatorAddr common.Address
+}
+
+func NewValidator(l1Client *ethclient.Client, validatorAddr common.Address) *Validator {
+	return &Validator{
+		storage:       GetInMemoryStorage(),
+		l1Client:      l1Client,
+		validatorAddr: validatorAddr,
+	}
+}
+
+// GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA
+// The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate]
+// So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData]
+func (v *Validator) generateReadPreimageProofInternal(ctx context.Context, certHash common.Hash, offset uint64, certificate []byte) ([]byte, error) { // NOTE(review): certHash and offset are unused here — presumably consumed by the proof enhancer; confirm
+	// Deserialize certificate to extract data hash
+	cert, err := Deserialize(certificate)
+	if err != nil {
+		return nil, fmt.Errorf("failed to deserialize certificate: %w", err)
+	}
+
+	// Extract data hash (SHA256) from certificate
+	dataHash := cert.DataHash
+
+	// Get preimage from storage using SHA256 hash
+	preimage, err := v.storage.GetByHash(ctx, dataHash)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get preimage: %w", err)
+	}
+	if preimage == nil { // GetByHash returns (nil, nil) for "not found"; only transient failures are errors
+		return nil, fmt.Errorf("preimage not found for hash %x", dataHash)
+	}
+
+	// Build custom proof data: [Version(1), PreimageSize(8), PreimageData]
+	// The certificate is NOT included here as it's already in the standardized header
+	proof := make([]byte, 1+8+len(preimage))
+	proof[0] = 1 // Version
+	binary.BigEndian.PutUint64(proof[1:9], uint64(len(preimage)))
+	copy(proof[9:], preimage)
+
+	return proof, nil
+}
+
+// GenerateReadPreimageProof creates a ReadPreimage proof for ReferenceDA
+// The proof enhancer will prepend the standardized header [certKeccak256, offset, certSize, certificate]
+// So we only need to return the custom data: [Version(1), PreimageSize(8), PreimageData]
+func (v *Validator) GenerateReadPreimageProof(certHash common.Hash, offset uint64, certificate []byte) containers.PromiseInterface[daprovider.PreimageProofResult] {
+	promise, ctx := containers.NewPromiseWithContext[daprovider.PreimageProofResult](context.Background())
+	go func() {
+		proof, err := v.generateReadPreimageProofInternal(ctx, certHash, offset, certificate)
+		if err != nil {
+			promise.ProduceError(err)
+		} else {
+			promise.Produce(daprovider.PreimageProofResult{Proof: proof})
+		}
+	}()
+	return promise
+}
+
+// GenerateCertificateValidityProof creates a certificate validity proof for ReferenceDA
+// The ReferenceDA implementation returns a two-byte proof with:
+// - claimedValid (1 byte): 1 if valid, 0 if invalid
+// - version (1 byte): 0x01 for version 1
+//
+// This validates the certificate signature against trusted signers from the contract.
+// Invalid certificates (wrong format, untrusted signer) return claimedValid=0.
+// Only transient errors (like RPC failures) return an error.
+func (v *Validator) generateCertificateValidityProofInternal(ctx context.Context, certificate []byte) ([]byte, error) {
+	// Try to deserialize certificate
+	cert, err := Deserialize(certificate)
+	if err != nil {
+		// Certificate is malformed (wrong length, etc.)
+		// We return invalid proof rather than error for validation failures
+		return []byte{0, 0x01}, nil //nolint:nilerr // Invalid certificate, version 1
+	}
+
+	// Check if signer is trusted using contract
+	signer, err := cert.RecoverSigner()
+	if err != nil {
+		// Invalid signature - can't recover signer
+		// We return invalid proof rather than error for validation failures
+		return []byte{0, 0x01}, nil //nolint:nilerr // Invalid certificate, version 1
+	}
+
+	// TODO: Remove/uncomment the following once we have merged customda contracts changes.
+	// For now we will always just say the cert is untrusted.
+	_ = signer
+	isTrusted := false // NOTE(review): hard-coded until the osp contract bindings land; every cert is reported invalid
+	/*
+		// Create contract binding
+		validator, err := ospgen.NewReferenceDAProofValidator(v.validatorAddr, v.l1Client)
+		if err != nil {
+			// This is a transient error - can't connect to contract
+			return nil, fmt.Errorf("failed to create validator binding: %w", err)
+		}
+
+		// Query contract to check if signer is trusted
+		isTrusted, err = validator.TrustedSigners(&bind.CallOpts{Context: ctx}, signer)
+		if err != nil {
+			// This is a transient error - RPC call failed
+			return nil, fmt.Errorf("failed to check trusted signer: %w", err)
+		}
+	*/
+
+	if !isTrusted {
+		// Signer is not trusted
+		return []byte{0, 0x01}, nil // Invalid certificate, version 1
+	}
+
+	// Certificate is valid (signed by trusted signer)
+	return []byte{1, 0x01}, nil // Valid certificate, version 1
+}
+
+// GenerateCertificateValidityProof creates a certificate validity proof for ReferenceDA
+// The ReferenceDA implementation returns a two-byte proof with:
+// - claimedValid (1 byte): 1 if valid, 0 if invalid
+// - version (1 byte): 0x01 for version 1
+//
+// This validates the certificate signature against trusted signers from the contract.
+// Invalid certificates (wrong format, untrusted signer) return claimedValid=0.
+// Only transient errors (like RPC failures) return an error.
+func (v *Validator) GenerateCertificateValidityProof(certificate []byte) containers.PromiseInterface[daprovider.ValidityProofResult] {
+	promise, ctx := containers.NewPromiseWithContext[daprovider.ValidityProofResult](context.Background())
+	go func() {
+		proof, err := v.generateCertificateValidityProofInternal(ctx, certificate)
+		if err != nil {
+			promise.ProduceError(err)
+		} else {
+			promise.Produce(daprovider.ValidityProofResult{Proof: proof})
+		}
+	}()
+	return promise
+}
diff --git a/daprovider/referenceda/reference_writer.go b/daprovider/referenceda/reference_writer.go
new file mode 100644
index 0000000000..5730aaebb5
--- /dev/null
+++ b/daprovider/referenceda/reference_writer.go
@@ -0,0 +1,63 @@
+// Copyright 2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package referenceda
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/offchainlabs/nitro/util/signature"
+)
+
+// Writer implements the daprovider.Writer interface for ReferenceDA
+type Writer struct {
+	storage *InMemoryStorage
+	signer  signature.DataSignerFunc
+}
+
+// NewWriter creates a new ReferenceDA writer
+func NewWriter(signer signature.DataSignerFunc) *Writer {
+	return &Writer{
+		storage: GetInMemoryStorage(),
+		signer:  signer,
+	}
+}
+
+func (w *Writer) Store(
+	ctx context.Context,
+	message []byte,
+	timeout uint64, // NOTE(review): unused by the reference implementation
+	disableFallbackStoreDataOnChain bool, // NOTE(review): unused by the reference implementation
+) ([]byte, error) {
+	if w.signer == nil {
+		return nil, fmt.Errorf("no signer configured")
+	}
+
+	// Create and sign certificate
+	cert, err := NewCertificate(message, w.signer)
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the message in the singleton storage
+	err = w.storage.Store(ctx, message)
+	if err != nil {
+		return nil, err
+	}
+
+	// Serialize certificate for on-chain storage
+	certificate := cert.Serialize()
+	hashKey := common.BytesToHash(cert.DataHash[:])
+
+	log.Debug("ReferenceDA batch stored with signature",
+		"sha256", hashKey.Hex(),
+		"certificateSize", len(certificate),
+		"batchSize", len(message),
+	)
+
+	return certificate, nil
+}
diff --git a/daprovider/referenceda/storage.go b/daprovider/referenceda/storage.go
new file mode 100644
index 0000000000..8daa4ade43
--- /dev/null
+++ b/daprovider/referenceda/storage.go
@@ -0,0 +1,54 @@
+// Copyright 2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package referenceda
+
+import (
+	"context"
+	"crypto/sha256"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// InMemoryStorage implements PreimageStorage interface for in-memory storage
+type InMemoryStorage struct {
+	mu        sync.RWMutex
+	preimages map[common.Hash][]byte
+}
+
+var (
+	// singleton instance of InMemoryStorage
+	storageInstance *InMemoryStorage
+	storageOnce     sync.Once
+)
+
+// GetInMemoryStorage returns the singleton instance of InMemoryStorage
+func GetInMemoryStorage() *InMemoryStorage {
+	storageOnce.Do(func() {
+		storageInstance = &InMemoryStorage{
+			preimages: make(map[common.Hash][]byte),
+		}
+	})
+	return storageInstance
+}
+
+func (s *InMemoryStorage) Store(ctx context.Context, data []byte) error { // NOTE(review): retains the caller's slice without copying; callers must not mutate data after Store
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	hash := sha256.Sum256(data)
+	s.preimages[common.BytesToHash(hash[:])] = data
+	return nil
+}
+
+func (s *InMemoryStorage) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	data, exists := s.preimages[hash]
+	if !exists {
+		return nil, nil // not found is (nil, nil); the error return is reserved for transient failures
+	}
+	return data, nil
+}
diff --git a/daprovider/registry.go b/daprovider/registry.go
new file mode 100644
index 0000000000..6af2335c65
--- /dev/null
+++ b/daprovider/registry.go
@@ -0,0 +1,77 @@
+// Copyright 2021-2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package daprovider
+
+import (
+	"fmt"
+)
+
+// ReaderRegistry maintains a mapping of header bytes to their corresponding readers
+type ReaderRegistry struct {
+	readers map[byte]Reader
+}
+
+// NewReaderRegistry creates a new reader registry
+func NewReaderRegistry() *ReaderRegistry {
+	return &ReaderRegistry{
+		readers: make(map[byte]Reader),
+	}
+}
+
+// Register associates a header byte with a reader
+func (r *ReaderRegistry) Register(headerByte byte, reader Reader) error {
+	if reader == nil {
+		return fmt.Errorf("cannot register nil reader")
+	}
+	if existing, exists := r.readers[headerByte]; exists && existing != reader { // re-registering the same reader is a no-op, not a conflict
+		return fmt.Errorf("header byte 0x%02x already registered", headerByte)
+	}
+	r.readers[headerByte] = reader
+	return nil
+}
+
+// RegisterAll associates multiple header bytes with a reader
+func (r *ReaderRegistry) RegisterAll(headerBytes []byte, reader Reader) error { // NOTE(review): not atomic — earlier bytes stay registered if a later byte conflicts
+	for _, headerByte := range headerBytes {
+		if err := r.Register(headerByte, reader); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetByHeaderByte returns the reader associated with the given header byte
+func (r *ReaderRegistry) GetByHeaderByte(headerByte byte) (Reader, bool) {
+	reader, exists := r.readers[headerByte]
+	return reader, exists
+}
+
+// SupportedHeaderBytes returns all registered header bytes
+func (r *ReaderRegistry) SupportedHeaderBytes() []byte { // NOTE(review): map iteration — returned order is unspecified; sort if determinism is needed
+	bytes := make([]byte, 0, len(r.readers))
+	for b := range r.readers {
+		bytes = append(bytes, b)
+	}
+	return bytes
+}
+
+// SetupDASReader registers a DAS reader for the DAS header bytes (with and without Tree flag)
+func (r *ReaderRegistry) SetupDASReader(reader Reader) error {
+	// Register for DAS without tree flag (0x80)
+	if err := r.Register(DASMessageHeaderFlag, reader); err != nil {
+		return err
+	}
+	// Register for DAS with tree flag (0x88 = 0x80 | 0x08)
+	return r.Register(DASMessageHeaderFlag|TreeDASMessageHeaderFlag, reader)
+}
+
+// SetupBlobReader registers a blob reader for the blob header byte
+func (r *ReaderRegistry) SetupBlobReader(reader Reader) error {
+	return r.Register(BlobHashesHeaderFlag, reader)
+}
+
+// SetupDACertificateReader registers a DA certificate reader for the certificate header byte
+func (r *ReaderRegistry) SetupDACertificateReader(reader Reader) error {
+	return r.Register(DACertificateMessageHeaderFlag, reader)
+}
diff --git a/daprovider/server/das_migration.go b/daprovider/server/das_migration.go
new file mode 100644
index 0000000000..f17a0f34d6
--- /dev/null
+++ b/daprovider/server/das_migration.go
@@ -0,0 +1,162 @@
+// Copyright 2024-2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+// Package dapserver contains temporary DAS migration code
+// TODO: This file is temporary and will be removed once DA provider initialization
+// is moved out of arbnode/node.go on the custom-da branch
+package dapserver
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/spf13/pflag"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/offchainlabs/nitro/cmd/genericconf"
+	"github.com/offchainlabs/nitro/daprovider"
+	"github.com/offchainlabs/nitro/daprovider/das"
+	"github.com/offchainlabs/nitro/daprovider/das/dasutil"
+	"github.com/offchainlabs/nitro/util/headerreader"
+	"github.com/offchainlabs/nitro/util/signature"
+)
+
+// DASServerConfig is the configuration for a DAS server
+// TODO: This is temporary and duplicates dasserver.ServerConfig
+// It will be removed when DAS initialization moves to the factory pattern
+type DASServerConfig struct {
+	Addr               string                              `koanf:"addr"`
+	Port               uint64                              `koanf:"port"`
+	JWTSecret          string                              `koanf:"jwtsecret"`
+	EnableDAWriter     bool                                `koanf:"enable-da-writer"`
+	DataAvailability   das.DataAvailabilityConfig          `koanf:"data-availability"`
+	ServerTimeouts     genericconf.HTTPServerTimeoutConfig `koanf:"server-timeouts"`
+	RPCServerBodyLimit int                                 `koanf:"rpc-server-body-limit"`
+}
+
+// DefaultDASServerConfig provides default values for DAS server configuration
+// TODO: This is temporary and will be removed with the migration
+var DefaultDASServerConfig = DASServerConfig{
+	Addr:               "localhost",
+	Port:               9880,
+	JWTSecret:          "",
+	EnableDAWriter:     false,
+	ServerTimeouts:     genericconf.HTTPServerTimeoutConfigDefault,
+	RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault,
+	DataAvailability:   das.DefaultDataAvailabilityConfig,
+}
+
+// ServerConfigAddDASOptions adds DAS-specific command-line options
+// TODO: This is temporary and will be removed when DAS config moves elsewhere
+func ServerConfigAddDASOptions(prefix string, f *pflag.FlagSet) {
+	f.String(prefix+".addr", DefaultDASServerConfig.Addr, "JSON rpc server listening interface")
+	f.Uint64(prefix+".port", DefaultDASServerConfig.Port, "JSON rpc server listening port")
+	f.String(prefix+".jwtsecret", DefaultDASServerConfig.JWTSecret, "path to file with jwtsecret for validation")
+	f.Bool(prefix+".enable-da-writer", DefaultDASServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface")
+	f.Int(prefix+".rpc-server-body-limit", DefaultDASServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit")
+	das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f)
+	genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f)
+}
+
+// NewServerForDAS creates a new DA provider server configured for DAS/AnyTrust
+// TODO: This is temporary. On the custom-da branch, this initialization logic
+// moves to the factory pattern and this function will be removed.
+//
+// Returns:
+//   - *http.Server: The HTTP server instance
+//   - func(): Cleanup function to stop the DAS lifecycle manager
+//   - error: Any error that occurred during initialization
+func NewServerForDAS(
+	ctx context.Context,
+	config *DASServerConfig,
+	dataSigner signature.DataSignerFunc,
+	l1Client *ethclient.Client,
+	l1Reader *headerreader.HeaderReader,
+	sequencerInboxAddr common.Address,
+) (*http.Server, func(), error) {
+	// Initialize DAS components
+	var err error
+	var daWriter dasutil.DASWriter
+	var daReader dasutil.DASReader
+	var dasKeysetFetcher *das.KeysetFetcher
+	var dasLifecycleManager *das.LifecycleManager
+
+	if config.EnableDAWriter {
+		// Create both reader and writer for sequencer nodes
+		daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReaderAndWriter(
+			ctx, &config.DataAvailability, dataSigner, l1Client, sequencerInboxAddr,
+		)
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		// Create only reader for non-sequencer nodes
+		daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReader(
+			ctx, &config.DataAvailability, l1Reader, &sequencerInboxAddr,
+		)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Apply DAS-specific wrappers
+	daReader = das.NewReaderTimeoutWrapper(daReader, config.DataAvailability.RequestTimeout)
+	if config.DataAvailability.PanicOnError {
+		if daWriter != nil {
+			daWriter = das.NewWriterPanicWrapper(daWriter)
+		}
+		daReader = das.NewReaderPanicWrapper(daReader)
+	}
+
+	// Convert to daprovider interfaces
+	var writer daprovider.Writer
+	if daWriter != nil {
+		writer = dasutil.NewWriterForDAS(daWriter)
+	}
+	reader := dasutil.NewReaderForDAS(daReader, dasKeysetFetcher, daprovider.KeysetValidate)
+
+	// Translate DAS config to generic server config
+	serverConfig := ServerConfig{
+		Addr:               config.Addr,
+		Port:               config.Port,
+		JWTSecret:          config.JWTSecret,
+		EnableDAWriter:     config.EnableDAWriter,
+		ServerTimeouts:     config.ServerTimeouts,
+		RPCServerBodyLimit: config.RPCServerBodyLimit,
+	}
+
+	// Create the generic DA provider server with DAS components
+	// Support both DAS without tree flag (0x80) and with tree flag (0x88)
+	server, err := NewServerWithDAPProvider(
+		ctx,
+		&serverConfig,
+		reader,
+		writer,
+		nil, // DAS doesn't use a validator
+		[]byte{
+			daprovider.DASMessageHeaderFlag,
+			daprovider.DASMessageHeaderFlag | daprovider.TreeDASMessageHeaderFlag,
+		},
+	)
+	if err != nil {
+		// Clean up lifecycle manager if server creation fails
+		if dasLifecycleManager != nil {
+			dasLifecycleManager.StopAndWaitUntil(2 * time.Second)
+		}
+		return nil, nil, err
+	}
+
+	// Return server and cleanup function for the lifecycle manager
+	cleanupFn := func() {
+		if dasLifecycleManager != nil {
+			log.Info("Stopping DAS lifecycle manager")
+			dasLifecycleManager.StopAndWaitUntil(2 * time.Second)
+		}
+	}
+
+	return server, cleanupFn, nil
+}
diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/server/provider_server.go
similarity index 56%
rename from daprovider/das/dasserver/dasserver.go
rename to daprovider/server/provider_server.go
index 452736cbec..6b67b80220 100644
--- a/daprovider/das/dasserver/dasserver.go
+++ b/daprovider/server/provider_server.go
@@ -1,4 +1,7 @@
-package dasserver
+// Copyright 2024-2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package dapserver
 
 import (
 	"context"
@@ -11,27 +14,24 @@ import (
 	"strings"
 	"time"
 
-	"github.com/spf13/pflag"
+	flag "github.com/spf13/pflag"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
 
 	"github.com/offchainlabs/nitro/cmd/genericconf"
 	"github.com/offchainlabs/nitro/daprovider"
-	"github.com/offchainlabs/nitro/daprovider/daclient"
-	"github.com/offchainlabs/nitro/daprovider/das"
-	"github.com/offchainlabs/nitro/daprovider/das/dasutil"
-	"github.com/offchainlabs/nitro/util/headerreader"
-	"github.com/offchainlabs/nitro/util/signature"
+	"github.com/offchainlabs/nitro/daprovider/server_api"
 )
 
 type Server struct {
-	reader daprovider.Reader
-	writer daprovider.Writer
+	reader      daprovider.Reader
+	writer      daprovider.Writer
+	validator   daprovider.Validator
+	headerBytes []byte // Supported header bytes for this provider
 }
 
 type ServerConfig struct {
@@ -39,7 +39,6 @@ type ServerConfig struct {
 	Port               uint64                              `koanf:"port"`
 	JWTSecret          string                              `koanf:"jwtsecret"`
 	EnableDAWriter     bool                                `koanf:"enable-da-writer"`
-	DataAvailability   das.DataAvailabilityConfig          `koanf:"data-availability"`
 	ServerTimeouts     genericconf.HTTPServerTimeoutConfig `koanf:"server-timeouts"`
 	RPCServerBodyLimit int                                 `koanf:"rpc-server-body-limit"`
 }
@@ -49,18 +48,16 @@ var DefaultServerConfig = ServerConfig{
 	Port:               9880,
 	JWTSecret:          "",
 	EnableDAWriter:     false,
-	DataAvailability:   das.DefaultDataAvailabilityConfig,
 	ServerTimeouts:     genericconf.HTTPServerTimeoutConfigDefault,
 	RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault,
 }
 
-func ServerConfigAddOptions(prefix string, f *pflag.FlagSet) {
+func ServerConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.String(prefix+".addr", DefaultServerConfig.Addr, "JSON rpc server listening interface")
 	f.Uint64(prefix+".port", DefaultServerConfig.Port, "JSON rpc server listening port")
 	f.String(prefix+".jwtsecret", DefaultServerConfig.JWTSecret, "path to file with jwtsecret for validation")
 	f.Bool(prefix+".enable-da-writer", DefaultServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface")
 	f.Int(prefix+".rpc-server-body-limit", DefaultServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit")
-	das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f)
 	genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f)
 }
 
@@ -77,63 +74,38 @@ func fetchJWTSecret(fileName string) ([]byte, error) {
 	return nil, errors.New("JWT secret file not found")
 }
 
-func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.DataSignerFunc, l1Client *ethclient.Client, l1Reader *headerreader.HeaderReader, sequencerInboxAddr common.Address) (*http.Server, func(), error) {
-	var err error
-	var daWriter dasutil.DASWriter
-	var daReader dasutil.DASReader
-	var dasKeysetFetcher *das.KeysetFetcher
-	var dasLifecycleManager *das.LifecycleManager
-	if config.EnableDAWriter {
-		daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReaderAndWriter(ctx, &config.DataAvailability, dataSigner, l1Client, sequencerInboxAddr)
-		if err != nil {
-			return nil, nil, err
-		}
-	} else {
-		daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReader(ctx, &config.DataAvailability, l1Reader, &sequencerInboxAddr)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-
-	daReader = das.NewReaderTimeoutWrapper(daReader, config.DataAvailability.RequestTimeout)
-	if config.DataAvailability.PanicOnError {
-		if daWriter != nil {
-			daWriter = das.NewWriterPanicWrapper(daWriter)
-		}
-		daReader = das.NewReaderPanicWrapper(daReader)
-	}
-
+// NewServerWithDAPProvider creates a new server with pre-created reader/writer/validator components
+func NewServerWithDAPProvider(ctx context.Context, config *ServerConfig, reader daprovider.Reader, writer daprovider.Writer, validator daprovider.Validator, headerBytes []byte) (*http.Server, error) {
 	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", config.Addr, config.Port))
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	rpcServer := rpc.NewServer()
 	if config.RPCServerBodyLimit > 0 {
 		rpcServer.SetHTTPBodyLimit(config.RPCServerBodyLimit)
 	}
 
-	var writer daprovider.Writer
-	if daWriter != nil {
-		writer = dasutil.NewWriterForDAS(daWriter)
-	}
+
 	server := &Server{
-		reader: dasutil.NewReaderForDAS(daReader, dasKeysetFetcher),
-		writer: writer,
+		reader:      reader,
+		writer:      writer,
+		validator:   validator,
+		headerBytes: headerBytes,
 	}
 
 	if err = rpcServer.RegisterName("daprovider", server); err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	addr, ok := listener.Addr().(*net.TCPAddr)
 	if !ok {
-		return nil, nil, errors.New("failed getting dasserver address from listener")
+		return nil, errors.New("failed getting provider server address from listener")
	}
 
 	var handler http.Handler
 	if config.JWTSecret != "" {
 		jwt, err := fetchJWTSecret(config.JWTSecret)
 		if err != nil {
-			return nil, nil, fmt.Errorf("failed creating new dasserver: %w", err)
+			return nil, fmt.Errorf("failed creating new provider server: %w", err)
 		}
 		handler = node.NewHTTPHandlerStack(rpcServer, nil, nil, jwt)
 	} else {
@@ -151,7 +123,7 @@ func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.D
 
 	go func() {
 		if err := srv.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
-			log.Error("das-server's Serve method returned a non http.ErrServerClosed error", "err", err)
+			log.Error("provider server's Serve method returned a non http.ErrServerClosed error", "err", err)
 		}
 	}()
 
@@ -162,33 +134,41 @@ func NewServer(ctx context.Context, config *ServerConfig, dataSigner signature.D
 		_ = srv.Shutdown(shutdownCtx)
 	}()
 
-	return srv, func() {
-		if dasLifecycleManager != nil {
-			dasLifecycleManager.StopAndWaitUntil(2 * time.Second)
-		}
+	return srv, nil
+}
+
+func (s *Server) GetSupportedHeaderBytes(ctx context.Context) (*server_api.SupportedHeaderBytesResult, error) {
+	return &server_api.SupportedHeaderBytesResult{
+		HeaderBytes: s.headerBytes,
 	}, nil
 }
 
-func (s *Server) IsValidHeaderByte(ctx context.Context, headerByte byte) (*daclient.IsValidHeaderByteResult, error) {
-	return &daclient.IsValidHeaderByteResult{IsValid: s.reader.IsValidHeaderByte(ctx, headerByte)}, nil
+func (s *Server) RecoverPayload(
+	ctx context.Context,
+	batchNum hexutil.Uint64,
+	batchBlockHash common.Hash,
+	sequencerMsg hexutil.Bytes,
+) (*daprovider.PayloadResult, error) {
+	promise := s.reader.RecoverPayload(uint64(batchNum), batchBlockHash, sequencerMsg)
+	result, err := promise.Await(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
 }
 
-func (s *Server) RecoverPayloadFromBatch(
+func (s *Server) CollectPreimages(
 	ctx context.Context,
 	batchNum hexutil.Uint64,
 	batchBlockHash common.Hash,
 	sequencerMsg hexutil.Bytes,
-	preimages daprovider.PreimagesMap,
-	validateSeqMsg bool,
-) (*daclient.RecoverPayloadFromBatchResult, error) {
-	payload, preimages, err := s.reader.RecoverPayloadFromBatch(ctx, uint64(batchNum), batchBlockHash, sequencerMsg, preimages, validateSeqMsg)
+) (*daprovider.PreimagesResult, error) {
+	promise := s.reader.CollectPreimages(uint64(batchNum), batchBlockHash, sequencerMsg)
+	result, err := promise.Await(ctx)
 	if err != nil {
 		return nil, err
 	}
-	return &daclient.RecoverPayloadFromBatchResult{
-		Payload:   payload,
-		Preimages: preimages,
-	}, nil
+	return &result, nil
 }
 
 func (s *Server) Store(
@@ -196,10 +176,36 @@ func (s *Server) Store(
 	message hexutil.Bytes,
 	timeout hexutil.Uint64,
 	disableFallbackStoreDataOnChain bool,
-) (*daclient.StoreResult, error) {
+) (*server_api.StoreResult, error) {
 	serializedDACert, err := s.writer.Store(ctx, message, uint64(timeout), disableFallbackStoreDataOnChain)
 	if err != nil {
 		return nil, err
 	}
-	return &daclient.StoreResult{SerializedDACert: serializedDACert}, nil
+	return &server_api.StoreResult{SerializedDACert: serializedDACert}, nil
+}
+
+func (s *Server) GenerateReadPreimageProof(ctx context.Context, certHash common.Hash, offset hexutil.Uint64, certificate hexutil.Bytes) (*server_api.GenerateReadPreimageProofResult, error) {
+	if s.validator == nil {
+		return nil, errors.New("validator not available")
+	}
+	// #nosec G115
+	promise := s.validator.GenerateReadPreimageProof(certHash, uint64(offset), certificate)
+	result, err := promise.Await(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &server_api.GenerateReadPreimageProofResult{Proof: hexutil.Bytes(result.Proof)}, nil
+}
+
+func (s *Server) GenerateCertificateValidityProof(ctx context.Context, certificate hexutil.Bytes) (*server_api.GenerateCertificateValidityProofResult, error) {
+	if s.validator == nil {
+		return nil, errors.New("validator not available")
+	}
+	// #nosec G115 -- NOTE(review): no integer conversion in this method; marker looks copied from GenerateReadPreimageProof and is likely unnecessary
+	promise := s.validator.GenerateCertificateValidityProof(certificate)
+	result, err := promise.Await(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &server_api.GenerateCertificateValidityProofResult{Proof: hexutil.Bytes(result.Proof)}, nil
+}
diff --git a/daprovider/server_api/types.go b/daprovider/server_api/types.go
new file mode 100644
index 0000000000..9a26c6d683
--- /dev/null
+++ b/daprovider/server_api/types.go
@@ -0,0 +1,29 @@
+// Copyright 2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package server_api
+
+import (
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// SupportedHeaderBytesResult is the result struct that data availability providers should use to respond with their supported header bytes
+type SupportedHeaderBytesResult struct {
+	HeaderBytes hexutil.Bytes `json:"headerBytes,omitempty"`
+}
+
+// StoreResult is the result struct that data availability providers should use to respond with a commitment to a Store request for posting batch data to their DA service
+type StoreResult struct {
+	SerializedDACert hexutil.Bytes `json:"serialized-da-cert,omitempty"` // NOTE(review): kebab-case tag is inconsistent with the camelCase tags in this file — confirm wire format compatibility
+}
+
+// GenerateReadPreimageProofResult is the result struct that data availability providers
+// should use to respond with a proof for a specific preimage
+type GenerateReadPreimageProofResult struct {
+	Proof hexutil.Bytes `json:"proof,omitempty"`
+}
+
+// GenerateCertificateValidityProofResult is the result struct that data availability providers should use to respond with validity proof
+type GenerateCertificateValidityProofResult struct {
+	Proof hexutil.Bytes `json:"proof,omitempty"`
+}
diff --git a/daprovider/util.go b/daprovider/util.go
index e0454cc6b0..44aadec5a3 100644
--- a/daprovider/util.go
+++ b/daprovider/util.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022, Offchain Labs, Inc.
+// Copyright 2021-2025, Offchain Labs, Inc.
 // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
 
 package daprovider
@@ -75,6 +75,10 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5
 // BrotliMessageHeaderByte indicates that the message is brotli-compressed.
 const BrotliMessageHeaderByte byte = 0
 
+// DACertificateMessageHeaderFlag indicates that this message uses a custom data availability system.
+// Anytrust uses the legacy TreeDASMessageHeaderFlag instead despite also having a certificate.
+const DACertificateMessageHeaderFlag byte = 0x01 // NOTE(review): not folded into KnownHeaderBits below — confirm this is intentional
+
 // KnownHeaderBits is all header bits with known meaning to this nitro version
 const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte
 
@@ -105,6 +109,10 @@ func IsBlobHashesHeaderByte(header byte) bool {
 	return hasBits(header, BlobHashesHeaderFlag)
 }
 
+func IsDACertificateMessageHeaderByte(header byte) bool {
+	return header == DACertificateMessageHeaderFlag // exact byte equality — unlike the bit-test (hasBits) helpers above
+}
+
 func IsBrotliMessageHeaderByte(b uint8) bool {
 	return b == BrotliMessageHeaderByte
 }
diff --git a/daprovider/validator.go b/daprovider/validator.go
new file mode 100644
index 0000000000..0b6c15272b
--- /dev/null
+++ b/daprovider/validator.go
@@ -0,0 +1,34 @@
+// Copyright 2024-2025, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
+
+package daprovider
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/offchainlabs/nitro/util/containers"
+)
+
+// PreimageProofResult contains the generated preimage proof
+type PreimageProofResult struct {
+	Proof []byte
+}
+
+// ValidityProofResult contains the generated validity proof
+type ValidityProofResult struct {
+	Proof []byte
+}
+
+// Validator defines the interface for custom data availability systems.
+// This interface is used to generate proofs for DACertificate certificates and preimages.
+type Validator interface {
+	// GenerateReadPreimageProof generates a proof for a specific preimage at a given offset.
+	// The proof format depends on the implementation and must be compatible with the Solidity
+	// IDACertificateValidator contract.
+	// certHash is the keccak256 hash of the certificate.
+	GenerateReadPreimageProof(certHash common.Hash, offset uint64, certificate []byte) containers.PromiseInterface[PreimageProofResult]
+
+	// GenerateCertificateValidityProof returns a proof of whether the certificate
+	// is valid according to the DA system's rules.
+	GenerateCertificateValidityProof(certificate []byte) containers.PromiseInterface[ValidityProofResult]
+}
diff --git a/daprovider/writer.go b/daprovider/writer.go
index f49351e9b5..0008f19979 100644
--- a/daprovider/writer.go
+++ b/daprovider/writer.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022, Offchain Labs, Inc.
+// Copyright 2021-2025, Offchain Labs, Inc.
 // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md
 
 package daprovider
diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go
index 083971af5c..aff94cad45 100644
--- a/staker/stateless_block_validator.go
+++ b/staker/stateless_block_validator.go
@@ -44,7 +44,7 @@ type StatelessBlockValidator struct {
 	inboxTracker         InboxTrackerInterface
 	streamer             TransactionStreamerInterface
 	db                   ethdb.Database
-	dapReaders           []daprovider.Reader
+	dapReaders           *daprovider.ReaderRegistry
 	stack                *node.Node
 	latestWasmModuleRoot common.Hash
 }
@@ -237,7 +237,7 @@ func NewStatelessBlockValidator(
 	streamer TransactionStreamerInterface,
 	recorder execution.ExecutionRecorder,
 	arbdb ethdb.Database,
-	dapReaders []daprovider.Reader,
+	dapReaders *daprovider.ReaderRegistry,
 	config func() *BlockValidatorConfig,
 	stack *node.Node,
 	latestWasmModuleRoot common.Hash,
@@ -324,31 +324,30 @@ func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum ui
 		return false, nil, err
 	}
 	preimages := make(daprovider.PreimagesMap)
-	if len(postedData) > 40 {
-		foundDA := false
-		for _, dapReader := range v.dapReaders {
-			if dapReader != nil && dapReader.IsValidHeaderByte(ctx, postedData[40]) {
-				var err error
-				var preimagesRecorded daprovider.PreimagesMap
-				_, preimagesRecorded, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, postedData, preimages, true)
-				if err != nil {
-					// Matches the way keyset validation was done inside DAS readers i.e logging the error
-					// But other daproviders might just want to return the error
-					if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(postedData[40]) {
-						log.Error(err.Error())
-					} else {
-						return false, nil, err
-					}
+	if len(postedData) > 40 && v.dapReaders != nil { // assumes byte 40 is the DA header byte following the 40-byte batch prefix — TODO confirm
+		headerByte := postedData[40]
+		if dapReader, found := v.dapReaders.GetByHeaderByte(headerByte); found {
+			promise := dapReader.CollectPreimages(batchNum, batchBlockHash, postedData)
+			result, err := promise.Await(ctx)
+			if err != nil {
+				// Matches the way keyset validation was done inside DAS readers i.e logging the error
+				// But other daproviders might just want to return the error
+				if strings.Contains(err.Error(), daprovider.ErrSeqMsgValidation.Error()) && daprovider.IsDASMessageHeaderByte(headerByte) {
+					log.Error(err.Error())
 				} else {
-					preimages = preimagesRecorded
+					return false, nil, err
 				}
-				foundDA = true
-				break
+			} else {
+				preimages = result.Preimages
 			}
-		}
-		if !foundDA {
-			if daprovider.IsDASMessageHeaderByte(postedData[40]) {
-				log.Error("No DAS Reader configured, but sequencer message found with DAS header")
+		} else {
+			// No reader found for this header byte - check if it's a known type
+			if daprovider.IsDASMessageHeaderByte(headerByte) {
+				log.Error("No DAS Reader configured for DAS message", "headerByte", fmt.Sprintf("0x%02x", headerByte))
+			} else if daprovider.IsBlobHashesHeaderByte(headerByte) {
+				log.Error("No Blob Reader configured for blob message", "headerByte", fmt.Sprintf("0x%02x", headerByte))
+			} else if daprovider.IsDACertificateMessageHeaderByte(headerByte) {
+				log.Error("No DACertificate Reader configured for certificate message", "headerByte", fmt.Sprintf("0x%02x", headerByte))
 			}
 		}
 	}
diff --git a/util/containers/promise.go b/util/containers/promise.go
index a66a82f960..6bc0644478 100644
--- a/util/containers/promise.go
+++ b/util/containers/promise.go
@@ -107,6 +107,12 @@ func NewPromise[R any](cancel func()) Promise[R] {
 	}
 }
 
+func NewPromiseWithContext[R any](parentCtx context.Context) (*Promise[R], context.Context) { // NewPromiseWithContext derives a cancellable context whose cancel fires when the promise completes or is cancelled
+	ctx, cancel := context.WithCancel(parentCtx)
+	promise := NewPromise[R](cancel)
+	return &promise, ctx
+}
+
 func NewReadyPromise[R any](val R, err error) PromiseInterface[R] {
 	promise := NewPromise[R](nil)
 	if err == nil {