diff --git a/.github/build-platforms.yml b/.github/build-platforms.yml
new file mode 100644
index 00000000000..456489e6031
--- /dev/null
+++ b/.github/build-platforms.yml
@@ -0,0 +1,17 @@
+# Build platforms configuration for Kubo
+# Matches https://github.com/ipfs/distributions/blob/master/dists/kubo/build_matrix
+# plus linux-riscv64 for emerging architecture support
+#
+# The Go compiler handles FUSE support automatically via build tags.
+# Platforms are simply listed - no need to specify FUSE capability.
+
+platforms:
+ - darwin-amd64
+ - darwin-arm64
+ - freebsd-amd64
+ - linux-amd64
+ - linux-arm64
+ - linux-riscv64
+ - openbsd-amd64
+ - windows-amd64
+ - windows-arm64
\ No newline at end of file
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 904b7815ab1..f6fd1cfbe2d 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -32,9 +32,9 @@ jobs:
uses: actions/checkout@v5
- name: Setup Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
- go-version: 1.25.x
+ go-version-file: 'go.mod'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/docker-check.yml b/.github/workflows/docker-check.yml
index e11f9830da7..1af049f9ce1 100644
--- a/.github/workflows/docker-check.yml
+++ b/.github/workflows/docker-check.yml
@@ -23,7 +23,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v5
- - uses: hadolint/hadolint-action@v3.1.0
+ - uses: hadolint/hadolint-action@v3.3.0
with:
dockerfile: Dockerfile
failure-threshold: warning
diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml
index 18d1959e6d8..3518afad779 100644
--- a/.github/workflows/gateway-conformance.yml
+++ b/.github/workflows/gateway-conformance.yml
@@ -46,17 +46,16 @@ jobs:
output: fixtures
# 2. Build the kubo-gateway
- - name: Setup Go
- uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- - uses: protocol/cache-go-action@v1
- with:
- name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v5
with:
path: kubo-gateway
+ - name: Setup Go
+ uses: actions/setup-go@v6
+ with:
+ go-version-file: 'kubo-gateway/go.mod'
+ cache: true
+ cache-dependency-path: kubo-gateway/go.sum
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway
@@ -133,17 +132,16 @@ jobs:
output: fixtures
# 2. Build the kubo-gateway
- - name: Setup Go
- uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- - uses: protocol/cache-go-action@v1
- with:
- name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v5
with:
path: kubo-gateway
+ - name: Setup Go
+ uses: actions/setup-go@v6
+ with:
+ go-version-file: 'kubo-gateway/go.mod'
+ cache: true
+ cache-dependency-path: kubo-gateway/go.sum
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway
diff --git a/.github/workflows/gobuild.yml b/.github/workflows/gobuild.yml
index 48665074fef..d3acffdd2c4 100644
--- a/.github/workflows/gobuild.yml
+++ b/.github/workflows/gobuild.yml
@@ -21,20 +21,38 @@ jobs:
env:
TEST_DOCKER: 0
TEST_VERBOSE: 1
- TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- - uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- uses: actions/checkout@v5
- - run: make cmd/ipfs-try-build
- env:
- TEST_FUSE: 1
- - run: make cmd/ipfs-try-build
- env:
- TEST_FUSE: 0
+ - uses: actions/setup-go@v6
+ with:
+ go-version-file: 'go.mod'
+ cache: true
+ cache-dependency-path: go.sum
+
+ - name: Build all platforms
+ run: |
+ # Read platforms from build-platforms.yml and build each one
+ echo "Building kubo for all platforms..."
+
+ # Read and build each platform
+ grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' | while read -r platform; do
+ if [ -z "$platform" ]; then
+ continue
+ fi
+
+ echo "::group::Building $platform"
+ GOOS=$(echo "$platform" | cut -d- -f1)
+ GOARCH=$(echo "$platform" | cut -d- -f2)
+
+ echo "Building $platform"
+ echo " GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs"
+ GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs
+ echo "::endgroup::"
+ done
+
+ echo "All platforms built successfully"
\ No newline at end of file
diff --git a/.github/workflows/golang-analysis.yml b/.github/workflows/golang-analysis.yml
index bb1a49570b0..676f23a4d18 100644
--- a/.github/workflows/golang-analysis.yml
+++ b/.github/workflows/golang-analysis.yml
@@ -25,9 +25,9 @@ jobs:
- uses: actions/checkout@v5
with:
submodules: recursive
- - uses: actions/setup-go@v5
+ - uses: actions/setup-go@v6
with:
- go-version: "1.25.x"
+ go-version-file: 'go.mod'
- name: Check that go.mod is tidy
uses: protocol/multiple-go-modules@v1.4
with:
diff --git a/.github/workflows/golint.yml b/.github/workflows/golint.yml
index 898e3e9363a..0bee5a5cb0f 100644
--- a/.github/workflows/golint.yml
+++ b/.github/workflows/golint.yml
@@ -22,15 +22,14 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
- TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- - uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- uses: actions/checkout@v5
+ - uses: actions/setup-go@v6
+ with:
+ go-version-file: 'go.mod'
- run: make -O test_go_lint
diff --git a/.github/workflows/gotest.yml b/.github/workflows/gotest.yml
index 34d86352b24..f08fcaac430 100644
--- a/.github/workflows/gotest.yml
+++ b/.github/workflows/gotest.yml
@@ -22,19 +22,18 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
- TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- - name: Set up Go
- uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- name: Check out Kubo
uses: actions/checkout@v5
+ - name: Set up Go
+ uses: actions/setup-go@v6
+ with:
+ go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
@@ -45,7 +44,7 @@ jobs:
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
- uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
+ uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: unittests
diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml
index 2f7c7a78c78..d0f3b9a79c5 100644
--- a/.github/workflows/interop.yml
+++ b/.github/workflows/interop.yml
@@ -9,9 +9,6 @@ on:
branches:
- 'master'
-env:
- GO_VERSION: 1.25.x
-
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
@@ -29,17 +26,16 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
- TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- - uses: actions/setup-go@v5
- with:
- go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v5
+ - uses: actions/setup-go@v6
+ with:
+ go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v4
with:
@@ -53,7 +49,7 @@ jobs:
run:
shell: bash
steps:
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@v5
with:
node-version: lts/*
- uses: actions/download-artifact@v5
@@ -82,14 +78,13 @@ jobs:
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
E2E_IPFSD_TYPE: go
- TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@v5
with:
node-version: 20.x
- uses: actions/download-artifact@v5
diff --git a/.github/workflows/sharness.yml b/.github/workflows/sharness.yml
index 9295bc1c134..8c0c39130c0 100644
--- a/.github/workflows/sharness.yml
+++ b/.github/workflows/sharness.yml
@@ -4,10 +4,10 @@ on:
workflow_dispatch:
pull_request:
paths-ignore:
- - '**/*.md'
+ - "**/*.md"
push:
branches:
- - 'master'
+ - "master"
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
@@ -22,14 +22,14 @@ jobs:
run:
shell: bash
steps:
- - name: Setup Go
- uses: actions/setup-go@v5
- with:
- go-version: 1.25.x
- name: Checkout Kubo
uses: actions/checkout@v5
with:
path: kubo
+ - name: Setup Go
+ uses: actions/setup-go@v6
+ with:
+ go-version-file: 'kubo/go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
- uses: actions/cache@v4
@@ -55,7 +55,7 @@ jobs:
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
- uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
+ uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: sharness
diff --git a/.github/workflows/sync-release-assets.yml b/.github/workflows/sync-release-assets.yml
index 0d5c8199b65..c8ba7338c73 100644
--- a/.github/workflows/sync-release-assets.yml
+++ b/.github/workflows/sync-release-assets.yml
@@ -22,11 +22,11 @@ jobs:
- uses: ipfs/start-ipfs-daemon-action@v1
with:
args: --init --init-profile=flatfs,server --enable-gc=false
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@v5
with:
node-version: 14
- name: Sync the latest 5 github releases
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const fs = require('fs').promises
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0dec582e37f..eefffc3e262 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
# Kubo Changelogs
+- [v0.38](docs/changelogs/v0.38.md)
- [v0.37](docs/changelogs/v0.37.md)
- [v0.36](docs/changelogs/v0.36.md)
- [v0.35](docs/changelogs/v0.35.md)
diff --git a/README.md b/README.md
index ed1ffe90467..b7acab5b5dd 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-
+
Kubo: IPFS Implementation in GO
diff --git a/Rules.mk b/Rules.mk
index ef88bee0f74..d8f16ada8d9 100644
--- a/Rules.mk
+++ b/Rules.mk
@@ -107,8 +107,8 @@ uninstall:
.PHONY: uninstall
supported:
- @echo "Currently supported platforms:"
- @for p in ${SUPPORTED_PLATFORMS}; do echo $$p; done
+ @echo "Currently supported platforms (from .github/build-platforms.yml):"
+ @grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' || (echo "Error: .github/build-platforms.yml not found"; exit 1)
.PHONY: supported
help:
@@ -138,7 +138,8 @@ help:
@echo ' test_short - Run short go tests and short sharness tests'
@echo ' test_go_short - Run short go tests'
@echo ' test_go_test - Run all go tests'
- @echo ' test_go_expensive - Run all go tests and compile on all platforms'
+ @echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
+ @echo ' test_go_expensive - Run all go tests and build all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_sharness - Run sharness tests'
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 5f2907d0079..00000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Notes:
-# - Minimal appveyor.yml file is an empty file. All sections are optional.
-# - Indent each level of configuration with 2 spaces. Do not use tabs!
-# - All section names are case-sensitive.
-# - Section names should be unique on each level.
-
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\ipfs\go-ipfs
-
-environment:
- GOPATH: c:\gopath
- TEST_VERBOSE: 1
- #TEST_NO_FUSE: 1
- #TEST_SUITE: test_sharness
- #GOFLAGS: -tags nofuse
- global:
- BASH: C:\cygwin\bin\bash
- matrix:
- - GOARCH: amd64
- GOVERSION: 1.5.1
- GOROOT: c:\go
- DOWNLOADPLATFORM: "x64"
-
-install:
- # Enable make
- #- SET PATH=c:\MinGW\bin;%PATH%
- #- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
- - go version
- - go env
-
-# Cygwin build script
-#
-# NOTES:
-#
-# The stdin/stdout file descriptor appears not to be valid for the Appveyor
-# build which causes failures as certain functions attempt to redirect
-# default file handles. Ensure a dummy file descriptor is opened with 'exec'.
-#
-build_script:
- - '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0 Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
}
- // Check for deprecated "flat" strategy
- if cfg.Reprovider.Strategy.WithDefault("") == "flat" {
- log.Error("Reprovider.Strategy='flat' is deprecated and will be removed in the next release. Please update your config to use 'all' instead.")
+ // Check for deprecated "flat" strategy (should have been migrated to "all")
+ if cfg.Provide.Strategy.WithDefault("") == "flat" {
+ log.Fatal("Provide.Strategy='flat' is no longer supported. Use 'all' instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy")
}
if cfg.Experimental.StrategicProviding {
- log.Error("Experimental.StrategicProviding was removed. Remove it from your config and set Provider.Enabled=false to remove this message. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
- cfg.Experimental.StrategicProviding = false
- cfg.Provider.Enabled = config.False
+ log.Fatal("Experimental.StrategicProviding was removed. Remove it from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
+ }
+ // Check for invalid MaxWorkers=0 with SweepEnabled
+ if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) &&
+ cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers) == 0 {
+ log.Fatal("Invalid configuration: Provide.DHT.MaxWorkers cannot be 0 when Provide.DHT.SweepEnabled=true. Set Provide.DHT.MaxWorkers to a positive value (e.g., 16) to control resource usage. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers")
}
if routingOption == routingOptionDelegatedKwd {
// Delegated routing is read-only mode - content providing must be disabled
- if cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
- log.Fatal("Routing.Type=delegated does not support content providing. Set Provider.Enabled=false in your config.")
- }
- if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0 {
- log.Fatal("Routing.Type=delegated does not support content providing. Set Reprovider.Interval='0' in your config.")
+ if cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+ log.Fatal("Routing.Type=delegated does not support content providing. Set Provide.Enabled=false in your config.")
}
}
@@ -649,7 +685,7 @@ take effect.
if !offline {
// Warn users when provide systems are disabled
- if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
+ if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
fmt.Print(`
⚠️ Provide and Reprovide systems are disabled due to 'Provide.Enabled=false'
@@ -657,12 +693,12 @@ take effect.
⚠️ If this is not intentional, call 'ipfs config profile apply announce-on' or set Provide.Enabled=true'
`)
- } else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
+ } else if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
fmt.Print(`
-⚠️ Provide and Reprovide systems are disabled due to 'Reprovider.Interval=0'
-⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
-⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
+⚠️ Providing to the DHT is disabled due to 'Provide.DHT.Interval=0'
+⚠️ Local CIDs will not be provided to Amino DHT, making them impossible to retrieve without manual peering
+⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Provide.DHT.Interval=22h'
`)
}
diff --git a/cmd/ipfs/kubo/daemon_linux.go b/cmd/ipfs/kubo/daemon_linux.go
index b612738a275..2335dd2b93d 100644
--- a/cmd/ipfs/kubo/daemon_linux.go
+++ b/cmd/ipfs/kubo/daemon_linux.go
@@ -1,5 +1,4 @@
//go:build linux
-// +build linux
package kubo
diff --git a/cmd/ipfs/kubo/daemon_other.go b/cmd/ipfs/kubo/daemon_other.go
index c5b24053d94..6fbc302591b 100644
--- a/cmd/ipfs/kubo/daemon_other.go
+++ b/cmd/ipfs/kubo/daemon_other.go
@@ -1,5 +1,4 @@
//go:build !linux
-// +build !linux
package kubo
diff --git a/cmd/ipfs/runmain_test.go b/cmd/ipfs/runmain_test.go
index a37ec194c74..56a647f8a81 100644
--- a/cmd/ipfs/runmain_test.go
+++ b/cmd/ipfs/runmain_test.go
@@ -1,5 +1,4 @@
//go:build testrunmain
-// +build testrunmain
package main_test
diff --git a/cmd/ipfs/util/signal.go b/cmd/ipfs/util/signal.go
index 2cfd0d5bd2d..51c9d5acb80 100644
--- a/cmd/ipfs/util/signal.go
+++ b/cmd/ipfs/util/signal.go
@@ -1,5 +1,4 @@
//go:build !wasm
-// +build !wasm
package util
@@ -64,13 +63,7 @@ func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
switch count {
case 1:
fmt.Println() // Prevent un-terminated ^C character in terminal
-
- ih.wg.Add(1)
- go func() {
- defer ih.wg.Done()
- cancelFunc()
- }()
-
+ cancelFunc()
default:
fmt.Println("Received another interrupt before graceful shutdown, terminating...")
os.Exit(-1)
diff --git a/cmd/ipfs/util/ui.go b/cmd/ipfs/util/ui.go
index cf8ad506744..f39f1e17104 100644
--- a/cmd/ipfs/util/ui.go
+++ b/cmd/ipfs/util/ui.go
@@ -1,5 +1,4 @@
//go:build !windows
-// +build !windows
package util
diff --git a/cmd/ipfs/util/ulimit_freebsd.go b/cmd/ipfs/util/ulimit_freebsd.go
index 27b31349b4b..358bccfe3bf 100644
--- a/cmd/ipfs/util/ulimit_freebsd.go
+++ b/cmd/ipfs/util/ulimit_freebsd.go
@@ -1,5 +1,4 @@
//go:build freebsd
-// +build freebsd
package util
diff --git a/cmd/ipfs/util/ulimit_test.go b/cmd/ipfs/util/ulimit_test.go
index bef480fffbf..33b077776ed 100644
--- a/cmd/ipfs/util/ulimit_test.go
+++ b/cmd/ipfs/util/ulimit_test.go
@@ -1,5 +1,4 @@
//go:build !windows && !plan9
-// +build !windows,!plan9
package util
diff --git a/cmd/ipfs/util/ulimit_unix.go b/cmd/ipfs/util/ulimit_unix.go
index d3b0ec43c89..b223de0ff46 100644
--- a/cmd/ipfs/util/ulimit_unix.go
+++ b/cmd/ipfs/util/ulimit_unix.go
@@ -1,5 +1,4 @@
//go:build darwin || linux || netbsd || openbsd
-// +build darwin linux netbsd openbsd
package util
diff --git a/cmd/ipfs/util/ulimit_windows.go b/cmd/ipfs/util/ulimit_windows.go
index 5dbfd26f7d7..cd1447365f1 100644
--- a/cmd/ipfs/util/ulimit_windows.go
+++ b/cmd/ipfs/util/ulimit_windows.go
@@ -1,5 +1,4 @@
//go:build windows
-// +build windows
package util
diff --git a/cmd/ipfswatch/ipfswatch_test.go b/cmd/ipfswatch/ipfswatch_test.go
index 75d0075216b..ac68e96cc76 100644
--- a/cmd/ipfswatch/ipfswatch_test.go
+++ b/cmd/ipfswatch/ipfswatch_test.go
@@ -1,5 +1,4 @@
//go:build !plan9
-// +build !plan9
package main
diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go
index 3178cf56432..79ea24224d0 100644
--- a/cmd/ipfswatch/main.go
+++ b/cmd/ipfswatch/main.go
@@ -1,5 +1,4 @@
//go:build !plan9
-// +build !plan9
package main
@@ -13,6 +12,7 @@ import (
"syscall"
commands "github.com/ipfs/kubo/commands"
+ "github.com/ipfs/kubo/config"
core "github.com/ipfs/kubo/core"
coreapi "github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
@@ -25,10 +25,18 @@ import (
var (
http = flag.Bool("http", false, "expose IPFS HTTP API")
- repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
+ repoPath *string
watchPath = flag.String("path", ".", "the path to watch")
)
+func init() {
+ ipfsPath, err := config.PathRoot()
+ if err != nil {
+ ipfsPath = os.Getenv(config.EnvDir)
+ }
+ repoPath = flag.String("repo", ipfsPath, "repo path to use")
+}
+
func main() {
flag.Parse()
diff --git a/config/config.go b/config/config.go
index 3236ad003f0..045ca784b73 100644
--- a/config/config.go
+++ b/config/config.go
@@ -35,8 +35,9 @@ type Config struct {
Migration Migration
AutoConf AutoConf
- Provider Provider
- Reprovider Reprovider
+ Provide Provide // Merged Provider and Reprovider configuration
+ Provider Provider // Deprecated: use Provide. Will be removed in a future release.
+ Reprovider Reprovider // Deprecated: use Provide. Will be removed in a future release.
HTTPRetrieval HTTPRetrieval
Experimental Experiments
Plugins Plugins
diff --git a/config/config_test.go b/config/config_test.go
index 16573504370..b1637bceffb 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -134,14 +134,24 @@ func TestCheckKey(t *testing.T) {
t.Fatal("Foo.Bar isn't a valid key in the config")
}
- err = CheckKey("Reprovider.Strategy")
+ err = CheckKey("Provide.Strategy")
if err != nil {
- t.Fatalf("%s: %s", err, "Reprovider.Strategy is a valid key in the config")
+ t.Fatalf("%s: %s", err, "Provide.Strategy is a valid key in the config")
}
- err = CheckKey("Provider.Foo")
+ err = CheckKey("Provide.DHT.MaxWorkers")
+ if err != nil {
+ t.Fatalf("%s: %s", err, "Provide.DHT.MaxWorkers is a valid key in the config")
+ }
+
+ err = CheckKey("Provide.DHT.Interval")
+ if err != nil {
+ t.Fatalf("%s: %s", err, "Provide.DHT.Interval is a valid key in the config")
+ }
+
+ err = CheckKey("Provide.Foo")
if err == nil {
- t.Fatal("Provider.Foo isn't a valid key in the config")
+ t.Fatal("Provide.Foo isn't a valid key in the config")
}
err = CheckKey("Gateway.PublicGateways.Foo.Paths")
diff --git a/config/gateway.go b/config/gateway.go
index 56eb0c39595..92811ee49d4 100644
--- a/config/gateway.go
+++ b/config/gateway.go
@@ -9,6 +9,7 @@ const (
DefaultDeserializedResponses = true
DefaultDisableHTMLErrors = false
DefaultExposeRoutingAPI = false
+ DefaultDiagnosticServiceURL = "https://check.ipfs.network"
// Gateway limit defaults from boxo
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
@@ -98,4 +99,10 @@ type Gateway struct {
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
// A value of 0 disables the limit.
MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
+
+ // DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues.
+ // When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID"
+ // button will be shown that links to this service with the CID appended as ?cid=.
+ // Set to empty string to disable the button.
+ DiagnosticServiceURL *OptionalString `json:",omitempty"`
}
diff --git a/config/import.go b/config/import.go
index c5191728608..e4af253ef4e 100644
--- a/config/import.go
+++ b/config/import.go
@@ -1,8 +1,14 @@
package config
import (
+ "fmt"
+ "strconv"
+ "strings"
+
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/io"
+ "github.com/ipfs/boxo/verifcid"
+ mh "github.com/multiformats/go-multihash"
)
const (
@@ -43,3 +49,132 @@ type Import struct {
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
}
+
+// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
+// See: https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters
+func ValidateImportConfig(cfg *Import) error {
+ // Validate CidVersion
+ if !cfg.CidVersion.IsDefault() {
+ cidVer := cfg.CidVersion.WithDefault(DefaultCidVersion)
+ if cidVer != 0 && cidVer != 1 {
+ return fmt.Errorf("Import.CidVersion must be 0 or 1, got %d", cidVer)
+ }
+ }
+
+ // Validate UnixFSFileMaxLinks
+ if !cfg.UnixFSFileMaxLinks.IsDefault() {
+ maxLinks := cfg.UnixFSFileMaxLinks.WithDefault(DefaultUnixFSFileMaxLinks)
+ if maxLinks <= 0 {
+ return fmt.Errorf("Import.UnixFSFileMaxLinks must be positive, got %d", maxLinks)
+ }
+ }
+
+ // Validate UnixFSDirectoryMaxLinks
+ if !cfg.UnixFSDirectoryMaxLinks.IsDefault() {
+ maxLinks := cfg.UnixFSDirectoryMaxLinks.WithDefault(DefaultUnixFSDirectoryMaxLinks)
+ if maxLinks < 0 {
+ return fmt.Errorf("Import.UnixFSDirectoryMaxLinks must be non-negative, got %d", maxLinks)
+ }
+ }
+
+ // Validate UnixFSHAMTDirectoryMaxFanout if set
+ if !cfg.UnixFSHAMTDirectoryMaxFanout.IsDefault() {
+ fanout := cfg.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout)
+
+ // Check all requirements: fanout < 8 covers both non-positive and non-multiple of 8
+ // Combined with power of 2 check and max limit, this ensures valid values: 8, 16, 32, 64, 128, 256, 512, 1024
+ if fanout < 8 || !isPowerOfTwo(fanout) || fanout > 1024 {
+ return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a positive power of 2, multiple of 8, and not exceed 1024 (got %d)", fanout)
+ }
+ }
+
+ // Validate BatchMaxNodes
+ if !cfg.BatchMaxNodes.IsDefault() {
+ maxNodes := cfg.BatchMaxNodes.WithDefault(DefaultBatchMaxNodes)
+ if maxNodes <= 0 {
+ return fmt.Errorf("Import.BatchMaxNodes must be positive, got %d", maxNodes)
+ }
+ }
+
+ // Validate BatchMaxSize
+ if !cfg.BatchMaxSize.IsDefault() {
+ maxSize := cfg.BatchMaxSize.WithDefault(DefaultBatchMaxSize)
+ if maxSize <= 0 {
+ return fmt.Errorf("Import.BatchMaxSize must be positive, got %d", maxSize)
+ }
+ }
+
+ // Validate UnixFSChunker format
+ if !cfg.UnixFSChunker.IsDefault() {
+ chunker := cfg.UnixFSChunker.WithDefault(DefaultUnixFSChunker)
+ if !isValidChunker(chunker) {
+ return fmt.Errorf("Import.UnixFSChunker invalid format: %q (expected \"size-\", \"rabin---\", or \"buzhash\")", chunker)
+ }
+ }
+
+ // Validate HashFunction
+ if !cfg.HashFunction.IsDefault() {
+ hashFunc := cfg.HashFunction.WithDefault(DefaultHashFunction)
+ hashCode, ok := mh.Names[strings.ToLower(hashFunc)]
+ if !ok {
+ return fmt.Errorf("Import.HashFunction unrecognized: %q", hashFunc)
+ }
+ // Check if the hash is allowed by verifcid
+ if !verifcid.DefaultAllowlist.IsAllowed(hashCode) {
+ return fmt.Errorf("Import.HashFunction %q is not allowed for use in IPFS", hashFunc)
+ }
+ }
+
+ return nil
+}
+
+// isPowerOfTwo checks if a number is a power of 2
+func isPowerOfTwo(n int64) bool {
+ return n > 0 && (n&(n-1)) == 0
+}
+
+// isValidChunker validates chunker format
+func isValidChunker(chunker string) bool {
+ if chunker == "buzhash" {
+ return true
+ }
+
+ // Check for size- format
+ if strings.HasPrefix(chunker, "size-") {
+ sizeStr := strings.TrimPrefix(chunker, "size-")
+ if sizeStr == "" {
+ return false
+ }
+ // Check if it's a valid positive integer (no negative sign allowed)
+ if sizeStr[0] == '-' {
+ return false
+ }
+ size, err := strconv.Atoi(sizeStr)
+ // Size must be positive (not zero)
+ return err == nil && size > 0
+ }
+
+ // Check for rabin--- format
+ if strings.HasPrefix(chunker, "rabin-") {
+ parts := strings.Split(chunker, "-")
+ if len(parts) != 4 {
+ return false
+ }
+
+ // Parse and validate min, avg, max values
+ values := make([]int, 3)
+ for i := 0; i < 3; i++ {
+ val, err := strconv.Atoi(parts[i+1])
+ if err != nil {
+ return false
+ }
+ values[i] = val
+ }
+
+ // Validate ordering: min <= avg <= max
+ min, avg, max := values[0], values[1], values[2]
+ return min <= avg && avg <= max
+ }
+
+ return false
+}
diff --git a/config/import_test.go b/config/import_test.go
new file mode 100644
index 00000000000..f045b9751f6
--- /dev/null
+++ b/config/import_test.go
@@ -0,0 +1,408 @@
+package config
+
+import (
+ "strings"
+ "testing"
+
+ mh "github.com/multiformats/go-multihash"
+)
+
+func TestValidateImportConfig_HAMTFanout(t *testing.T) {
+ tests := []struct {
+ name string
+ fanout int64
+ wantErr bool
+ errMsg string
+ }{
+ // Valid values - powers of 2, multiples of 8, and <= 1024
+ {name: "valid 8", fanout: 8, wantErr: false},
+ {name: "valid 16", fanout: 16, wantErr: false},
+ {name: "valid 32", fanout: 32, wantErr: false},
+ {name: "valid 64", fanout: 64, wantErr: false},
+ {name: "valid 128", fanout: 128, wantErr: false},
+ {name: "valid 256", fanout: 256, wantErr: false},
+ {name: "valid 512", fanout: 512, wantErr: false},
+ {name: "valid 1024", fanout: 1024, wantErr: false},
+
+ // Invalid values - not powers of 2
+ {name: "invalid 7", fanout: 7, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 15", fanout: 15, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 100", fanout: 100, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 257", fanout: 257, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 1000", fanout: 1000, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+
+ // Invalid values - powers of 2 but not multiples of 8
+ {name: "invalid 1", fanout: 1, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 2", fanout: 2, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 4", fanout: 4, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+
+ // Invalid values - exceeds 1024
+ {name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+
+ // Invalid values - negative or zero
+ {name: "invalid 0", fanout: 0, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid -8", fanout: -8, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ {name: "invalid -256", fanout: -256, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(tt.fanout),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for fanout=%d, got nil", tt.fanout)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for fanout=%d: %v", tt.fanout, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_CidVersion(t *testing.T) {
+ tests := []struct {
+ name string
+ cidVer int64
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid 0", cidVer: 0, wantErr: false},
+ {name: "valid 1", cidVer: 1, wantErr: false},
+ {name: "invalid 2", cidVer: 2, wantErr: true, errMsg: "must be 0 or 1"},
+ {name: "invalid -1", cidVer: -1, wantErr: true, errMsg: "must be 0 or 1"},
+ {name: "invalid 100", cidVer: 100, wantErr: true, errMsg: "must be 0 or 1"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ CidVersion: *NewOptionalInteger(tt.cidVer),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for cidVer=%d, got nil", tt.cidVer)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for cidVer=%d: %v", tt.cidVer, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_UnixFSFileMaxLinks(t *testing.T) {
+ tests := []struct {
+ name string
+ maxLinks int64
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid 1", maxLinks: 1, wantErr: false},
+ {name: "valid 174", maxLinks: 174, wantErr: false},
+ {name: "valid 1000", maxLinks: 1000, wantErr: false},
+ {name: "invalid 0", maxLinks: 0, wantErr: true, errMsg: "must be positive"},
+ {name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be positive"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ UnixFSFileMaxLinks: *NewOptionalInteger(tt.maxLinks),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_UnixFSDirectoryMaxLinks(t *testing.T) {
+ tests := []struct {
+ name string
+ maxLinks int64
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid 0", maxLinks: 0, wantErr: false}, // 0 means no limit
+ {name: "valid 1", maxLinks: 1, wantErr: false},
+ {name: "valid 1000", maxLinks: 1000, wantErr: false},
+ {name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be non-negative"},
+ {name: "invalid -100", maxLinks: -100, wantErr: true, errMsg: "must be non-negative"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ UnixFSDirectoryMaxLinks: *NewOptionalInteger(tt.maxLinks),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_BatchMax(t *testing.T) {
+ tests := []struct {
+ name string
+ maxNodes int64
+ maxSize int64
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid nodes 1", maxNodes: 1, maxSize: -999, wantErr: false},
+ {name: "valid nodes 128", maxNodes: 128, maxSize: -999, wantErr: false},
+ {name: "valid size 1", maxNodes: -999, maxSize: 1, wantErr: false},
+ {name: "valid size 20MB", maxNodes: -999, maxSize: 20 << 20, wantErr: false},
+ {name: "invalid nodes 0", maxNodes: 0, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
+ {name: "invalid nodes -1", maxNodes: -1, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
+ {name: "invalid size 0", maxNodes: -999, maxSize: 0, wantErr: true, errMsg: "BatchMaxSize must be positive"},
+ {name: "invalid size -1", maxNodes: -999, maxSize: -1, wantErr: true, errMsg: "BatchMaxSize must be positive"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{}
+ if tt.maxNodes != -999 {
+ cfg.BatchMaxNodes = *NewOptionalInteger(tt.maxNodes)
+ }
+ if tt.maxSize != -999 {
+ cfg.BatchMaxSize = *NewOptionalInteger(tt.maxSize)
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error, got nil")
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_UnixFSChunker(t *testing.T) {
+ tests := []struct {
+ name string
+ chunker string
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid size-262144", chunker: "size-262144", wantErr: false},
+ {name: "valid size-1", chunker: "size-1", wantErr: false},
+ {name: "valid size-1048576", chunker: "size-1048576", wantErr: false},
+ {name: "valid rabin", chunker: "rabin-128-256-512", wantErr: false},
+ {name: "valid rabin min", chunker: "rabin-16-32-64", wantErr: false},
+ {name: "valid buzhash", chunker: "buzhash", wantErr: false},
+ {name: "invalid size-", chunker: "size-", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid size-abc", chunker: "size-abc", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid rabin-", chunker: "rabin-", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid rabin-128", chunker: "rabin-128", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid rabin-128-256", chunker: "rabin-128-256", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid rabin-a-b-c", chunker: "rabin-a-b-c", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid unknown", chunker: "unknown", wantErr: true, errMsg: "invalid format"},
+ {name: "invalid empty", chunker: "", wantErr: true, errMsg: "invalid format"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ UnixFSChunker: *NewOptionalString(tt.chunker),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for chunker=%s, got nil", tt.chunker)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for chunker=%s: %v", tt.chunker, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_HashFunction(t *testing.T) {
+ tests := []struct {
+ name string
+ hashFunc string
+ wantErr bool
+ errMsg string
+ }{
+ {name: "valid sha2-256", hashFunc: "sha2-256", wantErr: false},
+ {name: "valid sha2-512", hashFunc: "sha2-512", wantErr: false},
+ {name: "valid sha3-256", hashFunc: "sha3-256", wantErr: false},
+ {name: "valid blake2b-256", hashFunc: "blake2b-256", wantErr: false},
+ {name: "valid blake3", hashFunc: "blake3", wantErr: false},
+ {name: "invalid unknown", hashFunc: "unknown-hash", wantErr: true, errMsg: "unrecognized"},
+ {name: "invalid empty", hashFunc: "", wantErr: true, errMsg: "unrecognized"},
+ }
+
+ // Check for hashes that exist but are not allowed
+ // MD5 should exist but not be allowed
+ if code, ok := mh.Names["md5"]; ok {
+ tests = append(tests, struct {
+ name string
+ hashFunc string
+ wantErr bool
+ errMsg string
+ }{name: "md5 not allowed", hashFunc: "md5", wantErr: true, errMsg: "not allowed"})
+ _ = code // use the variable
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Import{
+ HashFunction: *NewOptionalString(tt.hashFunc),
+ }
+
+ err := ValidateImportConfig(cfg)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ValidateImportConfig() expected error for hashFunc=%s, got nil", tt.hashFunc)
+ } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
+ t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for hashFunc=%s: %v", tt.hashFunc, err)
+ }
+ }
+ })
+ }
+}
+
+func TestValidateImportConfig_DefaultValue(t *testing.T) {
+ // Test that default (unset) value doesn't trigger validation
+ cfg := &Import{}
+
+ err := ValidateImportConfig(cfg)
+ if err != nil {
+ t.Errorf("ValidateImportConfig() unexpected error for default config: %v", err)
+ }
+}
+
+func TestIsValidChunker(t *testing.T) {
+ tests := []struct {
+ chunker string
+ want bool
+ }{
+ {"buzhash", true},
+ {"size-262144", true},
+ {"size-1", true},
+ {"size-0", false}, // 0 is not valid - must be positive
+ {"size-9999999", true},
+ {"rabin-128-256-512", true},
+ {"rabin-16-32-64", true},
+ {"rabin-1-2-3", true},
+ {"rabin-512-256-128", false}, // Invalid ordering: min > avg > max
+ {"rabin-256-128-512", false}, // Invalid ordering: min > avg
+ {"rabin-128-512-256", false}, // Invalid ordering: avg > max
+
+ {"", false},
+ {"size-", false},
+ {"size-abc", false},
+ {"size--1", false},
+ {"rabin-", false},
+ {"rabin-128", false},
+ {"rabin-128-256", false},
+ {"rabin-128-256-512-1024", false},
+ {"rabin-a-b-c", false},
+ {"unknown", false},
+ {"buzzhash", false}, // typo
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.chunker, func(t *testing.T) {
+ if got := isValidChunker(tt.chunker); got != tt.want {
+ t.Errorf("isValidChunker(%q) = %v, want %v", tt.chunker, got, tt.want)
+ }
+ })
+ }
+}
+
+func TestIsPowerOfTwo(t *testing.T) {
+ tests := []struct {
+ n int64
+ want bool
+ }{
+ {0, false},
+ {1, true},
+ {2, true},
+ {3, false},
+ {4, true},
+ {5, false},
+ {6, false},
+ {7, false},
+ {8, true},
+ {16, true},
+ {32, true},
+ {64, true},
+ {100, false},
+ {128, true},
+ {256, true},
+ {512, true},
+ {1024, true},
+ {2048, true},
+ {-1, false},
+ {-8, false},
+ }
+
+ for _, tt := range tests {
+ t.Run("", func(t *testing.T) {
+ if got := isPowerOfTwo(tt.n); got != tt.want {
+ t.Errorf("isPowerOfTwo(%d) = %v, want %v", tt.n, got, tt.want)
+ }
+ })
+ }
+}
diff --git a/config/init.go b/config/init.go
index cc7b22ca8a4..0aeffef5f74 100644
--- a/config/init.go
+++ b/config/init.go
@@ -60,10 +60,6 @@ func InitWithIdentity(identity Identity) (*Config, error) {
NoFetch: false,
HTTPHeaders: map[string][]string{},
},
- Reprovider: Reprovider{
- Interval: nil,
- Strategy: nil,
- },
Pinning: Pinning{
RemoteServices: map[string]RemotePinningService{},
},
diff --git a/config/internal.go b/config/internal.go
index 267bb250f0a..f344e5252b6 100644
--- a/config/internal.go
+++ b/config/internal.go
@@ -1,11 +1,23 @@
package config
+const (
+ // DefaultMFSNoFlushLimit is the default limit for consecutive unflushed MFS operations
+ DefaultMFSNoFlushLimit = 256
+)
+
type Internal struct {
// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
Bitswap *InternalBitswap `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` // moved to Import.UnixFSHAMTDirectorySizeThreshold
Libp2pForceReachability *OptionalString `json:",omitempty"`
BackupBootstrapInterval *OptionalDuration `json:",omitempty"`
+ // MFSNoFlushLimit controls the maximum number of consecutive
+ // MFS operations allowed with --flush=false before requiring a manual flush.
+ // This prevents unbounded memory growth and ensures data consistency.
+ // Set to 0 to disable limiting (old behavior, may cause high memory usage)
+ // This is an EXPERIMENTAL feature and may change or be removed in future releases.
+ // See https://github.com/ipfs/kubo/issues/10842
+ MFSNoFlushLimit *OptionalInteger `json:",omitempty"`
}
type InternalBitswap struct {
diff --git a/config/profile.go b/config/profile.go
index 1479bfc13a6..5479c2d6428 100644
--- a/config/profile.go
+++ b/config/profile.go
@@ -275,7 +275,7 @@ fetching may be degraded.
},
},
"announce-off": {
- Description: `Disables Provide and Reprovide systems (announcing to Amino DHT).
+ Description: `Disables Provide system (announcing to Amino DHT).
USE WITH CAUTION:
The main use case for this is setups with manual Peering.Peers config.
@@ -284,16 +284,16 @@ fetching may be degraded.
one hosting it, and other peers are not already connected to it.
`,
Transform: func(c *Config) error {
- c.Provider.Enabled = False
- c.Reprovider.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
+ c.Provide.Enabled = False
+ c.Provide.DHT.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
return nil
},
},
"announce-on": {
- Description: `Re-enables Provide and Reprovide systems (reverts announce-off profile).`,
+ Description: `Re-enables Provide system (reverts announce-off profile).`,
Transform: func(c *Config) error {
- c.Provider.Enabled = True
- c.Reprovider.Interval = NewOptionalDuration(DefaultReproviderInterval) // have to apply explicit default because nil would be ignored
+ c.Provide.Enabled = True
+ c.Provide.DHT.Interval = NewOptionalDuration(DefaultProvideDHTInterval) // have to apply explicit default because nil would be ignored
return nil
},
},
diff --git a/config/provide.go b/config/provide.go
new file mode 100644
index 00000000000..9fc378a32e8
--- /dev/null
+++ b/config/provide.go
@@ -0,0 +1,170 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/libp2p/go-libp2p-kad-dht/amino"
+)
+
+const (
+ DefaultProvideEnabled = true
+ DefaultProvideStrategy = "all"
+
+ // DHT provider defaults
+ DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
+ DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
+ DefaultProvideDHTSweepEnabled = false
+ DefaultProvideDHTDedicatedPeriodicWorkers = 2
+ DefaultProvideDHTDedicatedBurstWorkers = 1
+ DefaultProvideDHTMaxProvideConnsPerWorker = 16
+ DefaultProvideDHTKeystoreBatchSize = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
+ DefaultProvideDHTOfflineDelay = 2 * time.Hour
+)
+
+type ProvideStrategy int
+
+const (
+ ProvideStrategyAll ProvideStrategy = 1 << iota
+ ProvideStrategyPinned
+ ProvideStrategyRoots
+ ProvideStrategyMFS
+)
+
+// Provide configures both immediate CID announcements (provide operations) for new content
+// and periodic re-announcements of existing CIDs (reprovide operations).
+// This section combines the functionality previously split between Provider and Reprovider.
+type Provide struct {
+ // Enabled controls whether both provide and reprovide systems are enabled.
+ // When disabled, the node will not announce any content to the routing system.
+ Enabled Flag `json:",omitempty"`
+
+ // Strategy determines which CIDs are announced to the routing system.
+ // Default: DefaultProvideStrategy
+ Strategy *OptionalString `json:",omitempty"`
+
+ // DHT configures DHT-specific provide and reprovide settings.
+ DHT ProvideDHT
+}
+
+// ProvideDHT configures DHT provider settings for both immediate announcements
+// and periodic reprovides.
+type ProvideDHT struct {
+ // Interval sets the time between rounds of reproviding local content
+ // to the routing system. Set to "0" to disable content reproviding.
+ // Default: DefaultProvideDHTInterval
+ Interval *OptionalDuration `json:",omitempty"`
+
+ // MaxWorkers sets the maximum number of concurrent workers for provide operations.
+ // When SweepEnabled is false: controls NEW CID announcements only.
+ // When SweepEnabled is true: controls total worker pool for all operations.
+ // Default: DefaultProvideDHTMaxWorkers
+ MaxWorkers *OptionalInteger `json:",omitempty"`
+
+ // SweepEnabled activates the sweeping reprovider system which spreads
+ // reprovide operations over time. This will become the default in a future release.
+ // Default: DefaultProvideDHTSweepEnabled
+ SweepEnabled Flag `json:",omitempty"`
+
+ // DedicatedPeriodicWorkers sets workers dedicated to periodic reprovides (sweep mode only).
+ // Default: DefaultProvideDHTDedicatedPeriodicWorkers
+ DedicatedPeriodicWorkers *OptionalInteger `json:",omitempty"`
+
+ // DedicatedBurstWorkers sets workers dedicated to burst provides (sweep mode only).
+ // Default: DefaultProvideDHTDedicatedBurstWorkers
+ DedicatedBurstWorkers *OptionalInteger `json:",omitempty"`
+
+ // MaxProvideConnsPerWorker sets concurrent connections per worker for sending provider records (sweep mode only).
+ // Default: DefaultProvideDHTMaxProvideConnsPerWorker
+ MaxProvideConnsPerWorker *OptionalInteger `json:",omitempty"`
+
+ // KeystoreBatchSize sets the batch size for keystore operations during reprovide refresh (sweep mode only).
+ // Default: DefaultProvideDHTKeystoreBatchSize
+ KeystoreBatchSize *OptionalInteger `json:",omitempty"`
+
+ // OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
+ // Default: DefaultProvideDHTOfflineDelay
+ OfflineDelay *OptionalDuration `json:",omitempty"`
+}
+
+func ParseProvideStrategy(s string) ProvideStrategy {
+ var strategy ProvideStrategy
+ for _, part := range strings.Split(s, "+") {
+ switch part {
+ case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
+ return ProvideStrategyAll
+ case "pinned":
+ strategy |= ProvideStrategyPinned
+ case "roots":
+ strategy |= ProvideStrategyRoots
+ case "mfs":
+ strategy |= ProvideStrategyMFS
+ }
+ }
+ return strategy
+}
+
+// ValidateProvideConfig validates the Provide configuration according to DHT requirements.
+func ValidateProvideConfig(cfg *Provide) error {
+ // Validate Provide.DHT.Interval
+ if !cfg.DHT.Interval.IsDefault() {
+ interval := cfg.DHT.Interval.WithDefault(DefaultProvideDHTInterval)
+ if interval > amino.DefaultProvideValidity {
+ return fmt.Errorf("Provide.DHT.Interval (%v) must be less than or equal to DHT provider record validity (%v)", interval, amino.DefaultProvideValidity)
+ }
+ if interval < 0 {
+ return fmt.Errorf("Provide.DHT.Interval must be non-negative, got %v", interval)
+ }
+ }
+
+ // Validate MaxWorkers
+ if !cfg.DHT.MaxWorkers.IsDefault() {
+ maxWorkers := cfg.DHT.MaxWorkers.WithDefault(DefaultProvideDHTMaxWorkers)
+ if maxWorkers <= 0 {
+ return fmt.Errorf("Provide.DHT.MaxWorkers must be positive, got %d", maxWorkers)
+ }
+ }
+
+ // Validate DedicatedPeriodicWorkers
+ if !cfg.DHT.DedicatedPeriodicWorkers.IsDefault() {
+ workers := cfg.DHT.DedicatedPeriodicWorkers.WithDefault(DefaultProvideDHTDedicatedPeriodicWorkers)
+ if workers < 0 {
+ return fmt.Errorf("Provide.DHT.DedicatedPeriodicWorkers must be non-negative, got %d", workers)
+ }
+ }
+
+ // Validate DedicatedBurstWorkers
+ if !cfg.DHT.DedicatedBurstWorkers.IsDefault() {
+ workers := cfg.DHT.DedicatedBurstWorkers.WithDefault(DefaultProvideDHTDedicatedBurstWorkers)
+ if workers < 0 {
+ return fmt.Errorf("Provide.DHT.DedicatedBurstWorkers must be non-negative, got %d", workers)
+ }
+ }
+
+ // Validate MaxProvideConnsPerWorker
+ if !cfg.DHT.MaxProvideConnsPerWorker.IsDefault() {
+ conns := cfg.DHT.MaxProvideConnsPerWorker.WithDefault(DefaultProvideDHTMaxProvideConnsPerWorker)
+ if conns <= 0 {
+ return fmt.Errorf("Provide.DHT.MaxProvideConnsPerWorker must be positive, got %d", conns)
+ }
+ }
+
+ // Validate KeystoreBatchSize
+ if !cfg.DHT.KeystoreBatchSize.IsDefault() {
+ batchSize := cfg.DHT.KeystoreBatchSize.WithDefault(DefaultProvideDHTKeystoreBatchSize)
+ if batchSize <= 0 {
+ return fmt.Errorf("Provide.DHT.KeystoreBatchSize must be positive, got %d", batchSize)
+ }
+ }
+
+ // Validate OfflineDelay
+ if !cfg.DHT.OfflineDelay.IsDefault() {
+ delay := cfg.DHT.OfflineDelay.WithDefault(DefaultProvideDHTOfflineDelay)
+ if delay < 0 {
+ return fmt.Errorf("Provide.DHT.OfflineDelay must be non-negative, got %v", delay)
+ }
+ }
+
+ return nil
+}
diff --git a/config/provide_test.go b/config/provide_test.go
new file mode 100644
index 00000000000..213271eb014
--- /dev/null
+++ b/config/provide_test.go
@@ -0,0 +1,107 @@
+package config
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseProvideStrategy(t *testing.T) {
+ tests := []struct {
+ input string
+ expect ProvideStrategy
+ }{
+ {"all", ProvideStrategyAll},
+ {"pinned", ProvideStrategyPinned},
+ {"mfs", ProvideStrategyMFS},
+ {"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS},
+ {"invalid", 0},
+ {"all+invalid", ProvideStrategyAll},
+ {"", ProvideStrategyAll},
+ {"flat", ProvideStrategyAll}, // deprecated, maps to "all"
+ {"flat+all", ProvideStrategyAll},
+ }
+
+ for _, tt := range tests {
+ result := ParseProvideStrategy(tt.input)
+ if result != tt.expect {
+ t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
+ }
+ }
+}
+
+func TestValidateProvideConfig_Interval(t *testing.T) {
+ tests := []struct {
+ name string
+ interval time.Duration
+ wantErr bool
+ errMsg string
+ }{
+ {"valid default (22h)", 22 * time.Hour, false, ""},
+ {"valid max (48h)", 48 * time.Hour, false, ""},
+ {"valid small (1h)", 1 * time.Hour, false, ""},
+ {"valid zero (disabled)", 0, false, ""},
+ {"invalid over limit (49h)", 49 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
+ {"invalid over limit (72h)", 72 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
+ {"invalid negative", -1 * time.Hour, true, "must be non-negative"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Provide{
+ DHT: ProvideDHT{
+ Interval: NewOptionalDuration(tt.interval),
+ },
+ }
+
+ err := ValidateProvideConfig(cfg)
+
+ if tt.wantErr {
+ require.Error(t, err, "expected error for interval=%v", tt.interval)
+ if tt.errMsg != "" {
+ assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
+ }
+ } else {
+ require.NoError(t, err, "unexpected error for interval=%v", tt.interval)
+ }
+ })
+ }
+}
+
+func TestValidateProvideConfig_MaxWorkers(t *testing.T) {
+ tests := []struct {
+ name string
+ maxWorkers int64
+ wantErr bool
+ errMsg string
+ }{
+ {"valid default", 16, false, ""},
+ {"valid high", 100, false, ""},
+ {"valid low", 1, false, ""},
+ {"invalid zero", 0, true, "must be positive"},
+ {"invalid negative", -1, true, "must be positive"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &Provide{
+ DHT: ProvideDHT{
+ MaxWorkers: NewOptionalInteger(tt.maxWorkers),
+ },
+ }
+
+ err := ValidateProvideConfig(cfg)
+
+ if tt.wantErr {
+ require.Error(t, err, "expected error for maxWorkers=%d", tt.maxWorkers)
+ if tt.errMsg != "" {
+ assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
+ }
+ } else {
+ require.NoError(t, err, "unexpected error for maxWorkers=%d", tt.maxWorkers)
+ }
+ })
+ }
+}
diff --git a/config/provider.go b/config/provider.go
index 4a2243acb07..e3d5a4052d9 100644
--- a/config/provider.go
+++ b/config/provider.go
@@ -1,14 +1,16 @@
package config
-const (
- DefaultProviderEnabled = true
- DefaultProviderWorkerCount = 16
-)
-
// Provider configuration describes how NEW CIDs are announced the moment they are created.
-// For periodical reprovide configuration, see Reprovider.*
+// For periodical reprovide configuration, see Provide.*
+//
+// Deprecated: use Provide instead. This will be removed in a future release.
type Provider struct {
- Enabled Flag `json:",omitempty"`
- Strategy *OptionalString `json:",omitempty"` // Unused, you are likely looking for Reprovider.Strategy instead
- WorkerCount *OptionalInteger `json:",omitempty"` // Number of concurrent provides allowed, 0 means unlimited
+ // Deprecated: use Provide.Enabled instead. This will be removed in a future release.
+ Enabled Flag `json:",omitempty"`
+
+ // Deprecated: unused, you are likely looking for Provide.Strategy instead. This will be removed in a future release.
+ Strategy *OptionalString `json:",omitempty"`
+
+ // Deprecated: use Provide.DHT.MaxWorkers instead. This will be removed in a future release.
+ WorkerCount *OptionalInteger `json:",omitempty"`
}
diff --git a/config/reprovider.go b/config/reprovider.go
index e7d68736072..0fa5e877a54 100644
--- a/config/reprovider.go
+++ b/config/reprovider.go
@@ -1,44 +1,13 @@
package config
-import (
- "strings"
- "time"
-)
-
-const (
- DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
- DefaultReproviderStrategy = "all"
-)
-
-type ReproviderStrategy int
-
-const (
- ReproviderStrategyAll ReproviderStrategy = 1 << iota
- ReproviderStrategyPinned
- ReproviderStrategyRoots
- ReproviderStrategyMFS
-)
-
// Reprovider configuration describes how CID from local datastore are periodically re-announced to routing systems.
-// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provider.*
+// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provide.*
+//
+// Deprecated: use Provide instead. This will be removed in a future release.
type Reprovider struct {
- Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
- Strategy *OptionalString `json:",omitempty"` // Which keys to announce
-}
+ // Deprecated: use Provide.DHT.Interval instead. This will be removed in a future release.
+ Interval *OptionalDuration `json:",omitempty"`
-func ParseReproviderStrategy(s string) ReproviderStrategy {
- var strategy ReproviderStrategy
- for _, part := range strings.Split(s, "+") {
- switch part {
- case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
- return ReproviderStrategyAll
- case "pinned":
- strategy |= ReproviderStrategyPinned
- case "roots":
- strategy |= ReproviderStrategyRoots
- case "mfs":
- strategy |= ReproviderStrategyMFS
- }
- }
- return strategy
+ // Deprecated: use Provide.Strategy instead. This will be removed in a future release.
+ Strategy *OptionalString `json:",omitempty"`
}
diff --git a/config/reprovider_test.go b/config/reprovider_test.go
deleted file mode 100644
index 20b338eb007..00000000000
--- a/config/reprovider_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package config
-
-import "testing"
-
-func TestParseReproviderStrategy(t *testing.T) {
- tests := []struct {
- input string
- expect ReproviderStrategy
- }{
- {"all", ReproviderStrategyAll},
- {"pinned", ReproviderStrategyPinned},
- {"mfs", ReproviderStrategyMFS},
- {"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS},
- {"invalid", 0},
- {"all+invalid", ReproviderStrategyAll},
- {"", ReproviderStrategyAll},
- {"flat", ReproviderStrategyAll}, // deprecated, maps to "all"
- {"flat+all", ReproviderStrategyAll},
- }
-
- for _, tt := range tests {
- result := ParseReproviderStrategy(tt.input)
- if result != tt.expect {
- t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
- }
- }
-}
diff --git a/config/routing.go b/config/routing.go
index bd234e8a30d..d68016e4edc 100644
--- a/config/routing.go
+++ b/config/routing.go
@@ -214,3 +214,57 @@ func getEnvOrDefault(key string, defaultValue []string) []string {
}
return defaultValue
}
+
+// HasHTTPProviderConfigured checks if the node is configured to use HTTP routers
+// for providing content announcements. This is used when determining if the node
+// can provide content even when not connected to libp2p peers.
+//
+// Note: Right now we only support delegated HTTP content providing if Routing.Type=custom
+// and Routing.Routers are configured according to:
+// https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example
+//
+// This uses the `ProvideBitswap` request type that is not documented anywhere,
+// because we hoped something like IPIP-378 (https://github.com/ipfs/specs/pull/378)
+// would get finalized and we'd switch to that. It never happened due to politics,
+// and now we are stuck with ProvideBitswap being the only API that works.
+// Some people have reverse engineered it (example:
+// https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9)
+// and use it, so what we do here is the bare minimum to ensure their use case works
+// using this old API until something better is available.
+func (c *Config) HasHTTPProviderConfigured() bool {
+ if len(c.Routing.Routers) == 0 {
+ // No "custom" routers
+ return false
+ }
+ method, ok := c.Routing.Methods[MethodNameProvide]
+ if !ok {
+ // No provide method configured
+ return false
+ }
+ return c.routerSupportsHTTPProviding(method.RouterName)
+}
+
+// routerSupportsHTTPProviding checks if the supplied custom router is or
+// includes an HTTP-based router.
+func (c *Config) routerSupportsHTTPProviding(routerName string) bool {
+ rp, ok := c.Routing.Routers[routerName]
+ if !ok {
+ // Router configured for providing doesn't exist
+ return false
+ }
+
+ switch rp.Type {
+ case RouterTypeHTTP:
+ return true
+ case RouterTypeParallel, RouterTypeSequential:
+ // Check if any child router supports HTTP
+ if children, ok := rp.Parameters.(*ComposableRouterParams); ok {
+ for _, childRouter := range children.Routers {
+ if c.routerSupportsHTTPProviding(childRouter.RouterName) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/core/commands/add.go b/core/commands/add.go
index b24eab0833d..f314bbf648a 100644
--- a/core/commands/add.go
+++ b/core/commands/add.go
@@ -11,11 +11,13 @@ import (
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
+ "github.com/ipfs/boxo/verifcid"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
coreiface "github.com/ipfs/kubo/core/coreiface"
@@ -81,7 +83,7 @@ to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-d
If the daemon is not running, it will just add locally to the repo at $IPFS_PATH.
If the daemon is started later, it will be advertised after a few
-seconds when the reprovider runs.
+seconds when the provide system runs.
BASIC EXAMPLES:
@@ -203,7 +205,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"),
// Experimental Features
cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"),
- cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. WARNING: experimental").WithDefault(32),
+ cmds.IntOption(inlineLimitOptionName, fmt.Sprintf("Maximum block size to inline. Maximum: %d bytes. WARNING: experimental", verifcid.DefaultMaxIdentityDigestSize)).WithDefault(32),
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"),
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"),
cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
@@ -262,6 +264,19 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
hashFunStr, _ := req.Options[hashOptionName].(string)
inline, _ := req.Options[inlineOptionName].(bool)
inlineLimit, _ := req.Options[inlineLimitOptionName].(int)
+
+ // Validate inline-limit doesn't exceed the maximum identity digest size
+ if inline && inlineLimit > verifcid.DefaultMaxIdentityDigestSize {
+ return fmt.Errorf("inline-limit %d exceeds maximum allowed size of %d bytes", inlineLimit, verifcid.DefaultMaxIdentityDigestSize)
+ }
+
+ // Validate pin name
+ if pinNameSet {
+ if err := cmdutils.ValidatePinName(pinName); err != nil {
+ return err
+ }
+ }
+
toFilesStr, toFilesSet := req.Options[toFilesOptionName].(string)
preserveMode, _ := req.Options[preserveModeOptionName].(bool)
preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool)
diff --git a/core/commands/cmdutils/sanitize.go b/core/commands/cmdutils/sanitize.go
new file mode 100644
index 00000000000..4cd3d3f5908
--- /dev/null
+++ b/core/commands/cmdutils/sanitize.go
@@ -0,0 +1,50 @@
+package cmdutils
+
+import (
+ "strings"
+ "unicode"
+)
+
+const maxRunes = 128
+
+// CleanAndTrim sanitizes untrusted strings from remote peers to prevent display issues
+// across web UIs, terminals, and logs. It replaces control characters, format characters,
+// and surrogates with U+FFFD (�), then enforces a maximum length of 128 runes.
+//
+// This follows the libp2p identify specification and RFC 9839 guidance:
+// replacing problematic code points is preferred over deletion as deletion
+// is a known security risk.
+func CleanAndTrim(str string) string {
+ // Build sanitized result
+ var result []rune
+ for _, r := range str {
+ // Replace control characters (Cc) with U+FFFD - prevents terminal escapes, CR, LF, etc.
+ if unicode.Is(unicode.Cc, r) {
+ result = append(result, '\uFFFD')
+ continue
+ }
+ // Replace format characters (Cf) with U+FFFD - prevents RTL/LTR overrides, zero-width chars
+ if unicode.Is(unicode.Cf, r) {
+ result = append(result, '\uFFFD')
+ continue
+ }
+ // Replace surrogate characters (Cs) with U+FFFD - invalid in UTF-8
+ if unicode.Is(unicode.Cs, r) {
+ result = append(result, '\uFFFD')
+ continue
+ }
+ // Private use characters (Co) are preserved per spec
+ result = append(result, r)
+ }
+
+ // Convert to string and trim whitespace
+ sanitized := strings.TrimSpace(string(result))
+
+ // Enforce maximum length (128 runes, not bytes)
+ runes := []rune(sanitized)
+ if len(runes) > maxRunes {
+ return string(runes[:maxRunes])
+ }
+
+ return sanitized
+}
diff --git a/core/commands/cmdutils/utils.go b/core/commands/cmdutils/utils.go
index be295f9e314..9ecfd1446d8 100644
--- a/core/commands/cmdutils/utils.go
+++ b/core/commands/cmdutils/utils.go
@@ -13,6 +13,7 @@ import (
const (
AllowBigBlockOptionName = "allow-big-block"
SoftBlockLimit = 1024 * 1024 // https://github.com/ipfs/kubo/issues/7421#issuecomment-910833499
+ MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
)
var AllowBigBlockOption cmds.Option
@@ -50,6 +51,21 @@ func CheckBlockSize(req *cmds.Request, size uint64) error {
return nil
}
+// ValidatePinName validates that a pin name does not exceed the maximum allowed byte length.
+// Returns an error if the name exceeds MaxPinNameBytes (255 bytes).
+func ValidatePinName(name string) error {
+ if name == "" {
+ // Empty names are allowed
+ return nil
+ }
+
+ nameBytes := len([]byte(name))
+ if nameBytes > MaxPinNameBytes {
+ return fmt.Errorf("pin name is %d bytes (max %d bytes)", nameBytes, MaxPinNameBytes)
+ }
+ return nil
+}
+
// PathOrCidPath returns a path.Path built from the argument. It keeps the old
// behaviour by building a path from a CID string.
func PathOrCidPath(str string) (path.Path, error) {
diff --git a/core/commands/files.go b/core/commands/files.go
index 12a96eba2a5..86331cbaafd 100644
--- a/core/commands/files.go
+++ b/core/commands/files.go
@@ -11,6 +11,8 @@ import (
"slices"
"strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
humanize "github.com/dustin/go-humanize"
@@ -35,6 +37,43 @@ import (
var flog = logging.Logger("cmds/files")
+// Global counter for unflushed MFS operations
+var noFlushOperationCounter atomic.Int64
+
+// Cached limit value (read once on first use)
+var (
+ noFlushLimit int64
+ noFlushLimitInit sync.Once
+)
+
+// updateNoFlushCounter manages the counter for unflushed operations
+func updateNoFlushCounter(nd *core.IpfsNode, flush bool) error {
+ if flush {
+ // Reset counter when flushing
+ noFlushOperationCounter.Store(0)
+ return nil
+ }
+
+ // Cache the limit on first use (config doesn't change at runtime)
+ noFlushLimitInit.Do(func() {
+ noFlushLimit = int64(config.DefaultMFSNoFlushLimit)
+ if cfg, err := nd.Repo.Config(); err == nil && cfg.Internal.MFSNoFlushLimit != nil {
+ noFlushLimit = cfg.Internal.MFSNoFlushLimit.WithDefault(int64(config.DefaultMFSNoFlushLimit))
+ }
+ })
+
+ // Check if limit reached
+ if noFlushLimit > 0 && noFlushOperationCounter.Load() >= noFlushLimit {
+ return fmt.Errorf("reached limit of %d unflushed MFS operations. "+
+ "To resolve: 1) run 'ipfs files flush' to persist changes, "+
+ "2) use --flush=true (default), or "+
+ "3) increase Internal.MFSNoFlushLimit in config", noFlushLimit)
+ }
+
+ noFlushOperationCounter.Add(1)
+ return nil
+}
+
// FilesCmd is the 'ipfs files' command
var FilesCmd = &cmds.Command{
Helptext: cmds.HelpText{
@@ -64,13 +103,18 @@ defaults to true and ensures two things: 1) that the changes are reflected in
the full MFS structure (updated CIDs) 2) that the parent-folder's cache is
cleared. Use caution when setting this flag to false. It will improve
performance for large numbers of file operations, but it does so at the cost
-of consistency guarantees and unbound growth of the directories' in-memory
-caches. If the daemon is unexpectedly killed before running 'ipfs files
-flush' on the files in question, then data may be lost. This also applies to
-run 'ipfs repo gc' concurrently with '--flush=false' operations. We recommend
-flushing paths regularly with 'ipfs files flush', specially the folders on
-which many write operations are happening, as a way to clear the directory
-cache, free memory and speed up read operations.`,
+of consistency guarantees. If the daemon is unexpectedly killed before running
+'ipfs files flush' on the files in question, then data may be lost. This also
+applies to run 'ipfs repo gc' concurrently with '--flush=false' operations.
+
+When using '--flush=false', operations are limited to prevent unbounded
+memory growth. After reaching Internal.MFSNoFlushLimit operations, further
+operations will fail until you run 'ipfs files flush'. This explicit failure
+(instead of auto-flushing) ensures you maintain control over when data is
+persisted, preventing unexpected partial states and making batch operations
+predictable. We recommend flushing paths regularly, especially folders with
+many write operations, to clear caches, free memory, and maintain good
+performance.`,
},
Options: []cmds.Option{
cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true),
@@ -513,12 +557,16 @@ being GC'ed.
}
}
+ flush, _ := req.Options[filesFlushOptionName].(bool)
+
+ if err := updateNoFlushCounter(nd, flush); err != nil {
+ return err
+ }
+
err = mfs.PutNode(nd.FilesRoot, dst, node)
if err != nil {
return fmt.Errorf("cp: cannot put node in path %s: %s", dst, err)
}
-
- flush, _ := req.Options[filesFlushOptionName].(bool)
if flush {
if _, err := mfs.FlushPath(req.Context, nd.FilesRoot, dst); err != nil {
return fmt.Errorf("cp: cannot flush the created file %s: %s", dst, err)
@@ -844,6 +892,10 @@ Example:
flush, _ := req.Options[filesFlushOptionName].(bool)
+ if err := updateNoFlushCounter(nd, flush); err != nil {
+ return err
+ }
+
src, err := checkPath(req.Arguments[0])
if err != nil {
return err
@@ -981,6 +1033,10 @@ See '--to-files' in 'ipfs add --help' for more information.
flush, _ := req.Options[filesFlushOptionName].(bool)
rawLeaves, rawLeavesDef := req.Options[filesRawLeavesOptionName].(bool)
+ if err := updateNoFlushCounter(nd, flush); err != nil {
+ return err
+ }
+
if !rawLeavesDef && cfg.Import.UnixFSRawLeaves != config.Default {
rawLeavesDef = true
rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
@@ -1109,6 +1165,10 @@ Examples:
flush, _ := req.Options[filesFlushOptionName].(bool)
+ if err := updateNoFlushCounter(n, flush); err != nil {
+ return err
+ }
+
prefix, err := getPrefix(req)
if err != nil {
return err
@@ -1161,6 +1221,9 @@ are run with the '--flush=false'.
return err
}
+ // Reset the counter (flush always resets)
+ noFlushOperationCounter.Store(0)
+
return cmds.EmitOnce(res, &flushRes{enc.Encode(n.Cid())})
},
Type: flushRes{},
@@ -1258,6 +1321,13 @@ Remove files or directories.
cmds.BoolOption(forceOptionName, "Forcibly remove target at path; implies -r for directories"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
+ // Check if user explicitly set --flush=false
+ if flushOpt, ok := req.Options[filesFlushOptionName]; ok {
+ if flush, ok := flushOpt.(bool); ok && !flush {
+ return fmt.Errorf("files rm always flushes for safety. The --flush flag cannot be set to false for this command")
+ }
+ }
+
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
diff --git a/core/commands/id.go b/core/commands/id.go
index 295223258a3..58886699be3 100644
--- a/core/commands/id.go
+++ b/core/commands/id.go
@@ -12,6 +12,7 @@ import (
version "github.com/ipfs/kubo"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/commands/cmdutils"
cmds "github.com/ipfs/go-ipfs-cmds"
ke "github.com/ipfs/kubo/core/commands/keyencode"
@@ -173,12 +174,14 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{
slices.Sort(info.Addresses)
protocols, _ := ps.GetProtocols(p) // don't care about errors here.
- info.Protocols = append(info.Protocols, protocols...)
+ for _, proto := range protocols {
+ info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto))))
+ }
slices.Sort(info.Protocols)
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
- info.AgentVersion = vs
+ info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
}
diff --git a/core/commands/mount_nofuse.go b/core/commands/mount_nofuse.go
index 103678e77ca..2844a4b7183 100644
--- a/core/commands/mount_nofuse.go
+++ b/core/commands/mount_nofuse.go
@@ -1,5 +1,4 @@
//go:build !windows && nofuse
-// +build !windows,nofuse
package commands
diff --git a/core/commands/mount_unix.go b/core/commands/mount_unix.go
index 6051f86aa6f..8ca85cdaa2e 100644
--- a/core/commands/mount_unix.go
+++ b/core/commands/mount_unix.go
@@ -1,5 +1,4 @@
//go:build !windows && !nofuse
-// +build !windows,!nofuse
package commands
diff --git a/core/commands/pin/pin.go b/core/commands/pin/pin.go
index 428a75b695d..0934489d2cb 100644
--- a/core/commands/pin/pin.go
+++ b/core/commands/pin/pin.go
@@ -11,6 +11,7 @@ import (
bserv "github.com/ipfs/boxo/blockservice"
offline "github.com/ipfs/boxo/exchange/offline"
dag "github.com/ipfs/boxo/ipld/merkledag"
+ pin "github.com/ipfs/boxo/pinning/pinner"
verifcid "github.com/ipfs/boxo/verifcid"
cid "github.com/ipfs/go-cid"
cidenc "github.com/ipfs/go-cidutil/cidenc"
@@ -99,6 +100,11 @@ It may take some time. Pass '--progress' to track the progress.
name, _ := req.Options[pinNameOptionName].(string)
showProgress, _ := req.Options[pinProgressOptionName].(bool)
+ // Validate pin name
+ if err := cmdutils.ValidatePinName(name); err != nil {
+ return err
+ }
+
if err := req.ParseBodyArgs(); err != nil {
return err
}
@@ -370,18 +376,30 @@ Example:
return err
}
+ n, err := cmdenv.GetNode(env)
+ if err != nil {
+ return err
+ }
+
+ if n.Pinning == nil {
+ return fmt.Errorf("pinning service not available")
+ }
+
typeStr, _ := req.Options[pinTypeOptionName].(string)
stream, _ := req.Options[pinStreamOptionName].(bool)
displayNames, _ := req.Options[pinNamesOptionName].(bool)
name, _ := req.Options[pinNameOptionName].(string)
- switch typeStr {
- case "all", "direct", "indirect", "recursive":
- default:
- err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
+ // Validate name filter
+ if err := cmdutils.ValidatePinName(name); err != nil {
return err
}
+ mode, ok := pin.StringToMode(typeStr)
+ if !ok {
+ return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
+ }
+
// For backward compatibility, we accumulate the pins in the same output type as before.
var emit func(PinLsOutputWrapper) error
lgcList := map[string]PinLsType{}
@@ -397,7 +415,7 @@ Example:
}
if len(req.Arguments) > 0 {
- err = pinLsKeys(req, typeStr, api, emit)
+ err = pinLsKeys(req, mode, displayNames || name != "", n.Pinning, api, emit)
} else {
err = pinLsAll(req, typeStr, displayNames || name != "", name, api, emit)
}
@@ -482,23 +500,14 @@ type PinLsObject struct {
Type string `json:",omitempty"`
}
-func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error {
+func pinLsKeys(req *cmds.Request, mode pin.Mode, displayNames bool, pinner pin.Pinner, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error {
enc, err := cmdenv.GetCidEncoder(req)
if err != nil {
return err
}
- switch typeStr {
- case "all", "direct", "indirect", "recursive":
- default:
- return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
- }
-
- opt, err := options.Pin.IsPinned.Type(typeStr)
- if err != nil {
- panic("unhandled pin type")
- }
-
+ // Collect CIDs to check
+ cids := make([]cid.Cid, 0, len(req.Arguments))
for _, p := range req.Arguments {
p, err := cmdutils.PathOrCidPath(p)
if err != nil {
@@ -510,25 +519,31 @@ func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit fu
return err
}
- pinType, pinned, err := api.Pin().IsPinned(req.Context, rp, opt)
- if err != nil {
- return err
- }
+ cids = append(cids, rp.RootCid())
+ }
- if !pinned {
- return fmt.Errorf("path '%s' is not pinned", p)
+ // Check pins using the new type-specific method
+ pinned, err := pinner.CheckIfPinnedWithType(req.Context, mode, displayNames, cids...)
+ if err != nil {
+ return err
+ }
+
+ // Process results
+ for i, p := range pinned {
+ if !p.Pinned() {
+ return fmt.Errorf("path '%s' is not pinned", req.Arguments[i])
}
- switch pinType {
- case "direct", "indirect", "recursive", "internal":
- default:
- pinType = "indirect through " + pinType
+ pinType, _ := pin.ModeToString(p.Mode)
+ if p.Mode == pin.Indirect && p.Via.Defined() {
+ pinType = "indirect through " + enc.Encode(p.Via)
}
err = emit(PinLsOutputWrapper{
PinLsObject: PinLsObject{
Type: pinType,
- Cid: enc.Encode(rp.RootCid()),
+ Cid: enc.Encode(cids[i]),
+ Name: p.Name,
},
})
if err != nil {
@@ -545,11 +560,9 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api
return err
}
- switch typeStr {
- case "all", "direct", "indirect", "recursive":
- default:
- err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
- return err
+ _, ok := pin.StringToMode(typeStr)
+ if !ok {
+ return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
}
opt, err := options.Pin.Ls.Type(typeStr)
diff --git a/core/commands/pin/remotepin.go b/core/commands/pin/remotepin.go
index 068d15d0bd8..3936ce635df 100644
--- a/core/commands/pin/remotepin.go
+++ b/core/commands/pin/remotepin.go
@@ -171,6 +171,10 @@ NOTE: a comma-separated notation is supported in CLI for convenience:
opts := []pinclient.AddOption{}
if name, nameFound := req.Options[pinNameOptionName]; nameFound {
nameStr := name.(string)
+ // Validate pin name
+ if err := cmdutils.ValidatePinName(nameStr); err != nil {
+ return err
+ }
opts = append(opts, pinclient.PinOpts.WithName(nameStr))
}
@@ -321,6 +325,11 @@ func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client, out c
opts := []pinclient.LsOption{}
if name, nameFound := req.Options[pinNameOptionName]; nameFound {
nameStr := name.(string)
+ // Validate name filter
+ if err := cmdutils.ValidatePinName(nameStr); err != nil {
+ close(out)
+ return err
+ }
opts = append(opts, pinclient.PinOpts.FilterName(nameStr))
}
diff --git a/core/commands/provide.go b/core/commands/provide.go
index ba52ca50b81..3cc8b4f3ce7 100644
--- a/core/commands/provide.go
+++ b/core/commands/provide.go
@@ -1,6 +1,7 @@
package commands
import (
+ "errors"
"fmt"
"io"
"text/tabwriter"
@@ -44,12 +45,12 @@ var provideClearCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Clear all CIDs from the provide queue.",
ShortDescription: `
-Clear all CIDs from the reprovide queue.
+Clear all CIDs pending to be provided for the first time.
Note: Kubo will automatically clear the queue when it detects a change of
-Reprovider.Strategy upon a restart. For more information about reprovider
+Provide.Strategy upon a restart. For more information about provide
strategies, see:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy
+https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
`,
},
Options: []cmds.Option{
@@ -99,8 +100,8 @@ var provideStatCmd = &cmds.Command{
Tagline: "Returns statistics about the node's provider system.",
ShortDescription: `
Returns statistics about the content the node is reproviding every
-Reprovider.Interval according to Reprovider.Strategy:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider
+Provide.DHT.Interval according to Provide.Strategy:
+https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
This interface is not stable and may change from release to release.
@@ -118,7 +119,12 @@ This interface is not stable and may change from release to release.
return ErrNotOnline
}
- stats, err := nd.Provider.Stat()
+ provideSys, ok := nd.Provider.(provider.System)
+ if !ok {
+ return errors.New("stats not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
+ }
+
+ stats, err := provideSys.Stat()
if err != nil {
return err
}
diff --git a/core/commands/routing.go b/core/commands/routing.go
index 0804b1f44c3..c772e204593 100644
--- a/core/commands/routing.go
+++ b/core/commands/routing.go
@@ -11,6 +11,8 @@ import (
"github.com/ipfs/kubo/config"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/node"
+ mh "github.com/multiformats/go-multihash"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipns"
@@ -164,14 +166,19 @@ var provideRefRoutingCmd = &cmds.Command{
if err != nil {
return err
}
- if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
- return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
+ if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+ return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
}
- if len(nd.PeerHost.Network().Conns()) == 0 {
+ if len(nd.PeerHost.Network().Conns()) == 0 && !cfg.HasHTTPProviderConfigured() {
+ // Node is depending on DHT for providing (no custom HTTP provider
+ // configured) and currently has no connected peers.
return errors.New("cannot provide, no connected peers")
}
+ // If we reach here with no connections but HTTP provider configured,
+ // we proceed with the provide operation via HTTP
+
// Needed to parse stdin args.
// TODO: Lazy Load
err = req.ParseBodyArgs()
@@ -207,9 +214,9 @@ var provideRefRoutingCmd = &cmds.Command{
go func() {
defer cancel()
if rec {
- provideErr = provideKeysRec(ctx, nd.Routing, nd.DAG, cids)
+ provideErr = provideCidsRec(ctx, nd.Provider, nd.DAG, cids)
} else {
- provideErr = provideKeys(ctx, nd.Routing, cids)
+ provideErr = provideCids(nd.Provider, cids)
}
if provideErr != nil {
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
@@ -268,14 +275,18 @@ Trigger reprovider to announce our data to network.
if err != nil {
return err
}
- if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
- return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
+ if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
+ return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
+ }
+ if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
+ return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
}
- if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
- return errors.New("invalid configuration: Reprovider.Interval is set to '0'")
+ provideSys, ok := nd.Provider.(*node.LegacyProvider)
+ if !ok {
+ return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
}
- err = nd.Provider.Reprovide(req.Context)
+ err = provideSys.Reprovide(req.Context)
if err != nil {
return err
}
@@ -284,39 +295,25 @@ Trigger reprovider to announce our data to network.
},
}
-func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error {
- for _, c := range cids {
- err := r.Provide(ctx, c, true)
- if err != nil {
- return err
- }
+func provideCids(prov node.DHTProvider, cids []cid.Cid) error {
+ mhs := make([]mh.Multihash, len(cids))
+ for i, c := range cids {
+ mhs[i] = c.Hash()
}
- return nil
+ return prov.StartProviding(true, mhs...)
}
-func provideKeysRec(ctx context.Context, r routing.Routing, dserv ipld.DAGService, cids []cid.Cid) error {
- provided := cid.NewSet()
+func provideCidsRec(ctx context.Context, prov node.DHTProvider, dserv ipld.DAGService, cids []cid.Cid) error {
for _, c := range cids {
kset := cid.NewSet()
-
err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, kset.Visit)
if err != nil {
return err
}
-
- for _, k := range kset.Keys() {
- if provided.Has(k) {
- continue
- }
-
- err = r.Provide(ctx, k, true)
- if err != nil {
- return err
- }
- provided.Add(k)
+ if err = provideCids(prov, kset.Keys()); err != nil {
+ return err
}
}
-
return nil
}
diff --git a/core/commands/stat_dht.go b/core/commands/stat_dht.go
index e6006e439d3..b4345f570b5 100644
--- a/core/commands/stat_dht.go
+++ b/core/commands/stat_dht.go
@@ -7,6 +7,7 @@ import (
"time"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/commands/cmdutils"
cmds "github.com/ipfs/go-ipfs-cmds"
dht "github.com/libp2p/go-libp2p-kad-dht"
@@ -92,7 +93,9 @@ This interface is not stable and may change from release to release.
info := dhtPeerInfo{ID: p.String()}
if ver, err := nd.Peerstore.Get(p, "AgentVersion"); err == nil {
- info.AgentVersion, _ = ver.(string)
+ if vs, ok := ver.(string); ok {
+ info.AgentVersion = cmdutils.CleanAndTrim(vs)
+ }
} else if err == pstore.ErrNotFound {
// ignore
} else {
@@ -143,7 +146,9 @@ This interface is not stable and may change from release to release.
info := dhtPeerInfo{ID: pi.Id.String()}
if ver, err := nd.Peerstore.Get(pi.Id, "AgentVersion"); err == nil {
- info.AgentVersion, _ = ver.(string)
+ if vs, ok := ver.(string); ok {
+ info.AgentVersion = cmdutils.CleanAndTrim(vs)
+ }
} else if err == pstore.ErrNotFound {
// ignore
} else {
diff --git a/core/commands/swarm.go b/core/commands/swarm.go
index 153068438b3..533ccc07814 100644
--- a/core/commands/swarm.go
+++ b/core/commands/swarm.go
@@ -18,6 +18,7 @@ import (
"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/ipfs/kubo/repo"
"github.com/ipfs/kubo/repo/fsrepo"
@@ -27,6 +28,7 @@ import (
inet "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@@ -290,7 +292,7 @@ var swarmPeersCmd = &cmds.Command{
}
for _, s := range strs {
- ci.Streams = append(ci.Streams, streamInfo{Protocol: string(s)})
+ ci.Streams = append(ci.Streams, streamInfo{Protocol: cmdutils.CleanAndTrim(string(s))})
}
}
@@ -476,13 +478,15 @@ func (ci *connInfo) identifyPeer(ps pstore.Peerstore, p peer.ID) (IdOutput, erro
slices.Sort(info.Addresses)
if protocols, err := ps.GetProtocols(p); err == nil {
- info.Protocols = append(info.Protocols, protocols...)
+ for _, proto := range protocols {
+ info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto))))
+ }
slices.Sort(info.Protocols)
}
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
- info.AgentVersion = vs
+ info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
}
diff --git a/core/commands/sysdiag.go b/core/commands/sysdiag.go
index 123dcb973d8..5a7c41ce985 100644
--- a/core/commands/sysdiag.go
+++ b/core/commands/sysdiag.go
@@ -2,14 +2,13 @@ package commands
import (
"os"
- "path"
"runtime"
+ "github.com/ipfs/go-ipfs-cmds"
version "github.com/ipfs/kubo"
+ "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
-
- cmds "github.com/ipfs/go-ipfs-cmds"
manet "github.com/multiformats/go-multiaddr/net"
sysi "github.com/whyrusleeping/go-sysinfo"
)
@@ -84,32 +83,28 @@ func runtimeInfo(out map[string]interface{}) error {
func envVarInfo(out map[string]interface{}) error {
ev := make(map[string]interface{})
ev["GOPATH"] = os.Getenv("GOPATH")
- ev["IPFS_PATH"] = os.Getenv("IPFS_PATH")
+ ev[config.EnvDir] = os.Getenv(config.EnvDir)
out["environment"] = ev
return nil
}
-func ipfsPath() string {
- p := os.Getenv("IPFS_PATH")
- if p == "" {
- p = path.Join(os.Getenv("HOME"), ".ipfs")
- }
- return p
-}
-
func diskSpaceInfo(out map[string]interface{}) error {
- di := make(map[string]interface{})
- dinfo, err := sysi.DiskUsage(ipfsPath())
+ pathRoot, err := config.PathRoot()
+ if err != nil {
+ return err
+ }
+ dinfo, err := sysi.DiskUsage(pathRoot)
if err != nil {
return err
}
- di["fstype"] = dinfo.FsType
- di["total_space"] = dinfo.Total
- di["free_space"] = dinfo.Free
+ out["diskinfo"] = map[string]interface{}{
+ "fstype": dinfo.FsType,
+ "total_space": dinfo.Total,
+ "free_space": dinfo.Free,
+ }
- out["diskinfo"] = di
return nil
}
diff --git a/core/core.go b/core/core.go
index f8a6a258f48..8a674d8f680 100644
--- a/core/core.go
+++ b/core/core.go
@@ -92,31 +92,31 @@ type IpfsNode struct {
RecordValidator record.Validator
// Online
- PeerHost p2phost.Host `optional:"true"` // the network host (server+client)
- Peering *peering.PeeringService `optional:"true"`
- Filters *ma.Filters `optional:"true"`
- Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
- Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
- ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system
- DNSResolver *madns.Resolver // the DNS resolver
- IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver
- UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver
- OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks
- OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks
- Exchange exchange.Interface // the block exchange + strategy
- Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance
- Namesys namesys.NameSystem // the name system, resolves paths to hashes
- Provider provider.System // the value provider system
- ProvidingStrategy config.ReproviderStrategy `optional:"true"`
- ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
- IpnsRepub *ipnsrp.Republisher `optional:"true"`
- ResourceManager network.ResourceManager `optional:"true"`
+ PeerHost p2phost.Host `optional:"true"` // the network host (server+client)
+ Peering *peering.PeeringService `optional:"true"`
+ Filters *ma.Filters `optional:"true"`
+ Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
+ ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system
+ DNSResolver *madns.Resolver // the DNS resolver
+ IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver
+ UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver
+ OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks
+ OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks
+ Exchange exchange.Interface // the block exchange + strategy
+ Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance
+ Namesys namesys.NameSystem // the name system, resolves paths to hashes
+ ProvidingStrategy config.ProvideStrategy `optional:"true"`
+ ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
+ IpnsRepub *ipnsrp.Republisher `optional:"true"`
+ ResourceManager network.ResourceManager `optional:"true"`
PubSub *pubsub.PubSub `optional:"true"`
PSRouter *psrouter.PubsubValueStore `optional:"true"`
- DHT *ddht.DHT `optional:"true"`
- DHTClient routing.Routing `name:"dhtc" optional:"true"`
+ Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
+ Provider node.DHTProvider // the value provider system
+ DHT *ddht.DHT `optional:"true"`
+ DHTClient routing.Routing `name:"dhtc" optional:"true"`
P2P *p2p.P2P `optional:"true"`
diff --git a/core/coreapi/coreapi.go b/core/coreapi/coreapi.go
index 66763e8848b..eca9fd989de 100644
--- a/core/coreapi/coreapi.go
+++ b/core/coreapi/coreapi.go
@@ -23,10 +23,8 @@ import (
dag "github.com/ipfs/boxo/ipld/merkledag"
pathresolver "github.com/ipfs/boxo/path/resolver"
pin "github.com/ipfs/boxo/pinning/pinner"
- provider "github.com/ipfs/boxo/provider"
offlineroute "github.com/ipfs/boxo/routing/offline"
ipld "github.com/ipfs/go-ipld-format"
- logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/options"
@@ -45,8 +43,6 @@ import (
"github.com/ipfs/kubo/repo"
)
-var log = logging.Logger("coreapi")
-
type CoreAPI struct {
nctx context.Context
@@ -73,8 +69,8 @@ type CoreAPI struct {
ipldPathResolver pathresolver.Resolver
unixFSPathResolver pathresolver.Resolver
- provider provider.System
- providingStrategy config.ReproviderStrategy
+ provider node.DHTProvider
+ providingStrategy config.ProvideStrategy
pubSub *pubsub.PubSub
diff --git a/core/coreapi/routing.go b/core/coreapi/routing.go
index 6d432d744dd..b9c25805622 100644
--- a/core/coreapi/routing.go
+++ b/core/coreapi/routing.go
@@ -15,9 +15,10 @@ import (
cidutil "github.com/ipfs/go-cidutil"
coreiface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
+ "github.com/ipfs/kubo/core/node"
"github.com/ipfs/kubo/tracing"
peer "github.com/libp2p/go-libp2p/core/peer"
- routing "github.com/libp2p/go-libp2p/core/routing"
+ mh "github.com/multiformats/go-multihash"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -148,9 +149,9 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop
}
if settings.Recursive {
- err = provideKeysRec(ctx, api.routing, api.blockstore, []cid.Cid{c})
+ err = provideKeysRec(ctx, api.provider, api.blockstore, []cid.Cid{c})
} else {
- err = provideKeys(ctx, api.routing, []cid.Cid{c})
+ err = api.provider.StartProviding(false, c.Hash())
}
if err != nil {
return err
@@ -159,41 +160,64 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop
return nil
}
-func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error {
- for _, c := range cids {
- err := r.Provide(ctx, c, true)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func provideKeysRec(ctx context.Context, r routing.Routing, bs blockstore.Blockstore, cids []cid.Cid) error {
+func provideKeysRec(ctx context.Context, prov node.DHTProvider, bs blockstore.Blockstore, cids []cid.Cid) error {
provided := cidutil.NewStreamingSet()
- errCh := make(chan error)
+ // Error channel with buffer size 1 to avoid blocking the goroutine
+ errCh := make(chan error, 1)
go func() {
+ // Always close provided.New to signal completion
+ defer close(provided.New)
+ // Also close error channel to distinguish between "no error" and "pending error"
+ defer close(errCh)
+
dserv := dag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
for _, c := range cids {
- err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx))
- if err != nil {
- errCh <- err
+ if err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx)); err != nil {
+ // Send error to channel. If context is cancelled while trying to send,
+ // exit immediately as the main loop will return ctx.Err()
+ select {
+ case errCh <- err:
+ // Error sent successfully, exit goroutine
+ case <-ctx.Done():
+ // Context cancelled, exit without sending error
+ return
+ }
+ return
}
}
+ // All CIDs walked successfully, goroutine will exit and channels will close
}()
+ keys := make([]mh.Multihash, 0)
for {
select {
- case k := <-provided.New:
- err := r.Provide(ctx, k, true)
- if err != nil {
- return err
- }
- case err := <-errCh:
- return err
case <-ctx.Done():
+ // Context cancelled, return immediately
return ctx.Err()
+ case err := <-errCh:
+ // Received error from DAG walk, return it
+ return err
+ case c, ok := <-provided.New:
+ if !ok {
+ // Channel closed means goroutine finished.
+ // CRITICAL: Check for any error that was sent just before channel closure.
+ // This handles the race where error is sent to errCh but main loop
+ // sees provided.New close first.
+ select {
+ case err := <-errCh:
+ if err != nil {
+ return err
+ }
+ // errCh closed with nil, meaning success
+ default:
+ // No pending error in errCh
+ }
+ // All CIDs successfully processed, start providing
+ return prov.StartProviding(true, keys...)
+ }
+ // Accumulate the CID for providing
+ keys = append(keys, c.Hash())
}
}
}
diff --git a/core/coreapi/test/api_test.go b/core/coreapi/test/api_test.go
index 7867e1f1cba..bf80686f13f 100644
--- a/core/coreapi/test/api_test.go
+++ b/core/coreapi/test/api_test.go
@@ -72,7 +72,7 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
c.AutoTLS.Enabled = config.False // disable so no /ws listener is added
// For provider tests, avoid that content gets
// auto-provided without calling "provide" (unless pinned).
- c.Reprovider.Strategy = config.NewOptionalString("roots")
+ c.Provide.Strategy = config.NewOptionalString("roots")
ds := syncds.MutexWrap(datastore.NewMapDatastore())
r := &repo.Mock{
diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go
index de03b60993d..7f068a227c6 100644
--- a/core/coreapi/unixfs.go
+++ b/core/coreapi/unixfs.go
@@ -16,21 +16,25 @@ import (
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
- provider "github.com/ipfs/boxo/provider"
+ "github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cidutil "github.com/ipfs/go-cidutil"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
ipld "github.com/ipfs/go-ipld-format"
+ logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
options "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/core/coreunix"
"github.com/ipfs/kubo/tracing"
+ mh "github.com/multiformats/go-multihash"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
+var log = logging.Logger("coreapi")
+
type UnixfsAPI CoreAPI
// Add builds a merkledag node from a reader, adds it to the blockstore,
@@ -116,7 +120,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
// nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only
// handles roots). This wrapping ensures all blocks of pinned content get provided.
if settings.Pin && !settings.OnlyHash &&
- (api.providingStrategy&config.ReproviderStrategyPinned) != 0 {
+ (api.providingStrategy&config.ProvideStrategyPinned) != 0 {
dserv = &providingDagService{dserv, api.provider}
}
@@ -386,7 +390,7 @@ func (s *syncDagService) Sync() error {
type providingDagService struct {
ipld.DAGService
- provider provider.System
+ provider.MultihashProvider
}
func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error {
@@ -397,8 +401,8 @@ func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error {
// We don't want DAG operations to fail due to providing issues.
// The user's data is still stored successfully even if the
// announcement to the routing system fails temporarily.
- if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
- log.Error(err)
+ if err := pds.StartProviding(false, n.Cid().Hash()); err != nil {
+ log.Errorf("failed to provide new block: %s", err)
}
return nil
}
@@ -407,14 +411,13 @@ func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) er
if err := pds.DAGService.AddMany(ctx, nds); err != nil {
return err
}
+ keys := make([]mh.Multihash, len(nds))
+ for i, n := range nds {
+ keys[i] = n.Cid().Hash()
+ }
// Same error handling philosophy as Add(): log but don't fail.
- // Note: Provide calls are intentionally blocking here - the Provider
- // implementation should handle concurrency/queuing internally.
- for _, n := range nds {
- if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
- log.Error(err)
- break
- }
+ if err := pds.StartProviding(false, keys...); err != nil {
+ log.Errorf("failed to provide new blocks: %s", err)
}
return nil
}
diff --git a/core/corehttp/gateway.go b/core/corehttp/gateway.go
index 340882a7e56..fb9ec1fd542 100644
--- a/core/corehttp/gateway.go
+++ b/core/corehttp/gateway.go
@@ -107,11 +107,13 @@ func Libp2pGatewayOption() ServeOption {
// Keep these constraints for security
DeserializedResponses: false, // Trustless-only
NoDNSLink: true, // No DNS resolution
+ DisableHTMLErrors: true, // Plain text errors only
PublicGateways: nil,
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
+ DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
}
handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend})
@@ -270,6 +272,7 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
+ DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),
}
// Add default implicit known gateways, such as subdomain gateway on localhost.
diff --git a/core/corehttp/webui.go b/core/corehttp/webui.go
index 9c3244ad210..18c455b1657 100644
--- a/core/corehttp/webui.go
+++ b/core/corehttp/webui.go
@@ -1,19 +1,31 @@
package corehttp
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/kubo/config"
+ core "github.com/ipfs/kubo/core"
+)
+
// WebUI version confirmed to work with this Kubo version
-const WebUIPath = "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu" // v4.8.0
+const WebUIPath = "/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u" // v4.9.1
// WebUIPaths is a list of all past webUI paths.
var WebUIPaths = []string{
WebUIPath,
+ "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu", // v4.8.0
"/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0
"/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0
"/ipfs/bafybeiata4qg7xjtwgor6r5dw63jjxyouenyromrrb4lrewxrlvav7gzgi", // v4.5.0
"/ipfs/bafybeigp3zm7cqoiciqk5anlheenqjsgovp7j7zq6hah4nu6iugdgb4nby", // v4.4.2
"/ipfs/bafybeiatztgdllxnp5p6zu7bdwhjmozsmd7jprff4bdjqjljxtylitvss4", // v4.4.1
- "/ipfs/bafybeibgic2ex3fvzkinhy6k6aqyv3zy2o7bkbsmrzvzka24xetv7eeadm",
- "/ipfs/bafybeid4uxz7klxcu3ffsnmn64r7ihvysamlj4ohl5h2orjsffuegcpaeq",
- "/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy",
+ "/ipfs/bafybeibgic2ex3fvzkinhy6k6aqyv3zy2o7bkbsmrzvzka24xetv7eeadm", // v4.4.0
+ "/ipfs/bafybeid4uxz7klxcu3ffsnmn64r7ihvysamlj4ohl5h2orjsffuegcpaeq", // v4.3.3
+ "/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy", // v4.3.2
"/ipfs/bafybeihatzsgposbr3hrngo42yckdyqcc56yean2rynnwpzxstvdlphxf4",
"/ipfs/bafybeigggyffcf6yfhx5irtwzx3cgnk6n3dwylkvcpckzhqqrigsxowjwe",
"/ipfs/bafybeidf7cpkwsjkq6xs3r6fbbxghbugilx3jtezbza7gua3k5wjixpmba",
@@ -22,18 +34,18 @@ var WebUIPaths = []string{
"/ipfs/bafybeicyp7ssbnj3hdzehcibmapmpuc3atrsc4ch3q6acldfh4ojjdbcxe",
"/ipfs/bafybeigs6d53gpgu34553mbi5bbkb26e4ikruoaaar75jpfdywpup2r3my",
"/ipfs/bafybeic4gops3d3lyrisqku37uio33nvt6fqxvkxihrwlqsuvf76yln4fm",
- "/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte",
+ "/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte", // v2.22.0
"/ipfs/bafybeiequgo72mrvuml56j4gk7crewig5bavumrrzhkqbim6b3s2yqi7ty",
- "/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy",
- "/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa",
- "/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y",
- "/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm",
- "/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q",
- "/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu",
- "/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva",
+ "/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy", // v2.20.0
+ "/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa", // v2.19.0
+ "/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y", // v2.18.1
+ "/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm", // v2.18.0
+ "/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q", // v2.15.1
+ "/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu", // v2.15.0
+ "/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva", // v2.13.0
"/ipfs/bafybeiflkjt66aetfgcrgvv75izymd5kc47g6luepqmfq6zsf5w6ueth6y",
"/ipfs/bafybeid26vjplsejg7t3nrh7mxmiaaxriebbm4xxrxxdunlk7o337m5sqq",
- "/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i",
+ "/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i", // v2.11.4
"/ipfs/bafybeianwe4vy7sprht5sm3hshvxjeqhwcmvbzq73u55sdhqngmohkjgs4",
"/ipfs/bafybeicitin4p7ggmyjaubqpi3xwnagrwarsy6hiihraafk5rcrxqxju6m",
"/ipfs/bafybeihpetclqvwb4qnmumvcn7nh4pxrtugrlpw4jgjpqicdxsv7opdm6e",
@@ -72,4 +84,85 @@ var WebUIPaths = []string{
"/ipfs/Qmexhq2sBHnXQbvyP2GfUdbnY7HCagH2Mw5vUNSBn2nxip",
}
-var WebUIOption = RedirectOption("webui", WebUIPath)
+// WebUIOption provides the WebUI handler for the RPC API.
+func WebUIOption(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
+ cfg, err := n.Repo.Config()
+ if err != nil {
+ return nil, err
+ }
+
+ handler := &webUIHandler{
+ headers: cfg.API.HTTPHeaders,
+ node: n,
+ noFetch: cfg.Gateway.NoFetch,
+ deserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
+ }
+
+ mux.Handle("/webui/", handler)
+ return mux, nil
+}
+
+type webUIHandler struct {
+ headers map[string][]string
+ node *core.IpfsNode
+ noFetch bool
+ deserializedResponses bool
+}
+
+func (h *webUIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ for k, v := range h.headers {
+ w.Header()[http.CanonicalHeaderKey(k)] = v
+ }
+
+ // Check if WebUI is incompatible with current configuration
+ if !h.deserializedResponses {
+ h.writeIncompatibleError(w)
+ return
+ }
+
+ // Check if WebUI is available locally when Gateway.NoFetch is true
+ if h.noFetch {
+ cidStr := strings.TrimPrefix(WebUIPath, "/ipfs/")
+ webUICID, err := cid.Parse(cidStr)
+ if err != nil {
+ // This should never happen with hardcoded constant
+ log.Errorf("failed to parse WebUI CID: %v", err)
+ } else {
+ has, err := h.node.Blockstore.Has(r.Context(), webUICID)
+ if err != nil {
+ log.Debugf("error checking WebUI availability: %v", err)
+ } else if !has {
+ h.writeNotAvailableError(w)
+ return
+ }
+ }
+ }
+
+ // Default behavior: redirect to the WebUI path
+ http.Redirect(w, r, WebUIPath, http.StatusFound)
+}
+
+func (h *webUIHandler) writeIncompatibleError(w http.ResponseWriter) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusServiceUnavailable)
+ fmt.Fprintf(w, `IPFS WebUI Incompatible
+
+WebUI is not compatible with Gateway.DeserializedResponses=false.
+
+The WebUI requires deserializing IPFS responses to render the interface.
+To use the WebUI, set Gateway.DeserializedResponses=true in your config.
+`)
+}
+
+func (h *webUIHandler) writeNotAvailableError(w http.ResponseWriter) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusServiceUnavailable)
+ fmt.Fprintf(w, `IPFS WebUI Not Available
+
+WebUI at %s is not in your local node due to Gateway.NoFetch=true.
+
+To use the WebUI, either:
+1. Run: ipfs pin add --progress --name ipfs-webui %s
+2. Download from https://github.com/ipfs/ipfs-webui/releases and import with: ipfs dag import ipfs-webui.car
+`, WebUIPath, WebUIPath)
+}
diff --git a/core/coreiface/tests/pin.go b/core/coreiface/tests/pin.go
index 18f90c0514f..684e1e5a1c1 100644
--- a/core/coreiface/tests/pin.go
+++ b/core/coreiface/tests/pin.go
@@ -12,6 +12,7 @@ import (
ipld "github.com/ipfs/go-ipld-format"
iface "github.com/ipfs/kubo/core/coreiface"
opt "github.com/ipfs/kubo/core/coreiface/options"
+ "github.com/stretchr/testify/require"
)
func (tp *TestSuite) TestPin(t *testing.T) {
@@ -28,6 +29,7 @@ func (tp *TestSuite) TestPin(t *testing.T) {
t.Run("TestPinLsIndirect", tp.TestPinLsIndirect)
t.Run("TestPinLsPrecedence", tp.TestPinLsPrecedence)
t.Run("TestPinIsPinned", tp.TestPinIsPinned)
+ t.Run("TestPinNames", tp.TestPinNames)
}
func (tp *TestSuite) TestPinAdd(t *testing.T) {
@@ -580,6 +582,145 @@ func assertIsPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path
}
}
+func (tp *TestSuite) TestPinNames(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ api, err := tp.makeAPI(t, ctx)
+ require.NoError(t, err)
+
+ // Create test content
+ p1, err := api.Unixfs().Add(ctx, strFile("content1")())
+ require.NoError(t, err)
+
+ p2, err := api.Unixfs().Add(ctx, strFile("content2")())
+ require.NoError(t, err)
+
+ p3, err := api.Unixfs().Add(ctx, strFile("content3")())
+ require.NoError(t, err)
+
+ p4, err := api.Unixfs().Add(ctx, strFile("content4")())
+ require.NoError(t, err)
+
+ // Test 1: Pin with name
+ err = api.Pin().Add(ctx, p1, opt.Pin.Name("test-pin-1"))
+ require.NoError(t, err, "failed to add pin with name")
+
+ // Test 2: Pin without name
+ err = api.Pin().Add(ctx, p2)
+ require.NoError(t, err, "failed to add pin without name")
+
+ // Test 3: List pins with detailed option to get names
+ pins := make(chan iface.Pin)
+ go func() {
+ err = api.Pin().Ls(ctx, pins, opt.Pin.Ls.Detailed(true))
+ }()
+
+ pinMap := make(map[string]string)
+ for pin := range pins {
+ pinMap[pin.Path().String()] = pin.Name()
+ }
+ require.NoError(t, err, "failed to list pins with names")
+
+ // Verify pin names
+ name1, ok := pinMap[p1.String()]
+ require.True(t, ok, "pin for %s not found", p1)
+ require.Equal(t, "test-pin-1", name1, "unexpected pin name for %s", p1)
+
+ name2, ok := pinMap[p2.String()]
+ require.True(t, ok, "pin for %s not found", p2)
+ require.Empty(t, name2, "expected empty pin name for %s, got '%s'", p2, name2)
+
+ // Test 4: Pin update preserves name
+ err = api.Pin().Add(ctx, p3, opt.Pin.Name("updatable-pin"))
+ require.NoError(t, err, "failed to add pin with name for update test")
+
+ err = api.Pin().Update(ctx, p3, p4)
+ require.NoError(t, err, "failed to update pin")
+
+ // Verify name was preserved after update
+ pins2 := make(chan iface.Pin)
+ go func() {
+ err = api.Pin().Ls(ctx, pins2, opt.Pin.Ls.Detailed(true))
+ }()
+
+ updatedPinMap := make(map[string]string)
+ for pin := range pins2 {
+ updatedPinMap[pin.Path().String()] = pin.Name()
+ }
+ require.NoError(t, err, "failed to list pins after update")
+
+ // Old pin should not exist
+ _, oldExists := updatedPinMap[p3.String()]
+ require.False(t, oldExists, "old pin %s should not exist after update", p3)
+
+ // New pin should have the preserved name
+ name4, ok := updatedPinMap[p4.String()]
+ require.True(t, ok, "updated pin for %s not found", p4)
+ require.Equal(t, "updatable-pin", name4, "pin name not preserved after update from %s to %s", p3, p4)
+
+ // Test 5: Re-pinning with different name updates the name
+ err = api.Pin().Add(ctx, p1, opt.Pin.Name("new-name-for-p1"))
+ require.NoError(t, err, "failed to re-pin with new name")
+
+ // Verify name was updated
+ pins3 := make(chan iface.Pin)
+ go func() {
+ err = api.Pin().Ls(ctx, pins3, opt.Pin.Ls.Detailed(true))
+ }()
+
+ repinMap := make(map[string]string)
+ for pin := range pins3 {
+ repinMap[pin.Path().String()] = pin.Name()
+ }
+ require.NoError(t, err, "failed to list pins after re-pin")
+
+ rePinnedName, ok := repinMap[p1.String()]
+ require.True(t, ok, "re-pinned content %s not found", p1)
+ require.Equal(t, "new-name-for-p1", rePinnedName, "pin name not updated after re-pinning %s", p1)
+
+ // Test 6: Direct pin with name
+ p5, err := api.Unixfs().Add(ctx, strFile("direct-content")())
+ require.NoError(t, err)
+
+ err = api.Pin().Add(ctx, p5, opt.Pin.Recursive(false), opt.Pin.Name("direct-pin-name"))
+ require.NoError(t, err, "failed to add direct pin with name")
+
+ // Verify direct pin has name
+ directPins := make(chan iface.Pin)
+ typeOpt, err := opt.Pin.Ls.Type("direct")
+ require.NoError(t, err, "failed to create type option")
+ go func() {
+ err = api.Pin().Ls(ctx, directPins, typeOpt, opt.Pin.Ls.Detailed(true))
+ }()
+
+ directPinMap := make(map[string]string)
+ for pin := range directPins {
+ directPinMap[pin.Path().String()] = pin.Name()
+ }
+ require.NoError(t, err, "failed to list direct pins")
+
+ directName, ok := directPinMap[p5.String()]
+ require.True(t, ok, "direct pin %s not found", p5)
+ require.Equal(t, "direct-pin-name", directName, "unexpected name for direct pin %s", p5)
+
+ // Test 7: List without detailed option doesn't return names
+ pinsNoDetails := make(chan iface.Pin)
+ go func() {
+ err = api.Pin().Ls(ctx, pinsNoDetails)
+ }()
+
+ noDetailsMap := make(map[string]string)
+ for pin := range pinsNoDetails {
+ noDetailsMap[pin.Path().String()] = pin.Name()
+ }
+ require.NoError(t, err, "failed to list pins without detailed option")
+
+ // All names should be empty without detailed option
+ for path, name := range noDetailsMap {
+ require.Empty(t, name, "expected empty name for %s without detailed option, got '%s'", path, name)
+ }
+}
+
func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path) {
t.Helper()
diff --git a/core/node/core.go b/core/node/core.go
index 0a0ded89ae4..a636a0c5406 100644
--- a/core/node/core.go
+++ b/core/node/core.go
@@ -18,7 +18,6 @@ import (
pathresolver "github.com/ipfs/boxo/path/resolver"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
- provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
format "github.com/ipfs/go-ipld-format"
@@ -49,16 +48,17 @@ func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blocks
}
// Pinning creates new pinner which tells GC which blocks should be kept
-func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov provider.System) (pin.Pinner, error) {
+func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov DHTProvider) (pin.Pinner, error) {
// Parse strategy at function creation time (not inside the returned function)
// This happens before the provider is created, which is why we pass the strategy
// string and parse it here, rather than using fx-provided ProvidingStrategy.
- strategyFlag := config.ParseReproviderStrategy(strategy)
+ strategyFlag := config.ParseProvideStrategy(strategy)
return func(bstore blockstore.Blockstore,
ds format.DAGService,
repo repo.Repo,
- prov provider.System) (pin.Pinner, error) {
+ prov DHTProvider,
+ ) (pin.Pinner, error) {
rootDS := repo.Datastore()
syncFn := func(ctx context.Context) error {
@@ -72,8 +72,8 @@ func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGSe
ctx := context.TODO()
var opts []dspinner.Option
- roots := (strategyFlag & config.ReproviderStrategyRoots) != 0
- pinned := (strategyFlag & config.ReproviderStrategyPinned) != 0
+ roots := (strategyFlag & config.ProvideStrategyRoots) != 0
+ pinned := (strategyFlag & config.ProvideStrategyPinned) != 0
// Important: Only one of WithPinnedProvider or WithRootsProvider should be active.
// Having both would cause duplicate root advertisements since "pinned" includes all
@@ -179,8 +179,8 @@ func Dag(bs blockservice.BlockService) format.DAGService {
}
// Files loads persisted MFS root
-func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
- return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
+func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
+ return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
dsk := datastore.NewKey("/local/filesroot")
pf := func(ctx context.Context, c cid.Cid) error {
rootDS := repo.Datastore()
@@ -230,18 +230,21 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo
return nil, err
}
- // MFS (Mutable File System) provider integration:
- // Only pass the provider to MFS when the strategy includes "mfs".
- // MFS will call Provide() on every DAGService.Add() operation,
- // which is sufficient for the "mfs" strategy - it ensures all
- // MFS content gets announced as it's added or modified.
- // For non-mfs strategies, we set provider to nil to avoid unnecessary providing.
- strategyFlag := config.ParseReproviderStrategy(strategy)
- if strategyFlag&config.ReproviderStrategyMFS == 0 {
+ // MFS (Mutable File System) provider integration: Only pass the provider
+ // to MFS when the strategy includes "mfs". MFS will call StartProviding()
+ // on every DAGService.Add() operation, which is sufficient for the "mfs"
+ // strategy - it ensures all MFS content gets announced as it's added or
+ // modified. For non-mfs strategies, we set provider to nil to avoid
+ // unnecessary providing.
+ strategyFlag := config.ParseProvideStrategy(strategy)
+ if strategyFlag&config.ProvideStrategyMFS == 0 {
prov = nil
}
root, err := mfs.NewRoot(ctx, dag, nd, pf, prov)
+ if err != nil {
+ return nil, err
+ }
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
diff --git a/core/node/groups.go b/core/node/groups.go
index 9904574a88b..9e6433a3254 100644
--- a/core/node/groups.go
+++ b/core/node/groups.go
@@ -254,7 +254,7 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option {
cacheOpts,
cfg.Datastore.HashOnRead,
cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough),
- cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
+ cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy),
)),
finalBstore,
)
@@ -347,9 +347,9 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
isBitswapServerEnabled := cfg.Bitswap.ServerEnabled.WithDefault(config.DefaultBitswapServerEnabled)
isHTTPRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled)
- // Right now Provider and Reprovider systems are tied together - disabling Reprovider by setting interval to 0 disables Provider
- // and vice versa: Provider.Enabled=false will disable both Provider of new CIDs and the Reprovider of old ones.
- isProviderEnabled := cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) && cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0
+ // The Provide system handles both new CID announcements and periodic re-announcements.
+ // Disabling is controlled by Provide.Enabled=false or setting Interval to 0.
+ isProviderEnabled := cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) && cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) != 0
return fx.Options(
fx.Provide(BitswapOptions(cfg)),
@@ -365,13 +365,7 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
fx.Provide(p2p.New),
LibP2P(bcfg, cfg, userResourceOverrides),
- OnlineProviders(
- isProviderEnabled,
- cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
- cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval),
- cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient),
- int(cfg.Provider.WorkerCount.WithDefault(config.DefaultProviderWorkerCount)),
- ),
+ OnlineProviders(isProviderEnabled, cfg),
)
}
@@ -432,6 +426,16 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
cfg.Import.UnixFSHAMTDirectorySizeThreshold = *cfg.Internal.UnixFSShardingSizeThreshold
}
+ // Validate Import configuration
+ if err := config.ValidateImportConfig(&cfg.Import); err != nil {
+ return fx.Error(err)
+ }
+
+ // Validate Provide configuration
+ if err := config.ValidateProvideConfig(&cfg.Provide); err != nil {
+ return fx.Error(err)
+ }
+
// Auto-sharding settings
shardingThresholdString := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardSingThresholdInt, err := humanize.ParseBytes(shardingThresholdString)
@@ -443,7 +447,7 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
uio.HAMTShardingSize = int(shardSingThresholdInt)
uio.DefaultShardWidth = int(shardMaxFanout)
- providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
+ providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
return fx.Options(
bcfgOpts,
diff --git a/core/node/libp2p/fd/sys_unix.go b/core/node/libp2p/fd/sys_unix.go
index 5e417c0fa6d..dcb82a8815e 100644
--- a/core/node/libp2p/fd/sys_unix.go
+++ b/core/node/libp2p/fd/sys_unix.go
@@ -1,5 +1,4 @@
//go:build linux || darwin
-// +build linux darwin
package fd
diff --git a/core/node/provider.go b/core/node/provider.go
index 17a312f98e9..2c77e580c8d 100644
--- a/core/node/provider.go
+++ b/core/node/provider.go
@@ -11,13 +11,28 @@ import (
"github.com/ipfs/boxo/mfs"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
- provider "github.com/ipfs/boxo/provider"
+ "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
irouting "github.com/ipfs/kubo/routing"
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ "github.com/libp2p/go-libp2p-kad-dht/amino"
+ "github.com/libp2p/go-libp2p-kad-dht/dual"
+ "github.com/libp2p/go-libp2p-kad-dht/fullrt"
+ dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
+ dhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider"
+ "github.com/libp2p/go-libp2p-kad-dht/provider/buffered"
+ ddhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider/dual"
+ "github.com/libp2p/go-libp2p-kad-dht/provider/keystore"
+ routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
+ "github.com/libp2p/go-libp2p/core/host"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+ ma "github.com/multiformats/go-multiaddr"
+ mh "github.com/multiformats/go-multihash"
"go.uber.org/fx"
)
@@ -29,154 +44,523 @@ const sampledBatchSize = 1000
// Datastore key used to store previous reprovide strategy.
const reprovideStrategyKey = "/reprovideStrategy"
-func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
- return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (provider.System, error) {
- // Initialize provider.System first, before pinner/blockstore/etc.
- // The KeyChanFunc will be set later via SetKeyProvider() once we have
- // created the pinner, blockstore and other dependencies.
- opts := []provider.Option{
- provider.Online(cr),
- provider.ReproviderInterval(reprovideInterval),
- provider.ProvideWorkerCount(provideWorkerCount),
+// DHTProvider is an interface for providing keys to a DHT swarm. It holds a
+// state of keys to be advertised, and is responsible for periodically
+// publishing provider records for these keys to the DHT swarm before the
+// records expire.
+type DHTProvider interface {
+ // StartProviding ensures keys are periodically advertised to the DHT swarm.
+ //
+ // If the `keys` aren't currently being reprovided, they are added to the
+ // queue to be provided to the DHT swarm as soon as possible, and scheduled
+ // to be reprovided periodically. If `force` is set to true, all keys are
+ // provided to the DHT swarm, regardless of whether they were already being
+ // reprovided in the past. `keys` keep being reprovided until `StopProviding`
+ // is called.
+ //
+ // This operation is asynchronous, it returns as soon as the `keys` are added
+ // to the provide queue, and provides happens asynchronously.
+ //
+ // Returns an error if the keys couldn't be added to the provide queue. This
+ // can happen if the provider is closed or if the node is currently Offline
+ // (either never bootstrapped, or disconnected since more than `OfflineDelay`).
+ // The schedule and provide queue depend on the network size, hence recent
+ // network connectivity is essential.
+ StartProviding(force bool, keys ...mh.Multihash) error
+ // ProvideOnce sends provider records for the specified keys to the DHT swarm
+ // only once. It does not automatically reprovide those keys afterward.
+ //
+ // Add the supplied multihashes to the provide queue, and return immediately.
+ // The provide operation happens asynchronously.
+ //
+ // Returns an error if the keys couldn't be added to the provide queue. This
+ // can happen if the provider is closed or if the node is currently Offline
+ // (either never bootstrapped, or disconnected since more than `OfflineDelay`).
+ // The schedule and provide queue depend on the network size, hence recent
+ // network connectivity is essential.
+ ProvideOnce(keys ...mh.Multihash) error
+ // Clear clears the all the keys from the provide queue and returns the number
+ // of keys that were cleared.
+ //
+ // The keys are not deleted from the keystore, so they will continue to be
+ // reprovided as scheduled.
+ Clear() int
+ // RefreshSchedule scans the Keystore for any keys that are not currently
+ // scheduled for reproviding. If such keys are found, it schedules their
+ // associated keyspace region to be reprovided.
+ //
+ // This function doesn't remove prefixes that have no keys from the schedule.
+ // This is done automatically during the reprovide operation if a region has no
+ // keys.
+ //
+ // Returns an error if the provider is closed or if the node is currently
+ // Offline (either never bootstrapped, or disconnected since more than
+ // `OfflineDelay`). The schedule depends on the network size, hence recent
+ // network connectivity is essential.
+ RefreshSchedule() error
+}
+
+var (
+ _ DHTProvider = &ddhtprovider.SweepingProvider{}
+ _ DHTProvider = &dhtprovider.SweepingProvider{}
+ _ DHTProvider = &NoopProvider{}
+ _ DHTProvider = &LegacyProvider{}
+)
+
+// NoopProvider is a no-operation provider implementation that does nothing.
+// It is used when providing is disabled or when no DHT is available.
+// All methods return successfully without performing any actual operations.
+type NoopProvider struct{}
+
+func (r *NoopProvider) StartProviding(bool, ...mh.Multihash) error { return nil }
+func (r *NoopProvider) ProvideOnce(...mh.Multihash) error { return nil }
+func (r *NoopProvider) Clear() int { return 0 }
+func (r *NoopProvider) RefreshSchedule() error { return nil }
+
+// LegacyProvider is a wrapper around the boxo/provider.System that implements
+// the DHTProvider interface. This provider manages reprovides using a burst
+// strategy where it sequentially reprovides all keys at once during each
+// reprovide interval, rather than spreading the load over time.
+//
+// This is the legacy provider implementation that can cause resource spikes
+// during reprovide operations. For more efficient providing, consider using
+// the SweepingProvider which spreads the load over the reprovide interval.
+type LegacyProvider struct {
+ provider.System
+}
+
+func (r *LegacyProvider) StartProviding(force bool, keys ...mh.Multihash) error {
+ return r.ProvideOnce(keys...)
+}
+
+func (r *LegacyProvider) ProvideOnce(keys ...mh.Multihash) error {
+ if many, ok := r.System.(routinghelpers.ProvideManyRouter); ok {
+ return many.ProvideMany(context.Background(), keys)
+ }
+
+ for _, k := range keys {
+ if err := r.Provide(context.Background(), cid.NewCidV1(cid.Raw, k), true); err != nil {
+ return err
}
- if !acceleratedDHTClient && reprovideInterval > 0 {
- // The estimation kinda suck if you are running with accelerated DHT client,
- // given this message is just trying to push people to use the acceleratedDHTClient
- // let's not report on through if it's in use
- opts = append(opts,
- provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool {
- avgProvideSpeed := duration / time.Duration(keysProvided)
- count := uint64(keysProvided)
-
- if !reprovide || !complete {
- // We don't know how many CIDs we have to provide, try to fetch it from the blockstore.
- // But don't try for too long as this might be very expensive if you have a huge datastore.
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
- defer cancel()
-
- // FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup.
- // Note: talk to datastore directly, as to not depend on Blockstore here.
- qr, err := repo.Datastore().Query(ctx, query.Query{
- Prefix: blockstore.BlockPrefix.String(),
- KeysOnly: true})
- if err != nil {
- logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err)
- return false
- }
- defer qr.Close()
- count = 0
- countLoop:
- for {
- select {
- case _, ok := <-qr.Next():
- if !ok {
- break countLoop
- }
- count++
- case <-ctx.Done():
- // really big blockstore mode
-
- // how many blocks would be in a 10TiB blockstore with 128KiB blocks.
- const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024)
- // How long per block that lasts us.
- expectedProvideSpeed := reprovideInterval / probableBigBlockstore
- if avgProvideSpeed > expectedProvideSpeed {
- logger.Errorf(`
-🔔🔔🔔 YOU MAY BE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔
-
-⚠️ Your system might be struggling to keep up with DHT reprovides!
-This means your content could be partially or completely inaccessible on the network.
-We observed that you recently provided %d keys at an average rate of %v per key.
-
-🕑 An attempt to estimate your blockstore size timed out after 5 minutes,
-implying your blockstore might be exceedingly large. Assuming a considerable
-size of 10TiB, it would take %v to provide the complete set.
-
-⏰ The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind!
-
-💡 Consider enabling the Accelerated DHT to enhance your system performance. See:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`,
- keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval)
- return false
+ }
+ return nil
+}
+
+func (r *LegacyProvider) Clear() int {
+ return r.System.Clear()
+}
+
+func (r *LegacyProvider) RefreshSchedule() error { return nil }
+
+// LegacyProviderOpt creates a LegacyProvider to be used as provider in the
+// IpfsNode
+func LegacyProviderOpt(reprovideInterval time.Duration, strategy string, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
+ system := fx.Provide(
+ fx.Annotate(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (*LegacyProvider, error) {
+ // Initialize provider.System first, before pinner/blockstore/etc.
+ // The KeyChanFunc will be set later via SetKeyProvider() once we have
+ // created the pinner, blockstore and other dependencies.
+ opts := []provider.Option{
+ provider.Online(cr),
+ provider.ReproviderInterval(reprovideInterval),
+ provider.ProvideWorkerCount(provideWorkerCount),
+ }
+ if !acceleratedDHTClient && reprovideInterval > 0 {
+ // The estimation kinda suck if you are running with accelerated DHT client,
+ // given this message is just trying to push people to use the acceleratedDHTClient
+ // let's not report on through if it's in use
+ opts = append(opts,
+ provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool {
+ avgProvideSpeed := duration / time.Duration(keysProvided)
+ count := uint64(keysProvided)
+
+ if !reprovide || !complete {
+ // We don't know how many CIDs we have to provide, try to fetch it from the blockstore.
+ // But don't try for too long as this might be very expensive if you have a huge datastore.
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
+ defer cancel()
+
+ // FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup.
+ // Note: talk to datastore directly, as to not depend on Blockstore here.
+ qr, err := repo.Datastore().Query(ctx, query.Query{
+ Prefix: blockstore.BlockPrefix.String(),
+ KeysOnly: true,
+ })
+ if err != nil {
+ logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err)
+ return false
+ }
+ defer qr.Close()
+ count = 0
+ countLoop:
+ for {
+ select {
+ case _, ok := <-qr.Next():
+ if !ok {
+ break countLoop
+ }
+ count++
+ case <-ctx.Done():
+ // really big blockstore mode
+
+ // how many blocks would be in a 10TiB blockstore with 128KiB blocks.
+ const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024)
+ // How long per block that lasts us.
+ expectedProvideSpeed := reprovideInterval / probableBigBlockstore
+ if avgProvideSpeed > expectedProvideSpeed {
+ logger.Errorf(`
+🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔
+
+Your node may be falling behind on DHT reprovides, which could affect content availability.
+
+Observed: %d keys at %v per key
+Estimated: Assuming 10TiB blockstore, would take %v to complete
+⏰ Must finish within %v (Provide.DHT.Interval)
+
+Solutions (try in order):
+1. Enable Provide.DHT.SweepEnabled=true (recommended)
+2. Increase Provide.DHT.MaxWorkers if needed
+3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive)
+
+Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`,
+ keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval)
+ return false
+ }
}
}
}
- }
- // How long per block that lasts us.
- expectedProvideSpeed := reprovideInterval
- if count > 0 {
- expectedProvideSpeed = reprovideInterval / time.Duration(count)
- }
+ // How long per block that lasts us.
+ expectedProvideSpeed := reprovideInterval
+ if count > 0 {
+ expectedProvideSpeed = reprovideInterval / time.Duration(count)
+ }
- if avgProvideSpeed > expectedProvideSpeed {
- logger.Errorf(`
-🔔🔔🔔 YOU ARE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔
+ if avgProvideSpeed > expectedProvideSpeed {
+ logger.Errorf(`
+🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔
-⚠️ Your system is struggling to keep up with DHT reprovides!
-This means your content could be partially or completely inaccessible on the network.
-We observed that you recently provided %d keys at an average rate of %v per key.
+Your node is falling behind on DHT reprovides, which will affect content availability.
-💾 Your total CID count is ~%d which would total at %v reprovide process.
+Observed: %d keys at %v per key
+Confirmed: ~%d total CIDs requiring %v to complete
+⏰ Must finish within %v (Provide.DHT.Interval)
-⏰ The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind!
+Solutions (try in order):
+1. Enable Provide.DHT.SweepEnabled=true (recommended)
+2. Increase Provide.DHT.MaxWorkers if needed
+3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive)
-💡 Consider enabling the Accelerated DHT to enhance your reprovide throughput. See:
-https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`,
- keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval)
- }
- return false
- }, sampledBatchSize))
+Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`,
+ keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval)
+ }
+ return false
+ }, sampledBatchSize))
+ }
+
+ sys, err := provider.New(repo.Datastore(), opts...)
+ if err != nil {
+ return nil, err
+ }
+ lc.Append(fx.Hook{
+ OnStop: func(ctx context.Context) error {
+ return sys.Close()
+ },
+ })
+
+ prov := &LegacyProvider{sys}
+ handleStrategyChange(strategy, prov, repo.Datastore())
+
+ return prov, nil
+ },
+ fx.As(new(provider.System)),
+ fx.As(new(DHTProvider)),
+ ),
+ )
+ setKeyProvider := fx.Invoke(func(lc fx.Lifecycle, system provider.System, keyProvider provider.KeyChanFunc) {
+ lc.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ // SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner.
+ // We cannot create the blockstore without the provider (it needs to provide blocks),
+ // and we cannot determine the reproviding strategy without the pinner/blockstore.
+ // This deferred initialization allows us to create provider.System first,
+ // then set the actual key provider function after all dependencies are ready.
+ system.SetKeyProvider(keyProvider)
+ return nil
+ },
+ })
+ })
+ return fx.Options(
+ system,
+ setKeyProvider,
+ )
+}
+
+type dhtImpl interface {
+ routing.Routing
+ GetClosestPeers(context.Context, string) ([]peer.ID, error)
+ Host() host.Host
+ MessageSender() dht_pb.MessageSender
+}
+type addrsFilter interface {
+ FilteredAddrs() []ma.Multiaddr
+}
+
+func SweepingProviderOpt(cfg *config.Config) fx.Option {
+ reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)
+ type providerInput struct {
+ fx.In
+ DHT routing.Routing `name:"dhtc"`
+ Repo repo.Repo
+ }
+ sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) {
+ ds := in.Repo.Datastore()
+ ks, err := keystore.NewResettableKeystore(ds,
+ keystore.WithPrefixBits(16),
+ keystore.WithDatastorePath("/provider/keystore"),
+ keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))),
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ // Constants for buffered provider configuration
+ // These values match the upstream defaults from go-libp2p-kad-dht and have been battle-tested
+ const (
+ // bufferedDsName is the datastore namespace used by the buffered provider.
+ // The dsqueue persists operations here to handle large data additions without
+ // being memory-bound, allowing operations on hardware with limited RAM and
+ // enabling core operations to return instantly while processing happens async.
+ bufferedDsName = "bprov"
+
+ // bufferedBatchSize controls how many operations are dequeued and processed
+ // together from the datastore queue. The worker processes up to this many
+ // operations at once, grouping them by type for efficiency.
+ bufferedBatchSize = 1 << 10 // 1024 items
+
+ // bufferedIdleWriteTime is an implementation detail of go-dsqueue that controls
+ // how long the datastore buffer waits for new multihashes to arrive before
+ // flushing in-memory items to the datastore. This does NOT affect providing speed -
+ // provides happen as fast as possible via a dedicated worker that continuously
+ // processes the queue regardless of this timing.
+ bufferedIdleWriteTime = time.Minute
+ )
+
+ bufferedProviderOpts := []buffered.Option{
+ buffered.WithBatchSize(bufferedBatchSize),
+ buffered.WithDsName(bufferedDsName),
+ buffered.WithIdleWriteTime(bufferedIdleWriteTime),
+ }
+ var impl dhtImpl
+ switch inDht := in.DHT.(type) {
+ case *dht.IpfsDHT:
+ if inDht != nil {
+ impl = inDht
+ }
+ case *dual.DHT:
+ if inDht != nil {
+ prov, err := ddhtprovider.New(inDht,
+ ddhtprovider.WithKeystore(ks),
+
+ ddhtprovider.WithReprovideInterval(reprovideInterval),
+ ddhtprovider.WithMaxReprovideDelay(time.Hour),
+ ddhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
+ ddhtprovider.WithConnectivityCheckOnlineInterval(1*time.Minute),
+
+ ddhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
+ ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
+ ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
+ ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
+ )
+ if err != nil {
+ return nil, nil, err
+ }
+ return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
+ }
+ case *fullrt.FullRT:
+ if inDht != nil {
+ impl = inDht
+ }
+ }
+ if impl == nil {
+ return &NoopProvider{}, nil, nil
+ }
+
+ var selfAddrsFunc func() []ma.Multiaddr
+ if imlpFilter, ok := impl.(addrsFilter); ok {
+ selfAddrsFunc = imlpFilter.FilteredAddrs
+ } else {
+ selfAddrsFunc = func() []ma.Multiaddr { return impl.Host().Addrs() }
+ }
+ opts := []dhtprovider.Option{
+ dhtprovider.WithKeystore(ks),
+ dhtprovider.WithPeerID(impl.Host().ID()),
+ dhtprovider.WithRouter(impl),
+ dhtprovider.WithMessageSender(impl.MessageSender()),
+ dhtprovider.WithSelfAddrs(selfAddrsFunc),
+ dhtprovider.WithAddLocalRecord(func(h mh.Multihash) error {
+ return impl.Provide(context.Background(), cid.NewCidV1(cid.Raw, h), false)
+ }),
+
+ dhtprovider.WithReplicationFactor(amino.DefaultBucketSize),
+ dhtprovider.WithReprovideInterval(reprovideInterval),
+ dhtprovider.WithMaxReprovideDelay(time.Hour),
+ dhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
+ dhtprovider.WithConnectivityCheckOnlineInterval(1 * time.Minute),
+
+ dhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
+ dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
+ dhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
+ dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
}
- sys, err := provider.New(repo.Datastore(), opts...)
+ prov, err := dhtprovider.New(opts...)
if err != nil {
- return nil, err
+ return nil, nil, err
+ }
+ return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
+ })
+
+ type keystoreInput struct {
+ fx.In
+ Provider DHTProvider
+ Keystore *keystore.ResettableKeystore
+ KeyProvider provider.KeyChanFunc
+ }
+ initKeystore := fx.Invoke(func(lc fx.Lifecycle, in keystoreInput) {
+ // Skip keystore initialization for NoopProvider
+ if _, ok := in.Provider.(*NoopProvider); ok {
+ return
+ }
+
+ var (
+ cancel context.CancelFunc
+ done = make(chan struct{})
+ )
+
+ syncKeystore := func(ctx context.Context) error {
+ kcf, err := in.KeyProvider(ctx)
+ if err != nil {
+ return err
+ }
+ if err := in.Keystore.ResetCids(ctx, kcf); err != nil {
+ return err
+ }
+ if err := in.Provider.RefreshSchedule(); err != nil {
+ logger.Infow("refreshing provider schedule", "err", err)
+ }
+ return nil
}
lc.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ // Set the KeyProvider as a garbage collection function for the
+ // keystore. Periodically purge the Keystore from all its keys and
+ // replace them with the keys that needs to be reprovided, coming from
+ // the KeyChanFunc. So far, this is the less worse way to remove CIDs
+ // that shouldn't be reprovided from the provider's state.
+ go func() {
+ // Sync the keystore once at startup. This operation is async since
+ // we need to walk the DAG of objects matching the provide strategy,
+ // which can take a while.
+ strategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
+ logger.Infow("provider keystore sync started", "strategy", strategy)
+ if err := syncKeystore(ctx); err != nil {
+ logger.Errorw("provider keystore sync failed", "err", err, "strategy", strategy)
+ } else {
+ logger.Infow("provider keystore sync completed", "strategy", strategy)
+ }
+ }()
+
+ gcCtx, c := context.WithCancel(context.Background())
+ cancel = c
+
+ go func() { // garbage collection loop for cids to reprovide
+ defer close(done)
+ ticker := time.NewTicker(reprovideInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-gcCtx.Done():
+ return
+ case <-ticker.C:
+ if err := syncKeystore(gcCtx); err != nil {
+ logger.Errorw("provider keystore sync", "err", err)
+ }
+ }
+ }
+ }()
+ return nil
+ },
OnStop: func(ctx context.Context) error {
- return sys.Close()
+ if cancel != nil {
+ cancel()
+ }
+ select {
+ case <-done:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ // Keystore data isn't purged, on close, but it will be overwritten
+ // when the node starts again.
+ return in.Keystore.Close()
},
})
-
- return sys, nil
})
+
+ return fx.Options(
+ sweepingReprovider,
+ initKeystore,
+ )
}
// ONLINE/OFFLINE
-// OnlineProviders groups units managing provider routing records online
-func OnlineProviders(provide bool, providerStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
+// OnlineProviders groups units managing provide routing records online
+func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
if !provide {
return OfflineProviders()
}
- strategyFlag := config.ParseReproviderStrategy(providerStrategy)
+ providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
+
+ strategyFlag := config.ParseProvideStrategy(providerStrategy)
if strategyFlag == 0 {
- return fx.Error(fmt.Errorf("unknown reprovider strategy %q", providerStrategy))
+ return fx.Error(fmt.Errorf("provider: unknown strategy %q", providerStrategy))
}
- return fx.Options(
+ opts := []fx.Option{
fx.Provide(setReproviderKeyProvider(providerStrategy)),
- ProviderSys(reprovideInterval, acceleratedDHTClient, provideWorkerCount),
- )
+ }
+ if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
+ opts = append(opts, SweepingProviderOpt(cfg))
+ } else {
+ reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)
+ acceleratedDHTClient := cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient)
+ provideWorkerCount := int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))
+
+ opts = append(opts, LegacyProviderOpt(reprovideInterval, providerStrategy, acceleratedDHTClient, provideWorkerCount))
+ }
+
+ return fx.Options(opts...)
}
-// OfflineProviders groups units managing provider routing records offline
+// OfflineProviders groups units managing provide routing records offline
func OfflineProviders() fx.Option {
- return fx.Provide(provider.NewNoopProvider)
+ return fx.Provide(func() DHTProvider {
+ return &NoopProvider{}
+ })
}
func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFunc {
return func(ctx context.Context) (<-chan cid.Cid, error) {
err := mfsRoot.FlushMemFree(ctx)
if err != nil {
- return nil, fmt.Errorf("error flushing mfs, cannot provide MFS: %w", err)
+ return nil, fmt.Errorf("provider: error flushing MFS, cannot provide MFS: %w", err)
}
rootNode, err := mfsRoot.GetDirectory().GetNode()
if err != nil {
- return nil, fmt.Errorf("error loading mfs root, cannot provide MFS: %w", err)
+ return nil, fmt.Errorf("provider: error loading MFS root, cannot provide MFS: %w", err)
}
kcf := provider.NewDAGProvider(rootNode.Cid(), fetcher)
@@ -191,13 +575,12 @@ type provStrategyIn struct {
OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"`
OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"`
MFSRoot *mfs.Root
- Provider provider.System
Repo repo.Repo
}
type provStrategyOut struct {
fx.Out
- ProvidingStrategy config.ReproviderStrategy
+ ProvidingStrategy config.ProvideStrategy
ProvidingKeyChanFunc provider.KeyChanFunc
}
@@ -207,18 +590,18 @@ type provStrategyOut struct {
// - "pinned": All pinned content (roots + children)
// - "mfs": Only MFS content
// - "all": all blocks
-func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc {
+func createKeyProvider(strategyFlag config.ProvideStrategy, in provStrategyIn) provider.KeyChanFunc {
switch strategyFlag {
- case config.ReproviderStrategyRoots:
+ case config.ProvideStrategyRoots:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher))
- case config.ReproviderStrategyPinned:
+ case config.ProvideStrategyPinned:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher))
- case config.ReproviderStrategyPinned | config.ReproviderStrategyMFS:
+ case config.ProvideStrategyPinned | config.ProvideStrategyMFS:
return provider.NewPrioritizedProvider(
provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)),
mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher),
)
- case config.ReproviderStrategyMFS:
+ case config.ProvideStrategyMFS:
return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
default: // "all", "", "flat" (compat)
return in.Blockstore.AllKeysChan
@@ -257,7 +640,7 @@ func persistStrategy(ctx context.Context, strategy string, ds datastore.Datastor
// Strategy change detection: when the reproviding strategy changes,
// we clear the provide queue to avoid unexpected behavior from mixing
// strategies. This ensures a clean transition between different providing modes.
-func handleStrategyChange(strategy string, provider provider.System, ds datastore.Datastore) {
+func handleStrategyChange(strategy string, provider DHTProvider, ds datastore.Datastore) {
ctx := context.Background()
previous, changed, err := detectStrategyChange(ctx, strategy, ds)
@@ -270,7 +653,7 @@ func handleStrategyChange(strategy string, provider provider.System, ds datastor
return
}
- logger.Infow("Reprovider.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
+ logger.Infow("Provide.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
provider.Clear()
if err := persistStrategy(ctx, strategy, ds); err != nil {
@@ -279,22 +662,11 @@ func handleStrategyChange(strategy string, provider provider.System, ds datastor
}
func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut {
- strategyFlag := config.ParseReproviderStrategy(strategy)
+ strategyFlag := config.ParseProvideStrategy(strategy)
return func(in provStrategyIn) provStrategyOut {
// Create the appropriate key provider based on strategy
kcf := createKeyProvider(strategyFlag, in)
-
- // SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner.
- // We cannot create the blockstore without the provider (it needs to provide blocks),
- // and we cannot determine the reproviding strategy without the pinner/blockstore.
- // This deferred initialization allows us to create provider.System first,
- // then set the actual key provider function after all dependencies are ready.
- in.Provider.SetKeyProvider(kcf)
-
- // Handle strategy changes (detection, queue clearing, persistence)
- handleStrategyChange(strategy, in.Provider, in.Repo.Datastore())
-
return provStrategyOut{
ProvidingStrategy: strategyFlag,
ProvidingKeyChanFunc: kcf,
diff --git a/core/node/storage.go b/core/node/storage.go
index b4ffb25878f..e97a0db4ab9 100644
--- a/core/node/storage.go
+++ b/core/node/storage.go
@@ -2,7 +2,6 @@ package node
import (
blockstore "github.com/ipfs/boxo/blockstore"
- provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-datastore"
config "github.com/ipfs/kubo/config"
"go.uber.org/fx"
@@ -33,9 +32,8 @@ func BaseBlockstoreCtor(
hashOnRead bool,
writeThrough bool,
providingStrategy string,
-
-) func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
- return func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
+) func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
+ return func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}
// Blockstore providing integration:
@@ -43,8 +41,8 @@ func BaseBlockstoreCtor(
// Important: Provide calls from blockstore are intentionally BLOCKING.
// The Provider implementation (not the blockstore) should handle concurrency/queuing.
// This avoids spawning unbounded goroutines for concurrent block additions.
- strategyFlag := config.ParseReproviderStrategy(providingStrategy)
- if strategyFlag&config.ReproviderStrategyAll != 0 {
+ strategyFlag := config.ParseProvideStrategy(providingStrategy)
+ if strategyFlag&config.ProvideStrategyAll != 0 {
opts = append(opts, blockstore.Provider(prov))
}
@@ -79,11 +77,11 @@ func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockst
}
// FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support
-func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
+func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
gclocker = blockstore.NewGCLocker()
// hash security
- fstore = filestore.NewFilestore(bb, repo.FileManager())
+ fstore = filestore.NewFilestore(bb, repo.FileManager(), prov)
gcbs = blockstore.NewGCBlockstore(fstore, gclocker)
gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs}
diff --git a/coverage/main/main.go b/coverage/main/main.go
index e680a7037ea..0d279d967e3 100644
--- a/coverage/main/main.go
+++ b/coverage/main/main.go
@@ -1,5 +1,4 @@
//go:build testrunmain
-// +build testrunmain
package main
diff --git a/docs/RELEASE_CHECKLIST.md b/docs/RELEASE_CHECKLIST.md
index 0bf36a2deda..8dbb771bf64 100644
--- a/docs/RELEASE_CHECKLIST.md
+++ b/docs/RELEASE_CHECKLIST.md
@@ -1,136 +1,113 @@
-
+
# ✅ Release Checklist (vX.Y.Z[-rcN])
-## Labels
-
-If an item should be executed only for a specific release type, it is labeled with:
-
--  execute **ONLY** when releasing a Release Candidate
--  execute **ONLY** when releasing a Final Release
--  do **NOT** execute when releasing a Patch Release
-
-Otherwise, it means a step should be executed for **ALL** release types.
-
-## Before the release
-
-This section covers tasks to be done ahead of the release.
-
-- [ ] Verify you have access to all the services and tools required for the release
- - [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) configured in local git and in GitHub
- - [ ] [docker](https://docs.docker.com/get-docker/) installed on your system
- - [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system
- - [ ] [kubo](https://github.com/ipfs/kubo) checked out under `$(go env GOPATH)/src/github.com/ipfs/kubo`
- - you can also symlink your clone to the expected location by running `mkdir -p $(go env GOPATH)/src/github.com/ipfs && ln -s $(pwd) $(go env GOPATH)/src/github.com/ipfs/kubo`
--  Upgrade Go used in CI to the latest patch release available at
-
-## The release
-
-This section covers tasks to be done during each release.
-
-### 1. Prepare release branch
-
-- [ ] Prepare the release branch and update version numbers accordingly
- - [ ] create a new branch `release-vX.Y.Z`
- - use `master` as base if `Z == 0`
- - use `release` as base if `Z > 0`
- - [ ]  update the `CurrentVersionNumber` in [version.go](version.go) in the `master` branch to `vX.Y+1.0-dev` ([example](https://github.com/ipfs/kubo/pull/9305))
- - [ ] update the `CurrentVersionNumber` in [version.go](version.go) in the `release-vX.Y.Z` branch to `vX.Y.Z(-rcN)` ([example](https://github.com/ipfs/kubo/pull/9394))
- - [ ] create a draft PR from `release-vX.Y.Z` to `release` ([example](https://github.com/ipfs/kubo/pull/9306))
- - [ ] Cherry-pick commits from `master` to the `release-vX.Y.Z` using `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf))
- - **NOTE:** cherry-picking with `-x` is important
- - [ ] verify all CI checks on the PR from `release-vX.Y.Z` to `release` are passing
- - [ ]  Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`.
- - **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z`
- - [ ]  Merge the PR from `release-vX.Y.Z` to `release` using the `Create a merge commit`
- - do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- - do **NOT** delete the `release-vX.Y.Z` branch
-
-### 2. Tag release
-
-- [ ] Create the release tag
- - ⚠️ **NOTE:** This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with !
- - [ ]  tag the HEAD commit using `git tag -s vX.Y.Z(-rcN) -m 'Prerelease X.Y.Z(-rcN)'`
- - [ ]  tag the HEAD commit of the `release` branch using `git tag -s vX.Y.Z -m 'Release X.Y.Z'`
- - [ ] ⚠️ verify the tag is signed and tied to the correct commit using `git show vX.Y.Z(-rcN)`
- - [ ] push the tag to GitHub using `git push origin vX.Y.Z(-rcN)`
- - ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags
-
-### 3. Publish
-
-- [ ] Publish Docker image to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags)
- - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow run initiated by the tag push to finish
- - [ ] verify the image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags)
-- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech)
- - [ ] check out [ipfs/distributions](https://github.com/ipfs/distributions)
- - [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)`
- - [ ] Verify [ipfs/distributions](https://github.com/ipfs/distributions)'s `.tool-versions`'s `golang` entry is set to the [latest go release](https://go.dev/doc/devel/release) on the major go branch [Kubo is being tested on](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) (see `go-version:`). If not, update `.tool-versions` to match the latest golang.
- - [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage))
- - [ ] create and merge the PR which updates `dists/kubo/versions` (**NOTE:**  will also have `dists/kubo/current` – [example](https://github.com/ipfs/distributions/pull/1125))
- - [ ] wait for the [CI](https://github.com/ipfs/distributions/actions/workflows/main.yml) workflow run initiated by the merge to master to finish
- - [ ] verify the release is available on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo)
-- [ ] Publish the release to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- - [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if it was not executed already and verify it discovered the new release
- - [ ] verify the release is available on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
-- [ ] Publish the release to [GitHub kubo/releases](https://github.com/ipfs/kubo/releases)
- - [ ] [create](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) a new release
- - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1)
- - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0)
- - [ ] use the `vX.Y.Z(-rcN)` tag
- - [ ] link to the release issue
- - [ ]  link to the changelog in the description
- - [ ]  check the `This is a pre-release` checkbox
- - [ ]  copy the changelog (without the header) in the description
- - [ ]  do **NOT** check the `This is a pre-release` checkbox
- - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow and verify the release assets are attached to the GitHub release
-
-### 4. After Publishing
-
-- [ ]  Merge the [release](https://github.com/ipfs/kubo/tree/release) branch back into [master](https://github.com/ipfs/kubo/tree/master)
- - [ ] Create a new branch `merge-release-vX.Y.Z` from `release`
- - [ ] Create the next [`./docs/changelogs/vA.B.md`](https://github.com/ipfs/kubo/blob/master/docs/changelogs/) and link to the new changelog from the [`./CHANGELOG.md`](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md) file
- - [ ] Create and merge a PR from `merge-release-vX.Y.Z` to `master`
- - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](version.go) (keep the `-dev` in `master`)
+**Release types:** RC (Release Candidate) | FINAL | PATCH
+
+## Prerequisites
+
+- [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) configured in local git and GitHub
+- [ ] [Docker](https://docs.docker.com/get-docker/) installed on your system
+- [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system
+- [ ] kubo repository cloned locally
+- [ ] **non-PATCH:** Upgrade Go in CI to latest patch from
+
+## 1. Prepare Release Branch
+
+- [ ] Fetch latest changes: `git fetch origin master release`
+- [ ] Create branch `release-vX.Y.Z` (base from: `master` if Z=0 for new minor/major, `release` if Z>0 for patch)
+- [ ] **RC1 only:** Switch to `master` branch and prepare for next release cycle:
+ - [ ] Update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y+1.0-dev` (⚠️ double-check Y+1 is correct) ([example PR](https://github.com/ipfs/kubo/pull/9305))
+ - [ ] Create `./docs/changelogs/vX.Y+1.md` and add link in [CHANGELOG.md](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md)
+- [ ] Switch to `release-vX.Y.Z` branch and update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y.Z(-rcN)` (⚠️ double-check Y matches release) ([example](https://github.com/ipfs/kubo/pull/9394))
+- [ ] Create draft PR: `release-vX.Y.Z` → `release` ([example](https://github.com/ipfs/kubo/pull/9306))
+- [ ] In `release-vX.Y.Z` branch, cherry-pick commits from `master`: `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf))
+ - ⚠️ **NOTE:** `-x` flag records original commit SHA for traceability and ensures cleaner merges with deduplicated commits in history
+- [ ] Verify all CI checks on the PR are passing
+- [ ] **FINAL only:** In `release-vX.Y.Z` branch, replace `Changelog` and `Contributors` sections with `./bin/mkreleaselog` stdout (do **NOT** copy stderr)
+- [ ] **FINAL only:** Merge PR (`release-vX.Y.Z` → `release`) using `Create a merge commit`
+ - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
+ - ⚠️ do **NOT** delete the `release-vX.Y.Z` branch (needed for future patch releases and git history)
+
+## 2. Tag & Publish
+
+### Create Tag
+⚠️ **POINT OF NO RETURN:** Once pushed, tags trigger automatic Docker/NPM publishing that cannot be reversed!
+If you're making a release for the first time, do pair programming and have the release reviewer verify all commands.
+
+- [ ] **RC:** From `release-vX.Y.Z` branch: `git tag -s vX.Y.Z-rcN -m 'Prerelease X.Y.Z-rcN'`
+- [ ] **FINAL:** After PR merge, from `release` branch: `git tag -s vX.Y.Z -m 'Release X.Y.Z'`
+- [ ] ⚠️ Verify tag is signed and correct: `git show vX.Y.Z(-rcN)`
+- [ ] Push tag: `git push origin vX.Y.Z(-rcN)`
+ - ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags
+- [ ] **STOP:** Wait for [Docker build](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) to complete before proceeding
+
+### Publish Artifacts
+
+- [ ] **Docker:** Publish to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags)
+ - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow triggered by tag push
+ - [ ] Verify image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags)
+- [ ] **dist.ipfs.tech:** Publish to [dist.ipfs.tech](https://dist.ipfs.tech)
+ - [ ] Check out [ipfs/distributions](https://github.com/ipfs/distributions)
+ - [ ] Create branch: `git checkout -b release-kubo-X.Y.Z(-rcN)`
+ - [ ] Verify `.tool-versions` golang matches [Kubo's CI](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) `go-version:` (update if needed)
+ - [ ] Run: `./dist.sh add-version kubo vX.Y.Z(-rcN)` ([usage](https://github.com/ipfs/distributions#usage))
+ - [ ] Create and merge PR (updates `dists/kubo/versions`, **FINAL** also updates `dists/kubo/current` - [example](https://github.com/ipfs/distributions/pull/1125))
+ - [ ] Wait for [CI workflow](https://github.com/ipfs/distributions/actions/workflows/main.yml) triggered by merge
+ - [ ] Verify release on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo)
+- [ ] **NPM:** Publish to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
+ - [ ] Manually dispatch [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if not auto-triggered
+ - [ ] Verify release on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
+- [ ] **GitHub Release:** Publish to [GitHub](https://github.com/ipfs/kubo/releases)
+ - [ ] [Create release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) ([RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1), [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0))
+ - [ ] Use tag `vX.Y.Z(-rcN)`
+ - [ ] Link to release issue
+ - [ ] **RC:** Link to changelog, check `This is a pre-release`
+ - [ ] **FINAL:** Copy changelog content (without header), do **NOT** check pre-release
+ - [ ] Run [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow
+ - [ ] Verify assets are attached to the GitHub release
+
+## 3. Post-Release
+
+### Technical Tasks
+
+- [ ] **FINAL only:** Merge `release` → `master`
+ - [ ] Create branch `merge-release-vX.Y.Z` from `release`
+ - [ ] Merge `master` to `merge-release-vX.Y.Z` first, and resolve conflict in `version.go`
+ - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](https://github.com/ipfs/kubo/blob/master/version.go) (keep the `-dev` in `master`)
+ - [ ] Create and merge PR from `merge-release-vX.Y.Z` to `master` using `Create a merge commit`
+ - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we want to preserve original commit history
- [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra)
- - [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details.
- - [ ]  Test last release against the current RC
- - [ ]  Test last release against the current one
- - [ ] Update collab cluster boxes to the tagged release (final or RC)
- - [ ] Update libp2p bootstrappers to the tagged release (final or RC)
-- [ ] Promote the release
- - [ ] create an [IPFS Discourse](https://discuss.ipfs.tech) topic ([prerelease example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [release example](https://discuss.ipfs.tech/t/kubo-v0-16-0-release-is-out/15249))
- - [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` as tags
- - [ ] repeat the title as a heading (`##`) in the description
- - [ ] link to the GitHub Release, binaries on IPNS, docker pull command and release notes in the description
- - [ ] pin the [IPFS Discourse](https://discuss.ipfs.tech) topic globally, you can make the topic a banner if there is no banner already
- - [ ] verify the [IPFS Discourse](https://discuss.ipfs.tech) topic was copied to:
- - [ ] [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) in IPFS Discord
- - [ ] [#ipfs-chatter](https://filecoinproject.slack.com/archives/C018EJ8LWH1) in FIL Slack
- - [ ] [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) in Matrix
- - [ ]  Add the link to the [IPFS Discourse](https://discuss.ipfs.tech) topic to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) description ([example](https://github.com/ipfs/kubo/releases/tag/v0.17.0))
- - [ ]  create an issue comment mentioning early testers on the release issue ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478))
- - [ ]  create an issue comment linking to the release on the release issue ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975))
- - [ ]   promote on bsky.app ([example](https://bsky.app/profile/ipshipyard.com/post/3lh2brzrwbs2c))
- - [ ]   promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609))
- - [ ]   post the link to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) to [Reddit](https://reddit.com/r/ipfs) ([example](https://www.reddit.com/r/ipfs/comments/9x0q0k/kubo_v0160_release_is_out/))
-- [ ] Manually smoke-test the new version with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
-- [ ] Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
- - [ ] create a PR which updates `kubo` version to the tagged version in `package.json` and `package-lock.json`
- - [ ]  switch to final release and merge
-- [ ]  Update Kubo docs at docs.ipfs.tech:
- - [ ]  run the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow
- - [ ]  merge the PR created by the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow run
-
-- [ ]  Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech)
- - [ ]  create a PR which adds a release note for the new Kubo version ([example](https://github.com/ipfs/ipfs-blog/pull/529))
- - [ ]  merge the PR
- - [ ]  verify the blog entry was published
-- [ ]   Create a dependency update PR
- - [ ]   check out [ipfs/kubo](https://github.com/ipfs/kubo)
- - [ ]   go over direct dependencies from `go.mod` in the root directory (NOTE: do not run `go get -u` as it will upgrade indirect dependencies which may cause problems)
- - [ ]   run `make mod_tidy`
- - [ ]   create a PR which updates `go.mod` and `go.sum`
- - [ ]   add the PR to the next release milestone
-- [ ]   Create the next release issue
-- [ ]  Close the release issue
+ - [ ] Update Kubo staging environment ([Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8))
+ - [ ] **RC:** Test last release against current RC
+ - [ ] **FINAL:** Test last release against current one
+ - [ ] Update collab cluster boxes to the tagged release
+ - [ ] Update libp2p bootstrappers to the tagged release
+- [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
+- [ ] Update [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
+ - [ ] Create PR updating kubo version in `package.json` and `package-lock.json`
+ - [ ] **FINAL only:** Merge and create/request new release
+- [ ] **FINAL only:** Update [docs.ipfs.tech](https://docs.ipfs.tech/): run [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow and merge the PR
+
+### Promotion
+
+- [ ] Create [IPFS Discourse](https://discuss.ipfs.tech) topic ([RC example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [FINAL example](https://discuss.ipfs.tech/t/kubo-v0-37-0-is-out/19673))
+ - [ ] Title: `Kubo vX.Y.Z(-rcN) is out!`, tag: `kubo`
+ - [ ] Use title as heading (`##`) in description
+ - [ ] Include: GitHub release link, IPNS binaries, docker pull command, release notes
+ - [ ] Pin topic globally (make banner if no existing banner)
+- [ ] Verify bot posted to [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) (Discord) or [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) (Matrix)
+- [ ] **RC only:** Comment on release issue mentioning early testers ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478))
+- [ ] **FINAL only:** Comment on release issue with link ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975))
+- [ ] **FINAL only:** Create [blog.ipfs.tech](https://blog.ipfs.tech) entry ([example](https://github.com/ipfs/ipfs-blog/commit/32040d1e90279f21bad56b924fe4710bba5ba043))
+- [ ] **FINAL non-PATCH:** (optional) Post on social media ([bsky](https://bsky.app/profile/ipshipyard.com/post/3ltxcsrbn5s2k), [x.com](https://x.com/ipshipyard/status/1944867893226635603), [Reddit](https://www.reddit.com/r/ipfs/comments/1lzy6ze/release_v0360_ipfskubo/))
+
+### Final Steps
+
+- [ ] **FINAL non-PATCH:** Create dependency update PR
+ - [ ] Review direct dependencies from root `go.mod` (⚠️ do **NOT** run `go get -u` as it will upgrade indirect dependencies which may cause problems)
+ - [ ] Run `make mod_tidy`
+ - [ ] Create PR with `go.mod` and `go.sum` updates
+ - [ ] Add PR to next release milestone
+- [ ] **FINAL non-PATCH:** Create next release issue ([example](https://github.com/ipfs/kubo/issues/10816))
+- [ ] **FINAL only:** Close release issue
\ No newline at end of file
diff --git a/docs/changelogs/v0.38.md b/docs/changelogs/v0.38.md
index 3d2de2f9b6c..2edb31adf87 100644
--- a/docs/changelogs/v0.38.md
+++ b/docs/changelogs/v0.38.md
@@ -10,20 +10,283 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
+ - [🚀 Repository migration: simplified provide configuration](#-repository-migration-simplified-provide-configuration)
+ - [🧹 Experimental Sweeping DHT Provider](#-experimental-sweeping-dht-provider)
+ - [📊 Exposed DHT metrics](#-exposed-dht-metrics)
+ - [🚨 Improved gateway error pages with diagnostic tools](#-improved-gateway-error-pages-with-diagnostic-tools)
+ - [🎨 Updated WebUI](#-updated-webui)
+ - [📌 Pin name improvements](#-pin-name-improvements)
+ - [🛠️ Identity CID size enforcement and `ipfs files write` fixes](#️-identity-cid-size-enforcement-and-ipfs-files-write-fixes)
+ - [📤 Provide Filestore and Urlstore blocks on write](#-provide-filestore-and-urlstore-blocks-on-write)
+ - [🚦 MFS operation limit for --flush=false](#-mfs-operation-limit-for---flush=false)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨👩👧👦 Contributors](#-contributors)
### Overview
+Kubo 0.38.0 simplifies content announcement configuration, introduces an experimental sweeping DHT provider for efficient large-scale operations, and includes various performance improvements.
+
### 🔦 Highlights
+#### 🚀 Repository migration: simplified provide configuration
+
+This release migrates the repository from version 17 to version 18, simplifying how you configure content announcements.
+
+The old `Provider` and `Reprovider` sections are now combined into a single [`Provide`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provide) section. Your existing settings are automatically migrated - no manual changes needed.
+
+**Migration happens automatically** when you run `ipfs daemon --migrate`. For manual migration: `ipfs repo migrate --to=18`.
+
+Read more about the new system below.
+
+#### 🧹 Experimental Sweeping DHT Provider
+
+A new experimental DHT provider is available as an alternative to both the default provider and the resource-intensive [accelerated DHT client](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). Enable it via [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled).
+
+**How it works:** Instead of providing keys one-by-one, the sweep provider systematically explores DHT keyspace regions in batches.
+
+>
+>
+>
+>
+>
+>
+> The diagram shows how sweep mode avoids the hourly traffic spikes of Accelerated DHT while maintaining similar effectiveness. By grouping CIDs into keyspace regions and processing them in batches, sweep mode reduces memory overhead and creates predictable network patterns.
+
+**Benefits for large-scale operations:** Handles hundreds of thousands of CIDs with reduced memory and network connections, spreads operations evenly to eliminate resource spikes, maintains state across restarts through persistent keystore, and provides better metrics visibility.
+
+**Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system.
+
+> [!NOTE]
+> This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. Run `ipfs provide --help` for alternatives.
+
+For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedht). For metrics documentation, see [Provide metrics](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide).
+
+#### 📊 Exposed DHT metrics
+
+Kubo now exposes DHT metrics from [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/), including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details.
+
+#### 🚨 Improved gateway error pages with diagnostic tools
+
+Gateway error pages now provide more actionable information during content retrieval failures. When a 504 Gateway Timeout occurs, users see detailed retrieval state information including which phase failed and a sample of providers that were attempted:
+
+> 
+>
+> - **[`Gateway.DiagnosticServiceURL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaydiagnosticserviceurl)** (default: `https://check.ipfs.network`): Configures the diagnostic service URL. When set, 504 errors show a "Check CID retrievability" button that links to this service with `?cid=` for external diagnostics. Set to empty string to disable.
+> - **Enhanced error details**: Timeout errors now display the retrieval phase where failure occurred (e.g., "connecting to providers", "fetching data") and up to 3 peer IDs that were attempted but couldn't deliver the content, making it easier to diagnose network or provider issues.
+> - **Retry button on all error pages**: Every gateway error page now includes a retry button for quick page refresh without manual URL re-entry.
+
+#### 🎨 Updated WebUI
+
+The Web UI has been updated to [v4.9](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0) with a new **Diagnostics** screen for troubleshooting and system monitoring. Access it at `http://127.0.0.1:5001/webui` when running your local IPFS node.
+
+| Diagnostics: Logs | Files: Check Retrieval | Diagnostics: Retrieval Results |
+|:---:|:---:|:---:|
+|  |  |  |
+| Debug issues in real-time by adjusting [log level](https://github.com/ipfs/kubo/blob/master/docs/environment-variables.md#golog_log_level) without restart (global or per-subsystem like bitswap) | Check if content is available to other peers directly from Files screen | Find out why content won't load or who is providing it to the network |
+
+| Peers: Agent Versions | Files: Custom Sorting |
+|:---:|:---:|
+|  |  |
+| Know what software peers run | Find files faster with new sorting |
+
+Additional improvements include a close button in the file viewer, better error handling, and fixed navigation highlighting.
+
+#### 📌 Pin name improvements
+
+`ipfs pin ls --names` now correctly returns pin names for specific CIDs ([#10649](https://github.com/ipfs/kubo/issues/10649), [boxo#1035](https://github.com/ipfs/boxo/pull/1035)), RPC no longer incorrectly returns names from other pins ([#10966](https://github.com/ipfs/kubo/pull/10966)), and pin names are now limited to 255 bytes for better cross-platform compatibility ([#10981](https://github.com/ipfs/kubo/pull/10981)).
+
+#### 🛠️ Identity CID size enforcement and `ipfs files write` fixes
+
+**Identity CID size limits are now enforced**
+
+This release enforces a maximum of 128 bytes for identity CIDs ([IPIP-512](https://github.com/ipfs/specs/pull/512)) - attempting to exceed this limit will return a clear error message.
+
+Identity CIDs use [multihash `0x00`](https://github.com/multiformats/multicodec/blob/master/table.csv#L2) to embed data directly in the CID without hashing. This experimental optimization was designed for tiny data where a CID reference would be larger than the data itself, but without size limits it was easy to misuse and could turn into an anti-pattern that wastes resources and enables abuse.
+
+- `ipfs add --inline-limit` and `--hash=identity` now enforce the 128-byte maximum (error when exceeded)
+- `ipfs files write` prevents creation of oversized identity CIDs
+
+**Multiple `ipfs files write` bugs have been fixed**
+
+This release resolves several long-standing MFS issues: raw nodes now preserve their codec instead of being forced to dag-pb, append operations on raw nodes work correctly by converting to UnixFS when needed, and identity CIDs properly inherit the full CID prefix from parent directories.
+
+#### 📤 Provide Filestore and Urlstore blocks on write
+
+Improvements to the providing system in the last release (provide blocks according to the configured [Strategy](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy)) left out [Filestore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) and [Urlstore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-urlstore) blocks when the "all" strategy was used. They would only be reprovided but not provided on write. This is now fixed, and both Filestore blocks (local file references) and Urlstore blocks (HTTP/HTTPS URL references) will be provided correctly shortly after initial add.
+
+#### 🚦 MFS operation limit for --flush=false
+
+The new [`Internal.MFSNoFlushLimit`](https://github.com/ipfs/kubo/blob/master/docs/config.md#internalmfsnoflushlimit) configuration option prevents unbounded memory growth when using `--flush=false` with `ipfs files` commands. After performing the configured number of operations without flushing (default: 256), further operations will fail with a clear error message instructing users to flush manually.
+
### 📦️ Important dependency updates
+- update `boxo` to [v0.35.0](https://github.com/ipfs/boxo/releases/tag/v0.35.0)
+- update `go-libp2p-kad-dht` to [v0.35.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.35.0)
+- update `ipfs-webui` to [v4.9.1](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.1) (incl. [v4.9.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0))
+
### 📝 Changelog
Full Changelog
+- github.com/ipfs/kubo:
+ - chore: v0.38.0
+ - chore: bump go-libp2p-kad-dht to v0.35.0 (#11002) ([ipfs/kubo#11002](https://github.com/ipfs/kubo/pull/11002))
+ - docs: add sweeping provide worker count recommendation (#11001) ([ipfs/kubo#11001](https://github.com/ipfs/kubo/pull/11001))
+ - Upgrade to Boxo v0.35.0 (#10999) ([ipfs/kubo#10999](https://github.com/ipfs/kubo/pull/10999))
+ - chore: 0.38.0-rc2
+ - chore: update boxo and kad-dht dependencies (#10995) ([ipfs/kubo#10995](https://github.com/ipfs/kubo/pull/10995))
+ - fix: update webui to v4.9.1 (#10994) ([ipfs/kubo#10994](https://github.com/ipfs/kubo/pull/10994))
+ - fix: provider merge conflicts (#10989) ([ipfs/kubo#10989](https://github.com/ipfs/kubo/pull/10989))
+ - fix(mfs): add soft limit for `--flush=false` (#10985) ([ipfs/kubo#10985](https://github.com/ipfs/kubo/pull/10985))
+ - fix: provide Filestore nodes (#10990) ([ipfs/kubo#10990](https://github.com/ipfs/kubo/pull/10990))
+ - feat: limit pin names to 255 bytes (#10981) ([ipfs/kubo#10981](https://github.com/ipfs/kubo/pull/10981))
+ - fix: SweepingProvider slow start (#10980) ([ipfs/kubo#10980](https://github.com/ipfs/kubo/pull/10980))
+ - chore: release v0.38.0-rc1
+ - fix: SweepingProvider shouldn't error when missing DHT (#10975) ([ipfs/kubo#10975](https://github.com/ipfs/kubo/pull/10975))
+ - fix: allow custom http provide when libp2p node is offline (#10974) ([ipfs/kubo#10974](https://github.com/ipfs/kubo/pull/10974))
+ - docs(provide): validation and reprovide cycle visualization (#10977) ([ipfs/kubo#10977](https://github.com/ipfs/kubo/pull/10977))
+ - refactor(ci): optimize build workflows (#10973) ([ipfs/kubo#10973](https://github.com/ipfs/kubo/pull/10973))
+ - fix(cmds): cleanup unicode identify strings (#9465) ([ipfs/kubo#9465](https://github.com/ipfs/kubo/pull/9465))
+ - feat: ipfs-webui v4.9.0 with retrieval diagnostics (#10969) ([ipfs/kubo#10969](https://github.com/ipfs/kubo/pull/10969))
+ - fix(mfs): unbound cache growth with `flush=false` (#10971) ([ipfs/kubo#10971](https://github.com/ipfs/kubo/pull/10971))
+ - fix: `ipfs pin ls --names` (#10970) ([ipfs/kubo#10970](https://github.com/ipfs/kubo/pull/10970))
+ - refactor(config): migration 17-to-18 to unify Provider/Reprovider into Provide.DHT (#10951) ([ipfs/kubo#10951](https://github.com/ipfs/kubo/pull/10951))
+ - feat: opt-in new Sweep provide system (#10834) ([ipfs/kubo#10834](https://github.com/ipfs/kubo/pull/10834))
+ - rpc: retrieve pin names when Detailed option provided (#10966) ([ipfs/kubo#10966](https://github.com/ipfs/kubo/pull/10966))
+ - fix: enforce identity CID size limits (#10949) ([ipfs/kubo#10949](https://github.com/ipfs/kubo/pull/10949))
+ - docs: kubo logo sources (#10964) ([ipfs/kubo#10964](https://github.com/ipfs/kubo/pull/10964))
+ - feat(config): validate Import config at daemon startup (#10957) ([ipfs/kubo#10957](https://github.com/ipfs/kubo/pull/10957))
+ - fix(telemetry): improve vm/container detection (#10944) ([ipfs/kubo#10944](https://github.com/ipfs/kubo/pull/10944))
+ - feat(gateway): improved error page with retrieval state details (#10950) ([ipfs/kubo#10950](https://github.com/ipfs/kubo/pull/10950))
+ - close files opened during migration (#10956) ([ipfs/kubo#10956](https://github.com/ipfs/kubo/pull/10956))
+ - fix ctrl-c prompt during run migrations prompt (#10947) ([ipfs/kubo#10947](https://github.com/ipfs/kubo/pull/10947))
+ - repo: use config api to get node root path (#10934) ([ipfs/kubo#10934](https://github.com/ipfs/kubo/pull/10934))
+ - docs: simplify release process (#10870) ([ipfs/kubo#10870](https://github.com/ipfs/kubo/pull/10870))
+ - Merge release v0.37.0 ([ipfs/kubo#10943](https://github.com/ipfs/kubo/pull/10943))
+ - feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927))
+ - fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931))
+ - fix: harness tests random panic (#10933) ([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933))
+ - chore: 0.38.0-dev
+- github.com/ipfs/boxo (v0.34.0 -> v0.35.0):
+ - Release v0.35.0 ([ipfs/boxo#1046](https://github.com/ipfs/boxo/pull/1046))
+ - feat(gateway): add `MaxRangeRequestFileSize` protection (#1043) ([ipfs/boxo#1043](https://github.com/ipfs/boxo/pull/1043))
+ - revert: remove MFS auto-flush mechanism (#1041) ([ipfs/boxo#1041](https://github.com/ipfs/boxo/pull/1041))
+ - Filestore: add Provider option to provide filestore blocks. (#1042) ([ipfs/boxo#1042](https://github.com/ipfs/boxo/pull/1042))
+ - fix(pinner): restore indirect pin detection and add context cancellation (#1039) ([ipfs/boxo#1039](https://github.com/ipfs/boxo/pull/1039))
+ - fix(mfs): limit cache growth by default (#1037) ([ipfs/boxo#1037](https://github.com/ipfs/boxo/pull/1037))
+ - update dependencies (#1038) ([ipfs/boxo#1038](https://github.com/ipfs/boxo/pull/1038))
+ - feat(pinner): add `CheckIfPinnedWithType` for efficient checks with names (#1035) ([ipfs/boxo#1035](https://github.com/ipfs/boxo/pull/1035))
+ - fix(routing/http): don't cancel batch prematurely (#1036) ([ipfs/boxo#1036](https://github.com/ipfs/boxo/pull/1036))
+ - refactor: use the new Reprovide Sweep interface (#995) ([ipfs/boxo#995](https://github.com/ipfs/boxo/pull/995))
+ - Update go-dsqueue to latest (#1034) ([ipfs/boxo#1034](https://github.com/ipfs/boxo/pull/1034))
+ - feat(routing/http): return 200 for empty results per IPIP-513 (#1032) ([ipfs/boxo#1032](https://github.com/ipfs/boxo/pull/1032))
+ - replace provider queue with go-dsqueue (#1033) ([ipfs/boxo#1033](https://github.com/ipfs/boxo/pull/1033))
+ - refactor: use slices package to simplify slice manipulation (#1031) ([ipfs/boxo#1031](https://github.com/ipfs/boxo/pull/1031))
+ - bitswap/network: fix read/write data race in bitswap network test (#1030) ([ipfs/boxo#1030](https://github.com/ipfs/boxo/pull/1030))
+ - fix(verifcid): enforce size limit for identity CIDs (#1018) ([ipfs/boxo#1018](https://github.com/ipfs/boxo/pull/1018))
+ - docs: boxo logo source files (#1028) ([ipfs/boxo#1028](https://github.com/ipfs/boxo/pull/1028))
+ - feat(gateway): enhance 504 timeout errors with diagnostic UX (#1023) ([ipfs/boxo#1023](https://github.com/ipfs/boxo/pull/1023))
+ - Use `time.Duration` for rebroadcast delay (#1027) ([ipfs/boxo#1027](https://github.com/ipfs/boxo/pull/1027))
+ - refactor(bitswap/client/internal): close session with Close method instead of context (#1011) ([ipfs/boxo#1011](https://github.com/ipfs/boxo/pull/1011))
+ - fix: use %q for logging routing keys with binary data (#1025) ([ipfs/boxo#1025](https://github.com/ipfs/boxo/pull/1025))
+ - rename `retrieval.RetrievalState` to `retrieval.State` (#1026) ([ipfs/boxo#1026](https://github.com/ipfs/boxo/pull/1026))
+ - feat(gateway): add retrieval state tracking for timeout diagnostics (#1015) ([ipfs/boxo#1015](https://github.com/ipfs/boxo/pull/1015))
+ - Nonfunctional changes (#1017) ([ipfs/boxo#1017](https://github.com/ipfs/boxo/pull/1017))
+ - fix: flaky TestCancelOverridesPendingWants (#1016) ([ipfs/boxo#1016](https://github.com/ipfs/boxo/pull/1016))
+ - bitswap/client: GetBlocks cancels session when finished (#1007) ([ipfs/boxo#1007](https://github.com/ipfs/boxo/pull/1007))
+ - Remove unused context ([ipfs/boxo#1006](https://github.com/ipfs/boxo/pull/1006))
+- github.com/ipfs/go-block-format (v0.2.2 -> v0.2.3):
+ - new version (#66) ([ipfs/go-block-format#66](https://github.com/ipfs/go-block-format/pull/66))
+ - Replace CI badge and add GoDoc link in README (#65) ([ipfs/go-block-format#65](https://github.com/ipfs/go-block-format/pull/65))
+- github.com/ipfs/go-datastore (v0.8.3 -> v0.9.0):
+ - new version (#255) ([ipfs/go-datastore#255](https://github.com/ipfs/go-datastore/pull/255))
+ - feat(keytransform): support transaction feature (#239) ([ipfs/go-datastore#239](https://github.com/ipfs/go-datastore/pull/239))
+ - feat: context datastore (#238) ([ipfs/go-datastore#238](https://github.com/ipfs/go-datastore/pull/238))
+ - new version (#254) ([ipfs/go-datastore#254](https://github.com/ipfs/go-datastore/pull/254))
+ - fix comment (#253) ([ipfs/go-datastore#253](https://github.com/ipfs/go-datastore/pull/253))
+ - feat: query iterator (#244) ([ipfs/go-datastore#244](https://github.com/ipfs/go-datastore/pull/244))
+ - Update readme links (#246) ([ipfs/go-datastore#246](https://github.com/ipfs/go-datastore/pull/246))
+- github.com/ipfs/go-ipld-format (v0.6.2 -> v0.6.3):
+ - new version (#100) ([ipfs/go-ipld-format#100](https://github.com/ipfs/go-ipld-format/pull/100))
+ - avoid unnecessary slice allocation (#99) ([ipfs/go-ipld-format#99](https://github.com/ipfs/go-ipld-format/pull/99))
+- github.com/ipfs/go-unixfsnode (v1.10.1 -> v1.10.2):
+ - new version ([ipfs/go-unixfsnode#88](https://github.com/ipfs/go-unixfsnode/pull/88))
+- github.com/ipld/go-car/v2 (v2.14.3 -> v2.15.0):
+ - v2.15.0 bump (#606) ([ipld/go-car#606](https://github.com/ipld/go-car/pull/606))
+ - feat: add NextReader to BlockReader (#603) ([ipld/go-car#603](https://github.com/ipld/go-car/pull/603))
+ - Remove `@masih` form CODEOWNERS ([ipld/go-car#605](https://github.com/ipld/go-car/pull/605))
+- github.com/libp2p/go-libp2p-kad-dht (v0.34.0 -> v0.35.0):
+ - chore: release v0.35.0 (#1162) ([libp2p/go-libp2p-kad-dht#1162](https://github.com/libp2p/go-libp2p-kad-dht/pull/1162))
+ - refactor: adjust FIND_NODE response exceptions (#1158) ([libp2p/go-libp2p-kad-dht#1158](https://github.com/libp2p/go-libp2p-kad-dht/pull/1158))
+ - refactor: remove provider status command (#1157) ([libp2p/go-libp2p-kad-dht#1157](https://github.com/libp2p/go-libp2p-kad-dht/pull/1157))
+ - refactor(provider): closestPeerToPrefix coverage trie (#1156) ([libp2p/go-libp2p-kad-dht#1156](https://github.com/libp2p/go-libp2p-kad-dht/pull/1156))
+ - fix: don't empty mapdatastore keystore on close (#1155) ([libp2p/go-libp2p-kad-dht#1155](https://github.com/libp2p/go-libp2p-kad-dht/pull/1155))
+ - provider: default options (#1153) ([libp2p/go-libp2p-kad-dht#1153](https://github.com/libp2p/go-libp2p-kad-dht/pull/1153))
+ - fix(keystore): use new batch after commit (#1154) ([libp2p/go-libp2p-kad-dht#1154](https://github.com/libp2p/go-libp2p-kad-dht/pull/1154))
+ - provider: more minor fixes (#1152) ([libp2p/go-libp2p-kad-dht#1152](https://github.com/libp2p/go-libp2p-kad-dht/pull/1152))
+ - rename KeyStore -> Keystore (#1151) ([libp2p/go-libp2p-kad-dht#1151](https://github.com/libp2p/go-libp2p-kad-dht/pull/1151))
+ - provider: minor fixes (#1150) ([libp2p/go-libp2p-kad-dht#1150](https://github.com/libp2p/go-libp2p-kad-dht/pull/1150))
+ - buffered provider (#1149) ([libp2p/go-libp2p-kad-dht#1149](https://github.com/libp2p/go-libp2p-kad-dht/pull/1149))
+ - keystore: remove mutex (#1147) ([libp2p/go-libp2p-kad-dht#1147](https://github.com/libp2p/go-libp2p-kad-dht/pull/1147))
+ - provider: ResettableKeyStore (#1146) ([libp2p/go-libp2p-kad-dht#1146](https://github.com/libp2p/go-libp2p-kad-dht/pull/1146))
+ - keystore: revamp (#1142) ([libp2p/go-libp2p-kad-dht#1142](https://github.com/libp2p/go-libp2p-kad-dht/pull/1142))
+ - provider: use synctest for testing time (#1136) ([libp2p/go-libp2p-kad-dht#1136](https://github.com/libp2p/go-libp2p-kad-dht/pull/1136))
+ - provider: connectivity state machine (#1135) ([libp2p/go-libp2p-kad-dht#1135](https://github.com/libp2p/go-libp2p-kad-dht/pull/1135))
+ - provider: minor fixes (#1133) ([libp2p/go-libp2p-kad-dht#1133](https://github.com/libp2p/go-libp2p-kad-dht/pull/1133))
+ - dual: provider (#1132) ([libp2p/go-libp2p-kad-dht#1132](https://github.com/libp2p/go-libp2p-kad-dht/pull/1132))
+ - provider: refresh schedule (#1131) ([libp2p/go-libp2p-kad-dht#1131](https://github.com/libp2p/go-libp2p-kad-dht/pull/1131))
+ - provider: integration tests (#1127) ([libp2p/go-libp2p-kad-dht#1127](https://github.com/libp2p/go-libp2p-kad-dht/pull/1127))
+ - provider: daemon (#1126) ([libp2p/go-libp2p-kad-dht#1126](https://github.com/libp2p/go-libp2p-kad-dht/pull/1126))
+ - provide: handle reprovide (#1125) ([libp2p/go-libp2p-kad-dht#1125](https://github.com/libp2p/go-libp2p-kad-dht/pull/1125))
+ - provider: options (#1124) ([libp2p/go-libp2p-kad-dht#1124](https://github.com/libp2p/go-libp2p-kad-dht/pull/1124))
+ - provider: catchup pending work (#1123) ([libp2p/go-libp2p-kad-dht#1123](https://github.com/libp2p/go-libp2p-kad-dht/pull/1123))
+ - provider: batch reprovide (#1122) ([libp2p/go-libp2p-kad-dht#1122](https://github.com/libp2p/go-libp2p-kad-dht/pull/1122))
+ - provider: batch provide (#1121) ([libp2p/go-libp2p-kad-dht#1121](https://github.com/libp2p/go-libp2p-kad-dht/pull/1121))
+ - provider: swarm exploration (#1120) ([libp2p/go-libp2p-kad-dht#1120](https://github.com/libp2p/go-libp2p-kad-dht/pull/1120))
+ - provider: handleProvide (#1118) ([libp2p/go-libp2p-kad-dht#1118](https://github.com/libp2p/go-libp2p-kad-dht/pull/1118))
+ - provider: schedule (#1117) ([libp2p/go-libp2p-kad-dht#1117](https://github.com/libp2p/go-libp2p-kad-dht/pull/1117))
+ - provider: schedule prefix length (#1116) ([libp2p/go-libp2p-kad-dht#1116](https://github.com/libp2p/go-libp2p-kad-dht/pull/1116))
+ - provider: ProvideStatus interface (#1110) ([libp2p/go-libp2p-kad-dht#1110](https://github.com/libp2p/go-libp2p-kad-dht/pull/1110))
+ - provider: network operations (#1115) ([libp2p/go-libp2p-kad-dht#1115](https://github.com/libp2p/go-libp2p-kad-dht/pull/1115))
+ - provider: adding provide and reprovide queue (#1114) ([libp2p/go-libp2p-kad-dht#1114](https://github.com/libp2p/go-libp2p-kad-dht/pull/1114))
+ - provider: trie allocation helper (#1108) ([libp2p/go-libp2p-kad-dht#1108](https://github.com/libp2p/go-libp2p-kad-dht/pull/1108))
+ - add missing ShortestCoveredPrefix ([libp2p/go-libp2p-kad-dht@d0b110d](https://github.com/libp2p/go-libp2p-kad-dht/commit/d0b110d))
+ - provider: keyspace helpers ([libp2p/go-libp2p-kad-dht@af3ce09](https://github.com/libp2p/go-libp2p-kad-dht/commit/af3ce09))
+ - provider: helpers package rename (#1111) ([libp2p/go-libp2p-kad-dht#1111](https://github.com/libp2p/go-libp2p-kad-dht/pull/1111))
+ - provider: trie region helpers (#1109) ([libp2p/go-libp2p-kad-dht#1109](https://github.com/libp2p/go-libp2p-kad-dht/pull/1109))
+ - provider: PruneSubtrie helper (#1107) ([libp2p/go-libp2p-kad-dht#1107](https://github.com/libp2p/go-libp2p-kad-dht/pull/1107))
+ - provider: NextNonEmptyLeaf trie helper (#1106) ([libp2p/go-libp2p-kad-dht#1106](https://github.com/libp2p/go-libp2p-kad-dht/pull/1106))
+ - provider: find subtrie helper (#1105) ([libp2p/go-libp2p-kad-dht#1105](https://github.com/libp2p/go-libp2p-kad-dht/pull/1105))
+ - provider: helpers trie find prefix (#1104) ([libp2p/go-libp2p-kad-dht#1104](https://github.com/libp2p/go-libp2p-kad-dht/pull/1104))
+ - provider: trie items listing helpers (#1103) ([libp2p/go-libp2p-kad-dht#1103](https://github.com/libp2p/go-libp2p-kad-dht/pull/1103))
+ - provider: add ShortestCoveredPrefix helper (#1102) ([libp2p/go-libp2p-kad-dht#1102](https://github.com/libp2p/go-libp2p-kad-dht/pull/1102))
+ - provider: key helpers (#1101) ([libp2p/go-libp2p-kad-dht#1101](https://github.com/libp2p/go-libp2p-kad-dht/pull/1101))
+ - provider: Connectivity Checker (#1099) ([libp2p/go-libp2p-kad-dht#1099](https://github.com/libp2p/go-libp2p-kad-dht/pull/1099))
+ - provider: SweepingProvider interface (#1098) ([libp2p/go-libp2p-kad-dht#1098](https://github.com/libp2p/go-libp2p-kad-dht/pull/1098))
+ - provider: keystore (#1096) ([libp2p/go-libp2p-kad-dht#1096](https://github.com/libp2p/go-libp2p-kad-dht/pull/1096))
+ - provider initial commit ([libp2p/go-libp2p-kad-dht@70d21a8](https://github.com/libp2p/go-libp2p-kad-dht/commit/70d21a8))
+ - test GCP result order (#1097) ([libp2p/go-libp2p-kad-dht#1097](https://github.com/libp2p/go-libp2p-kad-dht/pull/1097))
+ - refactor: apply suggestions in records (#1113) ([libp2p/go-libp2p-kad-dht#1113](https://github.com/libp2p/go-libp2p-kad-dht/pull/1113))
+- github.com/libp2p/go-libp2p-kbucket (v0.7.0 -> v0.8.0):
+ - chore: release v0.8.0 (#147) ([libp2p/go-libp2p-kbucket#147](https://github.com/libp2p/go-libp2p-kbucket/pull/147))
+ - feat: generic find PeerID with CPL (#145) ([libp2p/go-libp2p-kbucket#145](https://github.com/libp2p/go-libp2p-kbucket/pull/145))
+- github.com/multiformats/go-varint (v0.0.7 -> v0.1.0):
+ - v0.1.0 bump (#29) ([multiformats/go-varint#29](https://github.com/multiformats/go-varint/pull/29))
+ - chore: optimise UvarintSize (#28) ([multiformats/go-varint#28](https://github.com/multiformats/go-varint/pull/28))
+
-### 👨👩👧👦 Contributors
\ No newline at end of file
+### 👨👩👧👦 Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| Guillaume Michel | 62 | +15401/-5657 | 209 |
+| Marcin Rataj | 33 | +9540/-1734 | 215 |
+| Andrew Gillis | 29 | +771/-1093 | 70 |
+| Hlib Kanunnikov | 2 | +350/-0 | 5 |
+| Rod Vagg | 3 | +260/-9 | 4 |
+| Hector Sanjuan | 4 | +188/-33 | 11 |
+| Jakub Sztandera | 1 | +67/-15 | 3 |
+| Masih H. Derkani | 1 | +1/-2 | 2 |
+| Dominic Della Valle | 1 | +2/-1 | 1 |
diff --git a/docs/config.md b/docs/config.md
index 6e25814ca88..7982cf7f8da 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -69,6 +69,7 @@ config file at runtime.
- [`Gateway.MaxConcurrentRequests`](#gatewaymaxconcurrentrequests)
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
- [`Gateway.RootRedirect`](#gatewayrootredirect)
+ - [`Gateway.DiagnosticServiceURL`](#gatewaydiagnosticserviceurl)
- [`Gateway.FastDirIndexThreshold`](#gatewayfastdirindexthreshold)
- [`Gateway.Writable`](#gatewaywritable)
- [`Gateway.PathPrefixes`](#gatewaypathprefixes)
@@ -124,6 +125,18 @@ config file at runtime.
- [`Pinning.RemoteServices: Policies.MFS.Enabled`](#pinningremoteservices-policiesmfsenabled)
- [`Pinning.RemoteServices: Policies.MFS.PinName`](#pinningremoteservices-policiesmfspinname)
- [`Pinning.RemoteServices: Policies.MFS.RepinInterval`](#pinningremoteservices-policiesmfsrepininterval)
+ - [`Provide`](#provide)
+ - [`Provide.Enabled`](#provideenabled)
+ - [`Provide.Strategy`](#providestrategy)
+ - [`Provide.DHT`](#providedht)
+ - [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers)
+ - [`Provide.DHT.Interval`](#providedhtinterval)
+ - [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)
+ - [`Provide.DHT.DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers)
+ - [`Provide.DHT.DedicatedBurstWorkers`](#providedhtdedicatedburstworkers)
+ - [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker)
+ - [`Provide.DHT.KeystoreBatchSize`](#providedhtkeystorebatchsize)
+ - [`Provide.DHT.OfflineDelay`](#providedhtofflinedelay)
- [`Provider`](#provider)
- [`Provider.Enabled`](#providerenabled)
- [`Provider.Strategy`](#providerstrategy)
@@ -138,7 +151,7 @@ config file at runtime.
- [`Peering.Peers`](#peeringpeers)
- [`Reprovider`](#reprovider)
- [`Reprovider.Interval`](#reproviderinterval)
- - [`Reprovider.Strategy`](#reproviderstrategy)
+ - [`Reprovider.Strategy`](#providestrategy)
- [`Routing`](#routing)
- [`Routing.Type`](#routingtype)
- [`Routing.AcceleratedDHTClient`](#routingaccelerateddhtclient)
@@ -269,8 +282,8 @@ the local [Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`)
Supported Transports:
-* tcp/ip{4,6} - `/ipN/.../tcp/...`
-* unix - `/unix/path/to/socket`
+- tcp/ip{4,6} - `/ipN/.../tcp/...`
+- unix - `/unix/path/to/socket`
> [!CAUTION]
> **NEVER EXPOSE UNPROTECTED ADMIN RPC TO LAN OR THE PUBLIC INTERNET**
@@ -297,8 +310,8 @@ the local [HTTP gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs`, `/ipn
Supported Transports:
-* tcp/ip{4,6} - `/ipN/.../tcp/...`
-* unix - `/unix/path/to/socket`
+- tcp/ip{4,6} - `/ipN/.../tcp/...`
+- unix - `/unix/path/to/socket`
> [!CAUTION]
> **SECURITY CONSIDERATIONS FOR GATEWAY EXPOSURE**
@@ -321,10 +334,10 @@ connections.
Supported Transports:
-* tcp/ip{4,6} - `/ipN/.../tcp/...`
-* websocket - `/ipN/.../tcp/.../ws`
-* quicv1 (RFC9000) - `/ipN/.../udp/.../quic-v1` - can share the same two tuple with `/quic-v1/webtransport`
-* webtransport `/ipN/.../udp/.../quic-v1/webtransport` - can share the same two tuple with `/quic-v1`
+- tcp/ip{4,6} - `/ipN/.../tcp/...`
+- websocket - `/ipN/.../tcp/.../ws`
+- quicv1 (RFC9000) - `/ipN/.../udp/.../quic-v1` - can share the same two tuple with `/quic-v1/webtransport`
+- webtransport `/ipN/.../udp/.../quic-v1/webtransport` - can share the same two tuple with `/quic-v1`
> [!IMPORTANT]
> Make sure your firewall rules allow incoming connections on both TCP and UDP ports defined here.
@@ -333,6 +346,7 @@ Supported Transports:
Note that quic (Draft-29) used to be supported with the format `/ipN/.../udp/.../quic`, but has since been [removed](https://github.com/libp2p/go-libp2p/releases/tag/v0.30.0).
Default:
+
```json
[
"/ip4/0.0.0.0/tcp/4001",
@@ -388,6 +402,7 @@ Contains information used by the [Kubo RPC API](https://docs.ipfs.tech/reference
Map of HTTP headers to set on responses from the RPC (`/api/v0`) HTTP server.
Example:
+
```json
{
"Foo": ["bar"]
@@ -499,11 +514,11 @@ the rest of the internet.
When unset (default), the AutoNAT service defaults to _enabled_. Otherwise, this
field can take one of two values:
-* `enabled` - Enable the V1+V2 service (unless the node determines that it,
+- `enabled` - Enable the V1+V2 service (unless the node determines that it,
itself, isn't reachable by the public internet).
-* `legacy-v1` - **DEPRECATED** Same as `enabled` but only V1 service is enabled. Used for testing
+- `legacy-v1` - **DEPRECATED** Same as `enabled` but only V1 service is enabled. Used for testing
during as few releases as we [transition to V2](https://github.com/ipfs/kubo/issues/10091), will be removed in the future.
-* `disabled` - Disable the service.
+- `disabled` - Disable the service.
Additional modes may be added in the future.
@@ -607,6 +622,7 @@ AutoConf can resolve `"auto"` placeholders in the following configuration fields
AutoConf supports path-based routing URLs that automatically enable specific routing operations based on the URL path. This allows precise control over which HTTP Routing V1 endpoints are used for different operations:
**Supported paths:**
+
- `/routing/v1/providers` - Enables provider record lookups only
- `/routing/v1/peers` - Enables peer routing lookups only
- `/routing/v1/ipns` - Enables IPNS record operations only
@@ -635,6 +651,7 @@ AutoConf supports path-based routing URLs that automatically enable specific rou
```
**Node type categories:**
+
- `mainnet-for-nodes-with-dht`: Mainnet nodes with DHT enabled (typically only need additional provider lookups)
- `mainnet-for-nodes-without-dht`: Mainnet nodes without DHT (need comprehensive routing services)
- `mainnet-for-ipns-publishers-with-http`: Mainnet nodes that publish IPNS records via HTTP
@@ -809,7 +826,6 @@ Default: [certmagic.LetsEncryptProductionCA](https://pkg.go.dev/github.com/caddy
Type: `optionalString`
-
## `Bitswap`
High level client and server configuration of the [Bitswap Protocol](https://specs.ipfs.tech/bitswap-protocol/) over libp2p.
@@ -848,6 +864,7 @@ Bootstrap peers help your node discover and connect to the IPFS network when sta
The special value `"auto"` automatically uses curated, up-to-date bootstrap peers from [AutoConf](#autoconf), ensuring your node can always connect to the healthy network without manual maintenance.
**What this gives you:**
+
- **Reliable startup**: Your node can always find the network, even if some bootstrap peers go offline
- **Automatic updates**: New bootstrap peers are added as the network evolves
- **Custom control**: Add your own trusted peers alongside or instead of the defaults
@@ -950,7 +967,7 @@ cache, which caches block-cids and their block-sizes. Use `0` to disable.
This cache, once primed, can greatly speed up operations like `ipfs repo stat`
as there is no need to read full blocks to know their sizes. Size should be
-adjusted depending on the number of CIDs on disk (`NumObjects in `ipfs repo stat`).
+adjusted depending on the number of CIDs on disk (`NumObjects in`ipfs repo stat`).
Default: `65536` (64KiB)
@@ -966,6 +983,7 @@ datastores to provide extra functionality (eg metrics, logging, or caching).
> For more information on possible values for this configuration option, see [`kubo/docs/datastores.md`](datastores.md)
Default:
+
```
{
"mounts": [
@@ -990,6 +1008,7 @@ Default:
```
With `flatfs-measure` profile:
+
```
{
"mounts": [
@@ -1050,7 +1069,7 @@ Toggle and configure experimental features of Kubo. Experimental features are li
Options for the HTTP gateway.
-**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: https://github.com/ipfs/kubo/issues/10312.
+**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: .
### `Gateway.NoFetch`
@@ -1075,7 +1094,7 @@ Type: `bool`
An optional flag to explicitly configure whether this gateway responds to deserialized
requests, or not. By default, it is enabled. When disabling this option, the gateway
-operates as a Trustless Gateway only: https://specs.ipfs.tech/http-gateways/trustless-gateway/.
+operates as a Trustless Gateway only: .
Default: `true`
@@ -1114,10 +1133,12 @@ Type: `flag`
Maximum duration Kubo will wait for content retrieval (new bytes to arrive).
**Timeout behavior:**
+
- **Time to first byte**: Returns 504 Gateway Timeout if the gateway cannot start writing within this duration (e.g., stuck searching for providers)
- **Time between writes**: After first byte, timeout resets with each write. Response terminates if no new data can be written within this duration
**Truncation handling:** When timeout occurs after HTTP 200 headers are sent (e.g., during CAR streams), the gateway:
+
- Appends error message to indicate truncation
- Forces TCP reset (RST) to prevent caching incomplete responses
- Records in metrics with original status code and `truncated=true` flag
@@ -1125,9 +1146,10 @@ Maximum duration Kubo will wait for content retrieval (new bytes to arrive).
**Monitoring:** Track `ipfs_http_gw_retrieval_timeouts_total` by status code and truncation status.
**Tuning guidance:**
+
- Compare timeout rates (`ipfs_http_gw_retrieval_timeouts_total`) with success rates (`ipfs_http_gw_responses_total{status="200"}`)
- High timeout rate: consider increasing timeout or scaling horizontally if hardware is constrained
-- Many 504s may indicate routing problems - check requested CIDs and provider availability using https://check.ipfs.network/
+- Many 504s may indicate routing problems - check requested CIDs and provider availability using
- `truncated=true` timeouts indicate retrieval stalled mid-file with no new bytes for the timeout duration
A value of 0 disables this timeout.
@@ -1145,6 +1167,7 @@ Protects nodes from traffic spikes and resource exhaustion, especially behind re
**Monitoring:** `ipfs_http_gw_concurrent_requests` tracks current requests in flight.
**Tuning guidance:**
+
- Monitor `ipfs_http_gw_concurrent_requests` gauge for usage patterns
- Track 429s (`ipfs_http_gw_responses_total{status="429"}`) and success rate (`{status="200"}`)
- Near limit with low resource usage → increase value
@@ -1174,6 +1197,16 @@ Default: `""`
Type: `string` (url)
+### `Gateway.DiagnosticServiceURL`
+
+URL for a service to diagnose CID retrievability issues. When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID" button will be shown that links to this service with the CID appended as `?cid=`.
+
+Set to empty string to disable the button.
+
+Default: `"https://check.ipfs.network"`
+
+Type: `optionalstring` (url)
+
### `Gateway.FastDirIndexThreshold`
**REMOVED**: this option is [no longer necessary](https://github.com/ipfs/kubo/pull/9481). Ignored since [Kubo 0.18](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.18.md).
@@ -1206,6 +1239,7 @@ or limit `verifiable.example.net` to response types defined in [Trustless Gatewa
Hostnames can optionally be defined with one or more wildcards.
Examples:
+
- `*.example.com` will match requests to `http://foo.example.com/ipfs/*` or `http://{cid}.ipfs.bar.example.com/*`.
- `foo-*.example.com` will match requests to `http://foo-bar.example.com/ipfs/*` or `http://{cid}.ipfs.foo-xyz.example.com/*`.
@@ -1214,6 +1248,7 @@ Examples:
An array of paths that should be exposed on the hostname.
Example:
+
```json
{
"Gateway": {
@@ -1240,8 +1275,9 @@ and provide [Origin isolation](https://developer.mozilla.org/en-US/docs/Web/Secu
between content roots.
- `true` - enables [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://*.{hostname}/`
- - **Requires whitelist:** make sure respective `Paths` are set.
+ - **Requires whitelist:** make sure respective `Paths` are set.
For example, `Paths: ["/ipfs", "/ipns"]` are required for `http://{cid}.ipfs.{hostname}` and `http://{foo}.ipns.{hostname}` to work:
+
```json
"Gateway": {
"PublicGateways": {
@@ -1252,10 +1288,12 @@ between content roots.
}
}
```
- - **Backward-compatible:** requests for content paths such as `http://{hostname}/ipfs/{cid}` produce redirect to `http://{cid}.ipfs.{hostname}`
+
+ - **Backward-compatible:** requests for content paths such as `http://{hostname}/ipfs/{cid}` produce redirect to `http://{cid}.ipfs.{hostname}`
- `false` - enables [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://{hostname}/*`
- Example:
+
```json
"Gateway": {
"PublicGateways": {
@@ -1294,7 +1332,7 @@ into a single DNS label ([specification](https://specs.ipfs.tech/http-gateways/s
DNSLink name inlining allows for HTTPS on public subdomain gateways with single
label wildcard TLS certs (also enabled when passing `X-Forwarded-Proto: https`),
and provides disjoint Origin per root CID when special rules like
-https://publicsuffix.org, or a custom localhost logic in browsers like Brave
+, or a custom localhost logic in browsers like Brave
has to be applied.
Default: `false`
@@ -1321,6 +1359,7 @@ Type: `flag`
Default entries for `localhost` hostname and loopback IPs are always present.
If additional config is provided for those hostnames, it will be merged on top of implicit values:
+
```json
{
"Gateway": {
@@ -1340,14 +1379,15 @@ For example, to disable subdomain gateway on `localhost`
and make that hostname act the same as `127.0.0.1`:
```console
-$ ipfs config --json Gateway.PublicGateways '{"localhost": null }'
+ipfs config --json Gateway.PublicGateways '{"localhost": null }'
```
### `Gateway` recipes
Below is a list of the most common gateway setups.
-* Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin)
+- Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin)
+
```console
$ ipfs config --json Gateway.PublicGateways '{
"dweb.link": {
@@ -1356,23 +1396,24 @@ Below is a list of the most common gateway setups.
}
}'
```
- - **Performance:** consider running with `Routing.AcceleratedDHTClient=true` and either `Provider.Enabled=false` (avoid providing newly retrieved blocks) or `Provider.WorkerCount=0` (provide as fast as possible, at the cost of increased load)
- - **Backward-compatible:** this feature enables automatic redirects from content paths to subdomains:
+
+ - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. Separately, gateway operators should decide if the gateway node should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`.
+ - **Backward-compatible:** this feature enables automatic redirects from content paths to subdomains:
`http://dweb.link/ipfs/{cid}` → `http://{cid}.ipfs.dweb.link`
- - **X-Forwarded-Proto:** if you run Kubo behind a reverse proxy that provides TLS, make it add a `X-Forwarded-Proto: https` HTTP header to ensure users are redirected to `https://`, not `http://`. It will also ensure DNSLink names are inlined to fit in a single DNS label, so they work fine with a wildcard TLS cert ([details](https://github.com/ipfs/in-web-browsers/issues/169)). The NGINX directive is `proxy_set_header X-Forwarded-Proto "https";`.:
+ - **X-Forwarded-Proto:** if you run Kubo behind a reverse proxy that provides TLS, make it add a `X-Forwarded-Proto: https` HTTP header to ensure users are redirected to `https://`, not `http://`. It will also ensure DNSLink names are inlined to fit in a single DNS label, so they work fine with a wildcard TLS cert ([details](https://github.com/ipfs/in-web-browsers/issues/169)). The NGINX directive is `proxy_set_header X-Forwarded-Proto "https";`.:
`http://dweb.link/ipfs/{cid}` → `https://{cid}.ipfs.dweb.link`
`http://dweb.link/ipns/your-dnslink.site.example.com` → `https://your--dnslink-site-example-com.ipfs.dweb.link`
- - **X-Forwarded-Host:** we also support `X-Forwarded-Host: example.com` if you want to override subdomain gateway host from the original request:
+ - **X-Forwarded-Host:** we also support `X-Forwarded-Host: example.com` if you want to override subdomain gateway host from the original request:
`http://dweb.link/ipfs/{cid}` → `http://{cid}.ipfs.example.com`
+- Public [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://ipfs.io/ipfs/{cid}` (no Origin separation)
-* Public [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://ipfs.io/ipfs/{cid}` (no Origin separation)
```console
$ ipfs config --json Gateway.PublicGateways '{
"ipfs.io": {
@@ -1381,15 +1422,18 @@ Below is a list of the most common gateway setups.
}
}'
```
- - **Performance:** when running an open, recursive gateway consider running with `Routing.AcceleratedDHTClient=true` and either `Provider.Enabled=false` (avoid providing newly retrieved blocks) or `Provider.WorkerCount=0` (provide as fast as possible, at the cost of increased load)
-* Public [DNSLink](https://dnslink.io/) gateway resolving every hostname passed in `Host` header.
+ - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. When running an open, recursive gateway, decide if the gateway should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`.
+
+- Public [DNSLink](https://dnslink.io/) gateway resolving every hostname passed in `Host` header.
+
```console
- $ ipfs config --json Gateway.NoDNSLink false
+ ipfs config --json Gateway.NoDNSLink false
```
- * Note that `NoDNSLink: false` is the default (it works out of the box unless set to `true` manually)
-* Hardened, site-specific [DNSLink gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#dnslink-gateway).
+ - Note that `NoDNSLink: false` is the default (it works out of the box unless set to `true` manually)
+
+- Hardened, site-specific [DNSLink gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#dnslink-gateway).
Disable fetching of remote data (`NoFetch: true`) and resolving DNSLink at unknown hostnames (`NoDNSLink: true`).
Then, enable DNSLink gateway only for the specific hostname (for which data
@@ -1576,6 +1620,47 @@ Type: `flag`
**MOVED:** see [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold)
+### `Internal.MFSNoFlushLimit`
+
+Controls the maximum number of consecutive MFS operations allowed with `--flush=false`
+before requiring a manual flush. This prevents unbounded memory growth and ensures
+data consistency when using deferred flushing with `ipfs files` commands.
+
+When the limit is reached, further operations will fail with an error message
+instructing the user to run `ipfs files flush`, use `--flush=true`, or increase
+this limit in the configuration.
+
+**Why operations fail instead of auto-flushing:** Automatic flushing once the limit
+is reached was considered but rejected because it can lead to data corruption issues
+that are difficult to debug. When the system decides to flush without user knowledge, it can:
+
+- Create partial states that violate user expectations about atomicity
+- Interfere with concurrent operations in unexpected ways
+- Make debugging and recovery much harder when issues occur
+
+By failing explicitly, users maintain control over when their data is persisted,
+allowing them to:
+
+- Batch related operations together before flushing
+- Handle errors predictably at natural transaction boundaries
+- Understand exactly when and why their data is written to disk
+
+If you expect automatic flushing behavior, simply use the default `--flush=true`
+(or omit the flag entirely) instead of `--flush=false`.
+
+**⚠️ WARNING:** Increasing this limit or disabling it (setting to 0) can lead to:
+
+- **Out-of-memory errors (OOM)** - Each unflushed operation consumes memory
+- **Data loss** - If the daemon crashes before flushing, all unflushed changes are lost
+- **Degraded performance** - Large unflushed caches slow down MFS operations
+
+Default: `256`
+
+Type: `optionalInteger` (0 disables the limit, strongly discouraged)
+
+**Note:** This is an EXPERIMENTAL feature and may change or be removed in future releases.
+See [#10842](https://github.com/ipfs/kubo/issues/10842) for more information.
+
## `Ipns`
### `Ipns.RepublishPeriod`
@@ -1619,9 +1704,10 @@ When `Ipns.MaxCacheTTL` is set, it defines the upper bound limit of how long a
will be cached and read from cache before checking for updates.
**Examples:**
-* `"1m"` IPNS results are cached 1m or less (good compromise for system where
+
+- `"1m"` IPNS results are cached 1m or less (good compromise for system where
faster updates are desired).
-* `"0s"` IPNS caching is effectively turned off (useful for testing, bad for production use)
+- `"0s"` IPNS caching is effectively turned off (useful for testing, bad for production use)
- **Note:** setting this to `0` will turn off TTL-based caching entirely.
This is discouraged in production environments. It will make IPNS websites
artificially slow because IPNS resolution results will expire as soon as
@@ -1631,7 +1717,6 @@ will be cached and read from cache before checking for updates.
Default: No upper bound, [TTL from IPNS Record](https://specs.ipfs.tech/ipns/ipns-record/#ttl-uint64) (see `ipns name publish --help`) is always respected.
-
Type: `optionalDuration`
### `Ipns.UsePubsub`
@@ -1721,6 +1806,7 @@ Type: `string` (filesystem path)
Mountpoint for Mutable File System (MFS) behind the `ipfs files` API.
> [!CAUTION]
+>
> - Write support is highly experimental and not recommended for mission-critical deployments.
> - Avoid storing lazy-loaded datasets in MFS. Exposing a partially local, lazy-loaded DAG risks operating system search indexers crawling it, which may trigger unintended network prefetching of non-local DAG components.
@@ -1745,13 +1831,14 @@ A remote pinning service is a remote service that exposes an API for managing
that service's interest in long-term data storage.
The exposed API conforms to the specification defined at
-https://ipfs.github.io/pinning-services-api-spec/
+
#### `Pinning.RemoteServices: API`
Contains information relevant to utilizing the remote pinning service
Example:
+
```json
{
"Pinning": {
@@ -1771,7 +1858,7 @@ Example:
The HTTP(S) endpoint through which to access the pinning service
-Example: "https://pinningservice.tld:1234/my/api/path"
+Example: ""
Type: `string`
@@ -1821,36 +1908,117 @@ Default: `"5m"`
Type: `duration`
-## `Provider`
+## `Provide`
-Configuration applied to the initial one-time announcement of fresh CIDs
-created with `ipfs add`, `ipfs files`, `ipfs dag import`, `ipfs block|dag put`
-commands.
+Configures CID announcements to the routing system, including both immediate
+announcements for new content (provide) and periodic re-announcements
+(reprovide) on systems that require it, like Amino DHT. While designed to support
+multiple routing systems in the future, the current default configuration only supports providing to the Amino DHT.
-For periodical DHT reprovide settings, see [`Reprovide.*`](#reprovider).
+### `Provide.Enabled`
-### `Provider.Enabled`
-
-Controls whether Kubo provider and reprovide systems are enabled.
+Controls whether Kubo provide and reprovide systems are enabled.
> [!CAUTION]
-> Disabling this, will disable BOTH `Provider` system for new CIDs
-> and the periodical reprovide ([`Reprovider.Interval`](#reprovider)) of old CIDs.
+> Disabling this will prevent other nodes from discovering your content.
+> Your node will stop announcing data to the routing system, making it
+> inaccessible unless peers connect to you directly.
Default: `true`
Type: `flag`
-### `Provider.Strategy`
+### `Provide.Strategy`
-Legacy, not used at the moment, see [`Reprovider.Strategy`](#reproviderstrategy) instead.
+Tells the provide system what should be announced. Valid strategies are:
-### `Provider.WorkerCount`
+- `"all"` - announce all CIDs of stored blocks
+- `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks)
+ - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins
+- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`)
+ - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks.
+ It makes sense only for use cases where the entire DAG is fetched in full,
+ and a graceful resume does not have to be guaranteed: the lack of child
+ announcements means an interrupted retrieval won't be able to find
+ providers for the missing block in the middle of a file, unless the peer
+ happens to already be connected to a provider and asks for child CID over
+ bitswap.
+- `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`)
+ - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced.
+- `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies.
+ - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache.
+ - Order: first `pinned` and then the locally available part of `mfs`.
-Sets the maximum number of _concurrent_ DHT provide operations (announcement of new CIDs).
+**Strategy changes automatically clear the provide queue.** When you change `Provide.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`.
+
+**Memory requirements:**
+
+- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million CIDs for reproviding to the Amino DHT.
+- This is due to the use of a buffered provider, which loads all CIDs into memory to avoid holding a lock on the entire pinset during the reprovide cycle.
+
+Default: `"all"`
+
+Type: `optionalString` (unset for the default)
-[`Reprovider`](#reprovider) operations do **not** count against this limit.
-A value of `0` allows an unlimited number of provide workers.
+### `Provide.DHT`
+
+Configuration for providing data to Amino DHT peers.
+
+#### Monitoring Provide Operations
+
+You can monitor the effectiveness of your provide configuration through metrics exposed at the Prometheus endpoint: `{Addresses.API}/debug/metrics/prometheus` (default: `http://127.0.0.1:5001/debug/metrics/prometheus`).
+
+Different metrics are available depending on whether you use legacy mode (`SweepEnabled=false`) or sweep mode (`SweepEnabled=true`). See [Provide metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide) for details.
+
+To enable detailed debug logging for both providers, set:
+
+```sh
+GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug
+```
+
+- `provider=debug` enables generic logging (legacy provider and any non-dht operations)
+- `dht/provider=debug` enables logging for the sweep provider
+
+#### `Provide.DHT.Interval`
+
+Sets how often to re-announce content to the DHT. Provider records on Amino DHT
+expire after [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43),
+also known as Provider Record Expiration Interval.
+
+An interval of about half the expiration window ensures provider records
+are refreshed well before they expire. This keeps your content continuously
+discoverable accounting for network churn without overwhelming the network with too frequent announcements.
+
+- If unset, it uses the implicit safe default.
+- If set to the value `"0"` it will disable content reproviding to DHT.
+
+> [!CAUTION]
+> Disabling this will prevent other nodes from discovering your content via the DHT.
+> Your node will stop announcing data to the DHT, making it
+> inaccessible unless peers connect to you directly. Since provider
+> records expire after `amino.DefaultProvideValidity`, your content will become undiscoverable
+> after this period.
+
+Default: `22h`
+
+Type: `optionalDuration` (unset for the default)
+
+#### `Provide.DHT.MaxWorkers`
+
+Sets the maximum number of _concurrent_ DHT provide operations.
+
+**When `Provide.DHT.SweepEnabled` is false (legacy mode):**
+
+- Controls NEW CID announcements only
+- Reprovide operations do **not** count against this limit
+- A value of `0` allows unlimited provide workers
+
+**When `Provide.DHT.SweepEnabled` is true:**
+
+- Controls the total worker pool for both provide and reprovide operations
+- Workers are split between periodic reprovides and burst provides
+- Use a positive value to control resource usage
+- See [`DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers) and [`DedicatedBurstWorkers`](#providedhtdedicatedburstworkers) for task allocation
If the [accelerated DHT client](#routingaccelerateddhtclient) is enabled, each
provide operation opens ~20 connections in parallel. With the standard DHT
@@ -1861,16 +2029,202 @@ connections this setting can generate.
> [!CAUTION]
> For nodes without strict connection limits that need to provide large volumes
-> of content immediately, we recommend enabling the `Routing.AcceleratedDHTClient` and
-> setting `Provider.WorkerCount` to `0` (unlimited).
+> of content, we recommend first trying `Provide.DHT.SweepEnabled=true` for efficient
+> announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`.
+> As a last resort, consider enabling `Routing.AcceleratedDHTClient=true` but be aware that it is very resource hungry.
>
> At the same time, mind that raising this value too high may lead to increased load.
> Proceed with caution, ensure proper hardware and networking are in place.
+> [!TIP]
+> **When `SweepEnabled` is true:** Users providing millions of CIDs or more
+> should increase the worker count accordingly. Underprovisioning can lead to
+> slow provides (burst workers) and inability to keep up with content
+> reproviding (periodic workers). For nodes with sufficient resources (CPU,
+> bandwidth, number of connections), dedicating `1024` for [periodic
+> workers](#providedhtdedicatedperiodicworkers) and `512` for [burst
+> workers](#providedhtdedicatedburstworkers), and `2048` [max
+> workers](#providedhtmaxworkers) should be adequate even for the largest
+> users. The system will only use workers as needed - unused resources won't be
+> consumed. Ensure you adjust the swarm [connection manager](#swarmconnmgr) and
+> [resource manager](#swarmresourcemgr) configuration accordingly.
+
Default: `16`
Type: `optionalInteger` (non-negative; `0` means unlimited number of workers)
+#### `Provide.DHT.SweepEnabled`
+
+Whether Provide Sweep is enabled. If not enabled, the legacy
+[`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is used for
+both provides and reprovides.
+
+Provide Sweep is a resource efficient technique for advertising content to
+the Amino DHT swarm. The Provide Sweep module tracks the keys that should be periodically reprovided in
+the `Keystore`. It splits the keys into DHT keyspace regions by proximity (XOR
+distance), and schedules when reprovides should happen in order to spread the
+reprovide operation over time to avoid a spike in resource utilization. It
+basically sweeps the keyspace _from left to right_ over the
+[`Provide.DHT.Interval`](#providedhtinterval) time period, and reprovides keys
+matching to the visited keyspace region.
+
+Provide Sweep aims at replacing the inefficient legacy `boxo/provider`
+module, and is currently opt-in. You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above).
+
+Whenever new keys should be advertised to the Amino DHT, `kubo` calls
+`StartProviding()`, triggering an initial `provide` operation for the given
+keys. The keys will be added to the `Keystore` tracking which keys should be
+reprovided and when they should be reprovided. Calling `StopProviding()`
+removes the keys from the `Keystore`. However, it is currently tricky for
+`kubo` to detect when a key should stop being advertised. Hence, `kubo` will
+periodically refresh the `Keystore` at each [`Provide.DHT.Interval`](#providedhtinterval)
+by providing it a channel of all the keys it is expected to contain according
+to the [`Provide.Strategy`](#providestrategy). During this operation,
+all keys in the `Keystore` are purged, and only the given ones remain scheduled.
+
+>
+>
+>
+>
+>
+>
+> The diagram above visualizes the performance patterns:
+>
+> - **Legacy mode**: Individual (slow) provides per CID, can struggle with large datasets
+> - **Sweep mode**: Even distribution matching the keyspace sweep described with low resource usage
+> - **Accelerated DHT**: Hourly traffic spikes with high resource usage
+>
+> Sweep mode provides similar effectiveness to Accelerated DHT but with steady resource usage - better for machines with limited CPU, memory, or network bandwidth.
+
+> [!NOTE]
+> This feature is opt-in for now, but will become the default in a future release.
+> Eventually, this configuration flag will be removed once the feature is stable.
+
+Default: `false`
+
+Type: `flag`
+
+#### `Provide.DHT.DedicatedPeriodicWorkers`
+
+Number of workers dedicated to periodic keyspace region reprovides. Only applies when `Provide.DHT.SweepEnabled` is true.
+
+Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
+number of workers will be dedicated to the periodic region reprovide only. The sum of
+`DedicatedPeriodicWorkers` and `DedicatedBurstWorkers` should not exceed `MaxWorkers`.
+Any remaining workers (MaxWorkers - DedicatedPeriodicWorkers - DedicatedBurstWorkers)
+form a shared pool that can be used for either type of work as needed.
+
+> [!NOTE]
+> If the provider system isn't able to keep up with reproviding all your
+> content within the [Provide.DHT.Interval](#providedhtinterval), consider
+> increasing this value.
+
+Default: `2`
+
+Type: `optionalInteger` (`0` means there are no dedicated workers, but the
+operation can be performed by free non-dedicated workers)
+
+#### `Provide.DHT.DedicatedBurstWorkers`
+
+Number of workers dedicated to burst provides. Only applies when `Provide.DHT.SweepEnabled` is true.
+
+Burst provides are triggered by:
+
+- Manual provide commands (`ipfs routing provide`)
+- New content matching your `Provide.Strategy` (blocks from `ipfs add`, bitswap, or trustless gateway requests)
+- Catch-up reprovides after being disconnected/offline for a while
+
+Having dedicated burst workers ensures that bulk operations (like adding many CIDs
+or reconnecting to the network) don't delay regular periodic reprovides, and vice versa.
+
+Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
+number of workers will be dedicated to burst provides only. In addition to
+these, if there are available workers in the pool, they can also be used for
+burst provides.
+
+> [!NOTE]
+> If CIDs aren't provided quickly enough to your taste, and you can afford more
+> CPU and bandwidth, consider increasing this value.
+
+Default: `1`
+
+Type: `optionalInteger` (`0` means there are no dedicated workers, but the
+operation can be performed by free non-dedicated workers)
+
+#### `Provide.DHT.MaxProvideConnsPerWorker`
+
+Maximum number of connections that a single worker can use to send provider
+records over the network.
+
+When reproviding CIDs corresponding to a keyspace region, the reprovider must
+send a provider record to the 20 closest peers to the CID (in XOR distance) for
+each CID belonging to this keyspace region.
+
+The reprovider opens a connection to a peer from that region, sends it all its
+allocated provider records. Once done, it opens a connection to the next peer
+from that keyspace region until all provider records are assigned.
+
+This option defines how many such connections can be open concurrently by a
+single worker.
+
+Default: `16`
+
+Type: `optionalInteger` (non-negative)
+
+#### `Provide.DHT.KeystoreBatchSize`
+
+During the garbage collection, all keys stored in the Keystore are removed, and
+the keys are streamed from a channel to fill the Keystore again with up-to-date
+keys. Since a high number of CIDs to reprovide can easily fill up the memory,
+keys are read and written in batches to optimize for memory usage.
+
+This option defines how many multihashes should be contained within a batch. A
+multihash is usually represented by 34 bytes.
+
+Default: `16384` (~544 KiB per batch)
+
+Type: `optionalInteger` (non-negative)
+
+#### `Provide.DHT.OfflineDelay`
+
+The `SweepingProvider` has 3 states: `ONLINE`, `DISCONNECTED` and `OFFLINE`. It
+starts `OFFLINE`, and as the node bootstraps, it changes its state to `ONLINE`.
+
+When the provider loses connection to all DHT peers, it switches to the
+`DISCONNECTED` state. In this state, new provides will be added to the provide
+queue, and provided as soon as the node comes back online.
+
+After a node has been `DISCONNECTED` for `OfflineDelay`, it goes to `OFFLINE`
+state. When `OFFLINE`, the provider drops the provide queue, and returns errors
+to new provide requests. However, when `OFFLINE` the provider still adds the
+keys to its state, so keys will eventually be provided in the
+[`Provide.DHT.Interval`](#providedhtinterval) after the provider comes back
+`ONLINE`.
+
+Default: `2h`
+
+Type: `optionalDuration`
+
+## `Provider`
+
+### `Provider.Enabled`
+
+**REMOVED**
+
+Replaced with [`Provide.Enabled`](#provideenabled).
+
+### `Provider.Strategy`
+
+**REMOVED**
+
+This field was unused. Use [`Provide.Strategy`](#providestrategy) instead.
+
+### `Provider.WorkerCount`
+
+**REMOVED**
+
+Replaced with [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers).
+
## `Pubsub`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
@@ -1895,9 +2249,9 @@ Type: `flag`
Sets the default router used by pubsub to route messages to peers. This can be one of:
-* `"floodsub"` - floodsub is a basic router that simply _floods_ messages to all
+- `"floodsub"` - floodsub is a basic router that simply _floods_ messages to all
connected peers. This router is extremely inefficient but _very_ reliable.
-* `"gossipsub"` - [gossipsub][] is a more advanced routing algorithm that will
+- `"gossipsub"` - [gossipsub][] is a more advanced routing algorithm that will
build an overlay mesh from a subset of the links in the network.
Default: `"gossipsub"`
@@ -1976,11 +2330,11 @@ improve reliability.
Use-cases:
-* An IPFS gateway connected to an IPFS cluster should peer to ensure that the
+- An IPFS gateway connected to an IPFS cluster should peer to ensure that the
gateway can always fetch content from the cluster.
-* A dapp may peer embedded Kubo nodes with a set of pinning services or
+- A dapp may peer embedded Kubo nodes with a set of pinning services or
textile cafes/hubs.
-* A set of friends may peer to ensure that they can always fetch each other's
+- A set of friends may peer to ensure that they can always fetch each other's
content.
When a node is added to the set of peered nodes, Kubo will:
@@ -1996,9 +2350,9 @@ When a node is added to the set of peered nodes, Kubo will:
Peering can be asymmetric or symmetric:
-* When symmetric, the connection will be protected by both nodes and will likely
+- When symmetric, the connection will be protected by both nodes and will likely
be very stable.
-* When asymmetric, only one node (the node that configured peering) will protect
+- When asymmetric, only one node (the node that configured peering) will protect
the connection and attempt to re-connect to the peered node on disconnect. If
the peered node is under heavy load and/or has a low connection limit, the
connection may flap repeatedly. Be careful when asymmetrically peering to not
@@ -2038,56 +2392,15 @@ Type: `array[peering]`
### `Reprovider.Interval`
-Sets the time between rounds of reproviding local content to the routing
-system.
-
-- If unset, it uses the implicit safe default.
-- If set to the value `"0"` it will disable content reproviding.
-
-Note: disabling content reproviding will result in other nodes on the network
-not being able to discover that you have the objects that you have. If you want
-to have this disabled and keep the network aware of what you have, you must
-manually announce your content periodically or run your own routing system
-and convince users to add it to [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).
-
-> [!CAUTION]
-> To maintain backward-compatibility, setting `Reprovider.Interval=0` will also disable Provider system (equivalent of `Provider.Enabled=false`)
-
-Default: `22h` (`DefaultReproviderInterval`)
+**REMOVED**
-Type: `optionalDuration` (unset for the default)
+Replaced with [`Provide.DHT.Interval`](#providedhtinterval).
### `Reprovider.Strategy`
-Tells reprovider what should be announced. Valid strategies are:
-
-- `"all"` - announce all CIDs of stored blocks
-- `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks)
- - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins
-- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`)
- - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks.
- It makes sense only for use cases where the entire DAG is fetched in full,
- and a graceful resume does not have to be guaranteed: the lack of child
- announcements means an interrupted retrieval won't be able to find
- providers for the missing block in the middle of a file, unless the peer
- happens to already be connected to a provider and ask for child CID over
- bitswap.
-- `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`)
- - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced.
-- `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies.
- - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache.
- - Order: first `pinned` and then the locally available part of `mfs`.
-
-**Strategy changes automatically clear the provide queue.** When you change `Reprovider.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`.
-
-**Memory requirements:**
-
-- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT.
-- This is due to the use of a buffered provider, which avoids holding a lock on the entire pinset during the reprovide cycle.
-
-Default: `"all"`
+**REMOVED**
-Type: `optionalString` (unset for the default)
+Replaced with [`Provide.Strategy`](#providestrategy).
## `Routing`
@@ -2097,25 +2410,25 @@ Contains options for content, peer, and IPNS routing mechanisms.
There are multiple routing options: "auto", "autoclient", "none", "dht", "dhtclient", "delegated", and "custom".
-* **DEFAULT:** If unset, or set to "auto", your node will use the public IPFS DHT (aka "Amino")
+- **DEFAULT:** If unset, or set to "auto", your node will use the public IPFS DHT (aka "Amino")
and parallel [`Routing.DelegatedRouters`](#routingdelegatedrouters) for additional speed.
-* If set to "autoclient", your node will behave as in "auto" but without running a DHT server.
+- If set to "autoclient", your node will behave as in "auto" but without running a DHT server.
-* If set to "none", your node will use _no_ routing system. You'll have to
+- If set to "none", your node will use _no_ routing system. You'll have to
explicitly connect to peers that have the content you're looking for.
-* If set to "dht" (or "dhtclient"/"dhtserver"), your node will ONLY use the Amino DHT (no HTTP routers).
+- If set to "dht" (or "dhtclient"/"dhtserver"), your node will ONLY use the Amino DHT (no HTTP routers).
-* If set to "custom", all default routers are disabled, and only ones defined in `Routing.Routers` will be used.
+- If set to "custom", all default routers are disabled, and only ones defined in `Routing.Routers` will be used.
When the DHT is enabled, it can operate in two modes: client and server.
-* In server mode, your node will query other peers for DHT records, and will
+- In server mode, your node will query other peers for DHT records, and will
respond to requests from other peers (both requests to store records and
requests to retrieve records).
-* In client mode, your node will query the DHT as a client but will not respond
+- In client mode, your node will query the DHT as a client but will not respond
to requests from other peers. This mode is less resource-intensive than server
mode.
@@ -2135,7 +2448,7 @@ in addition to the Amino DHT.
When `Routing.Type` is set to `delegated`, your node will use **only** HTTP delegated routers and IPNS publishers,
without initializing the Amino DHT at all. This mode is useful for environments where peer-to-peer DHT connectivity
is not available or desired, while still enabling content routing and IPNS publishing via HTTP APIs.
-This mode requires configuring [`Routing.DelegatedRouters`](#routingdelegatedrouters) for content routing and
+This mode requires configuring [`Routing.DelegatedRouters`](#routingdelegatedrouters) for content routing and
[`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers) for IPNS publishing.
**Note:** `delegated` mode operates as read-only for content providing - your node cannot announce content to the network
@@ -2147,7 +2460,6 @@ Default: `auto` (DHT + [`Routing.DelegatedRouters`](#routingdelegatedrouters))
Type: `optionalString` (`null`/missing means the default)
-
### `Routing.AcceleratedDHTClient`
This alternative Amino DHT client with a Full-Routing-Table strategy will
@@ -2164,9 +2476,13 @@ This is not compatible with `Routing.Type` `custom`. If you are using composable
you can configure this individually on each router.
When it is enabled:
+
- Client DHT operations (reads and writes) should complete much faster
- The provider will now use a keyspace sweeping mode allowing to keep alive
CID sets that are multiple orders of magnitude larger.
+ - **Note:** For improved provide/reprovide operations specifically, consider using
+ [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled) instead, which offers similar
+ benefits without the hourly traffic spikes.
- The standard Bucket-Routing-Table DHT will still run for the DHT server (if
the DHT server is enabled). This means the classical routing table will
still be used to answer other nodes.
@@ -2174,12 +2490,13 @@ When it is enabled:
- The operations `ipfs stats dht` will default to showing information about the accelerated DHT client
**Caveats:**
+
1. Running the accelerated client likely will result in more resource consumption (connections, RAM, CPU, bandwidth)
- Users that are limited in the number of parallel connections their machines/networks can perform will likely suffer
- The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds
- Users who previously had a lot of content but were unable to advertise it on the network will see an increase in
egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data
- entering your node that you don't want to advertise, then consider using [Reprovider Strategies](#reproviderstrategy)
+ entering your node that you don't want to advertise, then consider using [Provide Strategies](#providestrategy)
to reduce the number of CIDs that you are reproviding. Similarly, if you are running a node that deals mostly with
short-lived temporary data (e.g. you use a separate node for ingesting data then for storing and serving it) then
you may benefit from using [Strategic Providing](experimental-features.md#strategic-providing) to prevent advertising
@@ -2277,29 +2594,33 @@ Type: `string`
Parameters needed to create the specified router. Supported params per router type:
HTTP:
- - `Endpoint` (mandatory): URL that will be used to connect to a specified router.
- - `MaxProvideBatchSize`: This number determines the maximum amount of CIDs sent per batch. Servers might not accept more than 100 elements per batch. 100 elements by default.
- - `MaxProvideConcurrency`: It determines the number of threads used when providing content. GOMAXPROCS by default.
+
+- `Endpoint` (mandatory): URL that will be used to connect to a specified router.
+- `MaxProvideBatchSize`: This number determines the maximum amount of CIDs sent per batch. Servers might not accept more than 100 elements per batch. 100 elements by default.
+- `MaxProvideConcurrency`: It determines the number of threads used when providing content. GOMAXPROCS by default.
DHT:
- - `"Mode"`: Mode used by the Amino DHT. Possible values: "server", "client", "auto"
- - `"AcceleratedDHTClient"`: Set to `true` if you want to use the acceleratedDHT.
- - `"PublicIPNetwork"`: Set to `true` to create a `WAN` DHT. Set to `false` to create a `LAN` DHT.
+
+- `"Mode"`: Mode used by the Amino DHT. Possible values: "server", "client", "auto"
+- `"AcceleratedDHTClient"`: Set to `true` if you want to use the acceleratedDHT.
+- `"PublicIPNetwork"`: Set to `true` to create a `WAN` DHT. Set to `false` to create a `LAN` DHT.
Parallel:
- - `Routers`: A list of routers that will be executed in parallel:
- - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list.
- - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout.
- - `ExecuteAfter:duration`: Providing this param will delay the execution of that router at the specified time. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`).
- - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred.
- - `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`).
+
+- `Routers`: A list of routers that will be executed in parallel:
+ - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list.
+ - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout.
+ - `ExecuteAfter:duration`: Providing this param will delay the execution of that router at the specified time. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`).
+ - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred.
+- `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`).
Sequential:
- - `Routers`: A list of routers that will be executed in order:
- - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list.
- - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout.
- - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred.
- - `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)`.
+
+- `Routers`: A list of routers that will be executed in order:
+ - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list.
+ - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout.
+ - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred.
+- `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)`.
Default: `{}` (use the safe implicit defaults)
@@ -2317,6 +2638,7 @@ Type: `object[string->string]`
The key will be the name of the method: `"provide"`, `"find-providers"`, `"find-peers"`, `"put-ipns"`, `"get-ipns"`. All methods must be added to the list.
The value will contain:
+
- `RouterName:string`: Name of the router. It should be one of the previously added to `Routing.Routers` list.
Type: `object[string->object]`
@@ -2526,7 +2848,6 @@ Default: `131072` (128 kb)
Type: `optionalInteger`
-
#### `Swarm.RelayService.ReservationTTL`
Duration of a new or refreshed reservation.
@@ -2535,7 +2856,6 @@ Default: `"1h"`
Type: `duration`
-
#### `Swarm.RelayService.MaxReservations`
Maximum number of active relay slots.
@@ -2544,7 +2864,6 @@ Default: `128`
Type: `optionalInteger`
-
#### `Swarm.RelayService.MaxCircuits`
Maximum number of open relay connections for each peer.
@@ -2553,7 +2872,6 @@ Default: `16`
Type: `optionalInteger`
-
#### `Swarm.RelayService.BufferSize`
Size of the relayed connection buffers.
@@ -2562,7 +2880,6 @@ Default: `2048`
Type: `optionalInteger`
-
#### `Swarm.RelayService.MaxReservationsPerPeer`
**REMOVED in kubo 0.32 due to [go-libp2p#2974](https://github.com/libp2p/go-libp2p/pull/2974)**
@@ -2606,8 +2923,8 @@ Please use [`AutoNAT.ServiceMode`](#autonatservicemode).
The connection manager determines which and how many connections to keep and can
be configured to keep. Kubo currently supports two connection managers:
-* none: never close idle connections.
-* basic: the default connection manager.
+- none: never close idle connections.
+- basic: the default connection manager.
By default, this section is empty and the implicit defaults defined below
are used.
@@ -2631,11 +2948,11 @@ connections. The process of closing connections happens every `SilencePeriod`.
The connection manager considers a connection idle if:
-* It has not been explicitly _protected_ by some subsystem. For example, Bitswap
+- It has not been explicitly _protected_ by some subsystem. For example, Bitswap
will protect connections to peers from which it is actively downloading data,
the DHT will protect some peers for routing, and the peering subsystem will
protect all "peered" nodes.
-* It has existed for longer than the `GracePeriod`.
+- It has existed for longer than the `GracePeriod`.
**Example:**
@@ -2774,8 +3091,9 @@ Default: Enabled
Type: `flag`
Listen Addresses:
-* /ip4/0.0.0.0/tcp/4001 (default)
-* /ip6/::/tcp/4001 (default)
+
+- /ip4/0.0.0.0/tcp/4001 (default)
+- /ip6/::/tcp/4001 (default)
#### `Swarm.Transports.Network.Websocket`
@@ -2790,8 +3108,9 @@ Default: Enabled
Type: `flag`
Listen Addresses:
-* /ip4/0.0.0.0/tcp/4001/ws
-* /ip6/::/tcp/4001/ws
+
+- /ip4/0.0.0.0/tcp/4001/ws
+- /ip6/::/tcp/4001/ws
#### `Swarm.Transports.Network.QUIC`
@@ -2809,6 +3128,7 @@ Default: Enabled
Type: `flag`
Listen Addresses:
+
- `/ip4/0.0.0.0/udp/4001/quic-v1` (default)
- `/ip6/::/udp/4001/quic-v1` (default)
@@ -2821,10 +3141,11 @@ Allows IPFS node to connect to other peers using their `/p2p-circuit`
NATs.
See also:
+
- Docs: [Libp2p Circuit Relay](https://docs.libp2p.io/concepts/circuit-relay/)
- [`Swarm.RelayClient.Enabled`](#swarmrelayclientenabled) for getting a public
-- `/p2p-circuit` address when behind a firewall.
- - [`Swarm.EnableHolePunching`](#swarmenableholepunching) for direct connection upgrade through relay
+- `/p2p-circuit` address when behind a firewall.
+- [`Swarm.EnableHolePunching`](#swarmenableholepunching) for direct connection upgrade through relay
- [`Swarm.RelayService.Enabled`](#swarmrelayserviceenabled) for becoming a
limited relay for other peers
@@ -2833,9 +3154,9 @@ Default: Enabled
Type: `flag`
Listen Addresses:
-* This transport is special. Any node that enables this transport can receive
- inbound connections on this transport, without specifying a listen address.
+- This transport is special. Any node that enables this transport can receive
+ inbound connections on this transport, without specifying a listen address.
#### `Swarm.Transports.Network.WebTransport`
@@ -2858,6 +3179,7 @@ Default: Enabled
Type: `flag`
Listen Addresses:
+
- `/ip4/0.0.0.0/udp/4001/quic-v1/webtransport` (default)
- `/ip6/::/udp/4001/quic-v1/webtransport` (default)
@@ -2888,6 +3210,7 @@ Default: Enabled
Type: `flag`
Listen Addresses:
+
- `/ip4/0.0.0.0/udp/4001/webrtc-direct` (default)
- `/ip6/::/udp/4001/webrtc-direct` (default)
@@ -2960,7 +3283,7 @@ Type: `priority`
### `Swarm.Transports.Multiplexers.Mplex`
-**REMOVED**: See https://github.com/ipfs/kubo/issues/9958
+**REMOVED**: See
Support for Mplex has been [removed from Kubo and go-libp2p](https://github.com/libp2p/specs/issues/553).
Please remove this option from your config.
@@ -2977,6 +3300,7 @@ This allows for overriding the default DNS resolver provided by the operating sy
and using different resolvers per domain or TLD (including ones from alternative, non-ICANN naming systems).
Example:
+
```json
{
"DNS": {
@@ -2991,9 +3315,10 @@ Example:
```
Be mindful that:
+
- Currently only `https://` URLs for [DNS over HTTPS (DoH)](https://en.wikipedia.org/wiki/DNS_over_HTTPS) endpoints are supported as values.
- The default catch-all resolver is the cleartext one provided by your operating system. It can be overridden by adding a DoH entry for the DNS root indicated by `.` as illustrated above.
-- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis.
+- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis.
- The special value `"auto"` uses DNS resolvers from [AutoConf](#autoconf) when enabled. For example: `{".": "auto"}` uses any custom DoH resolver (global or per TLD) provided by AutoConf system.
Default: `{".": "auto"}`
@@ -3010,8 +3335,9 @@ If present, the upper bound is applied to DoH resolvers in [`DNS.Resolvers`](#dn
Note: this does NOT work with Go's default DNS resolver. To make this a global setting, add a `.` entry to `DNS.Resolvers` first.
**Examples:**
-* `"1m"` DNS entries are kept for 1 minute or less.
-* `"0s"` DNS entries expire as soon as they are retrieved.
+
+- `"1m"` DNS entries are kept for 1 minute or less.
+- `"0s"` DNS entries expire as soon as they are retrieved.
Default: Respect DNS Response TTL
@@ -3043,6 +3369,7 @@ and the HTTPS server returns HTTP 200 for the [probe path](https://specs.ipfs.te
> This feature is relatively new. Please report any issues via [Github](https://github.com/ipfs/kubo/issues/new).
>
> Important notes:
+>
> - TLS and HTTP/2 are required. For privacy reasons, and to maintain feature-parity with browsers, unencrypted `http://` providers are ignored and not used.
> - This feature works in the same way as Bitswap: connected HTTP-peers receive optimistic block requests even for content that they are not announcing.
> - For performance reasons, and to avoid loops, the HTTP client does not follow redirects. Providers should keep announcements up to date.
@@ -3069,7 +3396,6 @@ Type: `array[string]`
Optional list of hostnames for which HTTP retrieval is not allowed.
Denylist entries take precedence over Allowlist entries.
-
> [!TIP]
> This denylist operates on HTTP endpoint hostnames.
> To deny specific PeerID, use [`Routing.IgnoreProviders`](#routingignoreproviders) instead.
@@ -3122,6 +3448,8 @@ Note that using flags will override the options defined here.
The default CID version. Commands affected: `ipfs add`.
+Must be either 0 or 1. CIDv0 uses SHA2-256 only, while CIDv1 supports multiple hash functions.
+
Default: `0`
Type: `optionalInteger`
@@ -3138,6 +3466,12 @@ Type: `flag`
The default UnixFS chunker. Commands affected: `ipfs add`.
+Valid formats:
+
+- `size-` - fixed size chunker
+- `rabin---` - rabin fingerprint chunker
+- `buzhash` - buzhash chunker
+
Default: `size-262144`
Type: `optionalString`
@@ -3146,6 +3480,10 @@ Type: `optionalString`
The default hash function. Commands affected: `ipfs add`, `ipfs block put`, `ipfs dag put`.
+Must be a valid multihash name (e.g., `sha2-256`, `blake3`) and must be allowed for use in IPFS according to security constraints.
+
+Run `ipfs cid hashes --supported` to see the full list of allowed hash functions.
+
Default: `sha2-256`
Type: `optionalString`
@@ -3156,6 +3494,8 @@ The maximum number of nodes in a write-batch. The total size of the batch is lim
Increasing this will batch more items together when importing data with `ipfs dag import`, which can speed things up.
+Must be positive (> 0). Setting to 0 would cause immediate batching after each node, which is inefficient.
+
Default: `128`
Type: `optionalInteger`
@@ -3166,6 +3506,8 @@ The maximum size of a single write-batch (computed as the sum of the sizes of th
Increasing this will batch more items together when importing data with `ipfs dag import`, which can speed things up.
+Must be positive (> 0). Setting to 0 would cause immediate batching after any data, which is inefficient.
+
Default: `20971520` (20MiB)
Type: `optionalInteger`
@@ -3178,6 +3520,8 @@ when building the DAG while importing.
This setting controls both the fanout in files that are chunked into several
blocks and grouped as a Unixfs (dag-pb) DAG.
+Must be positive (> 0). Zero or negative values would break file DAG construction.
+
Default: `174`
Type: `optionalInteger`
@@ -3197,6 +3541,8 @@ This setting will cause basic directories to be converted to HAMTs when they
exceed the maximum number of children. This happens transparently during the
add process. The fanout of HAMT nodes is controlled by `MaxHAMTFanout`.
+Must be non-negative (>= 0). Zero means no limit, negative values are invalid.
+
Commands affected: `ipfs add`
Default: `0` (no limit, because [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold) triggers controls when to switch to HAMT sharding when a directory grows too big)
@@ -3205,15 +3551,15 @@ Type: `optionalInteger`
### `Import.UnixFSHAMTDirectoryMaxFanout`
-The maximum number of children that a node part of a Unixfs HAMT directory
+The maximum number of children that a node part of a UnixFS HAMT directory
(aka sharded directory) can have.
HAMT directories have unlimited children and are used when basic directories
-become too big or reach `MaxLinks`. A HAMT is a structure made of unixfs
+become too big or reach `MaxLinks`. A HAMT is a structure made of UnixFS
nodes that store the list of elements in the folder. This option controls the
maximum number of children that the HAMT nodes can have.
-Needs to be a power of two (shard entry size) and multiple of 8 (bitfield size).
+According to the [UnixFS specification](https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters), this value must be a power of 2, a multiple of 8 (for byte-aligned bitfields), and not exceed 1024 (to prevent denial-of-service attacks).
Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.DefaultShardWidth`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L30C5-L30C22))
@@ -3233,7 +3579,7 @@ networking stack. At the time of writing this, IPFS peers on the public swarm
tend to ignore requests for blocks bigger than 2MiB.
Uses implementation from `boxo/ipld/unixfs/io/directory`, where the size is not
-the *exact* block size of the encoded directory but just the estimated size
+the _exact_ block size of the encoded directory but just the estimated size
based byte length of DAG-PB Links names and CIDs.
Setting to `1B` is functionally equivalent to always using HAMT (useful in testing).
@@ -3256,7 +3602,7 @@ Optional suffix to the AgentVersion presented by `ipfs id` and exposed via [libp
The value from config takes precedence over value passed via `ipfs daemon --agent-version-suffix`.
> [!NOTE]
-> Setting a custom version suffix helps with ecosystem analysis, such as Amino DHT reports published at https://stats.ipfs.network
+> Setting a custom version suffix helps with ecosystem analysis, such as Amino DHT reports published at
Default: `""` (no suffix, or value from `ipfs daemon --agent-version-suffix=`)
@@ -3432,7 +3778,7 @@ Reduces daemon overhead on the system by disabling optional swarm services.
### `announce-off` profile
-Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).
+Disables [Provide](#provide) system (and announcing to Amino DHT).
> [!CAUTION]
> The main use case for this is setups with manual Peering.Peers config.
@@ -3442,7 +3788,7 @@ Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).
### `announce-on` profile
-(Re-)enables [Reprovider](#reprovider) system (reverts [`announce-off` profile](#announce-off-profile)).
+(Re-)enables [Provide](#provide) system (reverts [`announce-off` profile](#announce-off-profile)).
### `legacy-cid-v0` profile
diff --git a/docs/examples/kubo-as-a-library/go.mod b/docs/examples/kubo-as-a-library/go.mod
index 5e728552d71..81c2a147b8f 100644
--- a/docs/examples/kubo-as-a-library/go.mod
+++ b/docs/examples/kubo-as-a-library/go.mod
@@ -7,7 +7,7 @@ go 1.25
replace github.com/ipfs/kubo => ./../../..
require (
- github.com/ipfs/boxo v0.34.0
+ github.com/ipfs/boxo v0.35.0
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.43.0
github.com/multiformats/go-multiaddr v0.16.1
@@ -26,7 +26,7 @@ require (
github.com/caddyserver/certmagic v0.23.0 // indirect
github.com/caddyserver/zerossl v0.1.3 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
- github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/ceramicnetwork/go-dag-jose v0.1.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect
@@ -50,21 +50,22 @@ require (
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
- github.com/gabriel-vasile/mimetype v1.4.9 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
- github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+ github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/glog v1.2.4 // indirect
+ github.com/golang/glog v1.2.5 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/guillaumemichel/reservedpool v0.3.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
@@ -72,29 +73,31 @@ require (
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
- github.com/ipfs/go-block-format v0.2.2 // indirect
+ github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-cidutil v0.1.0 // indirect
- github.com/ipfs/go-datastore v0.8.3 // indirect
+ github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-ds-badger v0.3.4 // indirect
github.com/ipfs/go-ds-flatfs v0.5.5 // indirect
github.com/ipfs/go-ds-leveldb v0.5.2 // indirect
github.com/ipfs/go-ds-measure v0.2.2 // indirect
github.com/ipfs/go-ds-pebble v0.5.1 // indirect
+ github.com/ipfs/go-dsqueue v0.0.5 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
- github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
+ github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
- github.com/ipfs/go-ipld-format v0.6.2 // indirect
+ github.com/ipfs/go-ipld-format v0.6.3 // indirect
github.com/ipfs/go-ipld-git v0.1.1 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-log/v2 v2.8.1 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.2 // indirect
- github.com/ipfs/go-unixfsnode v1.10.1 // indirect
- github.com/ipld/go-car/v2 v2.14.3 // indirect
+ github.com/ipfs/go-test v0.2.3 // indirect
+ github.com/ipfs/go-unixfsnode v1.10.2 // indirect
+ github.com/ipld/go-car/v2 v2.15.0 // indirect
github.com/ipld/go-codec-dagpb v1.7.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.6.1 // indirect
@@ -111,8 +114,8 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
- github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect
- github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect
+ github.com/libp2p/go-libp2p-kad-dht v0.35.0 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
@@ -139,7 +142,7 @@ require (
github.com/multiformats/go-multicodec v0.9.2 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
- github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
@@ -166,14 +169,15 @@ require (
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
- github.com/prometheus/client_golang v1.23.0 // indirect
+ github.com/probe-lab/go-libdht v0.2.1 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
- github.com/rogpeppe/go-internal v1.13.1 // indirect
+ github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect
@@ -186,40 +190,42 @@ require (
github.com/wlynxg/anet v0.0.5 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
- go.opentelemetry.io/otel v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/zipkin v1.37.0 // indirect
- go.opentelemetry.io/otel/metric v1.37.0 // indirect
- go.opentelemetry.io/otel/sdk v1.37.0 // indirect
- go.opentelemetry.io/otel/trace v1.37.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
- golang.org/x/crypto v0.41.0 // indirect
- golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect
- golang.org/x/mod v0.27.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sync v0.16.0 // indirect
- golang.org/x/sys v0.35.0 // indirect
- golang.org/x/text v0.28.0 // indirect
+ golang.org/x/crypto v0.42.0 // indirect
+ golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
+ golang.org/x/mod v0.28.0 // indirect
+ golang.org/x/net v0.44.0 // indirect
+ golang.org/x/sync v0.17.0 // indirect
+ golang.org/x/sys v0.36.0 // indirect
+ golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 // indirect
+ golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
- golang.org/x/tools v0.36.0 // indirect
+ golang.org/x/tools v0.37.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
- google.golang.org/grpc v1.73.0 // indirect
- google.golang.org/protobuf v1.36.7 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
+ google.golang.org/grpc v1.75.0 // indirect
+ google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)
diff --git a/docs/examples/kubo-as-a-library/go.sum b/docs/examples/kubo-as-a-library/go.sum
index 50a10c6b65c..eb90aa12ad5 100644
--- a/docs/examples/kubo-as-a-library/go.sum
+++ b/docs/examples/kubo-as-a-library/go.sum
@@ -67,8 +67,8 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
-github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk=
github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY=
@@ -159,8 +159,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
-github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
+github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
+github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
@@ -177,8 +177,8 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -193,8 +193,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
-github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
+github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -264,8 +264,10 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw=
+github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -287,13 +289,13 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g=
-github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA=
+github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY=
+github.com/ipfs/boxo v0.35.0/go.mod h1:uhaF0DGnbgEiXDTmD249jCGbxVkMm6+Ew85q6Uub7lo=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
-github.com/ipfs/go-block-format v0.2.2 h1:uecCTgRwDIXyZPgYspaLXoMiMmxQpSx2aq34eNc4YvQ=
-github.com/ipfs/go-block-format v0.2.2/go.mod h1:vmuefuWU6b+9kIU0vZJgpiJt1yicQz9baHXE8qR+KB8=
+github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
+github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
@@ -303,8 +305,8 @@ github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
-github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4=
-github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M=
+github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
+github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
@@ -319,10 +321,14 @@ github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICk
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.1 h1:p0FAE0zw9J/3T1VkGB9s98jWmfKmw2t0iEwfMUv8iSQ=
github.com/ipfs/go-ds-pebble v0.5.1/go.mod h1:LsmQx4w+0o9znl4hTxYo1Y2lnBTzNCwc4kNpD3wWXM0=
+github.com/ipfs/go-dsqueue v0.0.5 h1:TUOk15TlCJ/NKV8Yk2W5wgkEjDa44Nem7a7FGIjsMNU=
+github.com/ipfs/go-dsqueue v0.0.5/go.mod h1:i/jAlpZjBbQJLioN+XKbFgnd+u9eAhGZs9IrqIzTd9g=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ=
github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
+github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
+github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
@@ -336,8 +342,8 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E=
github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A=
-github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU=
-github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk=
+github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8=
+github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk=
github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y=
github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI=
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
@@ -353,10 +359,10 @@ github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
-github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM=
-github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY=
-github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8=
-github.com/ipld/go-car/v2 v2.14.3/go.mod h1:/vpSvPngOX8UnvmdFJ3o/mDgXa9LuyXsn7wxOzHDYQE=
+github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
+github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg=
+github.com/ipld/go-car/v2 v2.15.0 h1:RxtZcGXFx72zFESl+UUsCNQV2YMcy3gEMYx9M3uio24=
+github.com/ipld/go-car/v2 v2.15.0/go.mod h1:ovlq/n3xlVJDmoiN3Kd/Z7kIzQbdTIFSwltfOP+qIgk=
github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0=
github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM=
github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
@@ -426,11 +432,11 @@ github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl9
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0 h1:pWRC4FKR9ptQjA9DuMSrAn2D3vABE8r58iAeoLoK1Ig=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0/go.mod h1:s70f017NjhsBx+SVl0/w+x//uyglrFpKLfvuQJj4QAU=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
-github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs=
-github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g=
+github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
+github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
@@ -534,8 +540,8 @@ github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuV
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
-github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI=
+github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
@@ -622,16 +628,18 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/probe-lab/go-libdht v0.2.1 h1:oBCsKBvS/OVirTO5+BT6/AOocWjdqwpfSfkTfBjUPJE=
+github.com/probe-lab/go-libdht v0.2.1/go.mod h1:q+WlGiqs/UIRfdhw9Gmc+fPoAYlOim7VvXTjOI6KJmQ=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
-github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
-github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
@@ -643,8 +651,8 @@ github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssk
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
@@ -712,8 +720,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
@@ -767,32 +775,32 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4=
-go.opentelemetry.io/otel/exporters/zipkin v1.37.0 h1:Z2apuaRnHEjzDAkpbWNPiksz1R0/FCIrJSjiMA43zwI=
-go.opentelemetry.io/otel/exporters/zipkin v1.37.0/go.mod h1:ofGu/7fG+bpmjZoiPUUmYDJ4vXWxMT57HmGoegx49uw=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
-go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
-go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
+go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
+go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
@@ -809,6 +817,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
@@ -832,8 +842,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
+golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -842,8 +852,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -866,8 +876,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -903,8 +913,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
+golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -923,8 +933,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -971,8 +981,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -992,8 +1004,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1034,8 +1046,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1081,10 +1093,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -1097,8 +1109,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
-google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
+google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
+google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1110,8 +1122,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
-google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/docs/experimental-features.md b/docs/experimental-features.md
index 68d7a819c6e..ad3fbdfed59 100644
--- a/docs/experimental-features.md
+++ b/docs/experimental-features.md
@@ -539,7 +539,7 @@ ipfs config --json Swarm.RelayClient.Enabled true
`Experimental.StrategicProviding` was removed in Kubo v0.35.
-Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providerenabled) and [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy).
+Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provideenabled) and [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy).
## GraphSync
diff --git a/docs/logo/kubo-logo.png b/docs/logo/kubo-logo.png
new file mode 100644
index 00000000000..c98eadd5905
Binary files /dev/null and b/docs/logo/kubo-logo.png differ
diff --git a/docs/logo/kubo-logo.svg b/docs/logo/kubo-logo.svg
new file mode 100644
index 00000000000..7dbd2ec6719
--- /dev/null
+++ b/docs/logo/kubo-logo.svg
@@ -0,0 +1,34 @@
+
diff --git a/docs/metrics.md b/docs/metrics.md
new file mode 100644
index 00000000000..54835969472
--- /dev/null
+++ b/docs/metrics.md
@@ -0,0 +1,118 @@
+## Kubo metrics
+
+By default, a Prometheus endpoint is exposed by Kubo at `http://127.0.0.1:5001/debug/metrics/prometheus`.
+
+It includes default [Prometheus Go client metrics](https://prometheus.io/docs/guides/go-application/) + Kubo-specific metrics listed below.
+
+### Table of Contents
+
+- [DHT RPC](#dht-rpc)
+ - [Inbound RPC metrics](#inbound-rpc-metrics)
+ - [Outbound RPC metrics](#outbound-rpc-metrics)
+- [Provide](#provide)
+ - [Legacy Provider](#legacy-provider)
+ - [DHT Provider](#dht-provider)
+- [Gateway (`boxo/gateway`)](#gateway-boxogateway)
+ - [HTTP metrics](#http-metrics)
+ - [Blockstore cache metrics](#blockstore-cache-metrics)
+ - [Backend metrics](#backend-metrics)
+- [Generic HTTP Servers](#generic-http-servers)
+ - [Core HTTP metrics](#core-http-metrics-ipfs_http_)
+ - [HTTP Server metrics](#http-server-metrics-http_server_)
+- [OpenTelemetry Metadata](#opentelemetry-metadata)
+
+> [!WARNING]
+> This documentation is incomplete. For an up-to-date list of metrics available at daemon startup, see [test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile](https://github.com/ipfs/kubo/blob/master/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile).
+>
+> Additional metrics may appear during runtime as some components (like boxo/gateway) register metrics only after their first event occurs (e.g., HTTP request/response).
+
+## DHT RPC
+
+Metrics from `go-libp2p-kad-dht` for DHT RPC operations:
+
+### Inbound RPC metrics
+
+- `rpc_inbound_messages_total` - Counter: total messages received per RPC
+- `rpc_inbound_message_errors_total` - Counter: total errors for received messages
+- `rpc_inbound_bytes_[bucket|sum|count]` - Histogram: distribution of received bytes per RPC
+- `rpc_inbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for inbound RPCs
+
+### Outbound RPC metrics
+
+- `rpc_outbound_messages_total` - Counter: total messages sent per RPC
+- `rpc_outbound_message_errors_total` - Counter: total errors for sent messages
+- `rpc_outbound_requests_total` - Counter: total requests sent
+- `rpc_outbound_request_errors_total` - Counter: total errors for sent requests
+- `rpc_outbound_bytes_[bucket|sum|count]` - Histogram: distribution of sent bytes per RPC
+- `rpc_outbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for outbound RPCs
+
+## Provide
+
+### Legacy Provider
+
+Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`:
+
+- `provider_reprovider_provide_count` - Counter: total successful provide operations since node startup
+- `provider_reprovider_reprovide_count` - Counter: total reprovide sweep operations since node startup
+
+### DHT Provider
+
+Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`:
+
+- `total_provide_count_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
+
+> [!NOTE]
+> These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`.
+
+## Gateway (`boxo/gateway`)
+
+> [!TIP]
+> These metrics are limited to [IPFS Gateway](https://specs.ipfs.tech/http-gateways/) endpoints. For general HTTP metrics across all endpoints, consider using a reverse proxy.
+
+Gateway metrics appear after the first HTTP request is processed:
+
+### HTTP metrics
+
+- `ipfs_http_gw_responses_total{code}` - Counter: total HTTP responses by status code
+- `ipfs_http_gw_retrieval_timeouts_total{code,truncated}` - Counter: requests that timed out during content retrieval
+- `ipfs_http_gw_concurrent_requests` - Gauge: number of requests currently being processed
+
+### Blockstore cache metrics
+
+- `ipfs_http_blockstore_cache_hit` - Counter: global block cache hits
+- `ipfs_http_blockstore_cache_requests` - Counter: global block cache requests
+
+### Backend metrics
+
+- `ipfs_gw_backend_api_call_duration_seconds_[bucket|sum|count]{backend_method}` - Histogram: time spent in IPFSBackend API calls
+
+## Generic HTTP Servers
+
+> [!TIP]
+> The metrics below are not very useful and exist mostly for historical reasons. If you need non-gateway HTTP metrics, it's better to put a reverse proxy in front of Kubo and use its metrics.
+
+### Core HTTP metrics (`ipfs_http_*`)
+
+Prometheus metrics for the HTTP API exposed at port 5001:
+
+- `ipfs_http_requests_total{method,code,handler}` - Counter: total HTTP requests (Legacy - new metrics are provided by boxo/gateway for gateway traffic)
+- `ipfs_http_request_duration_seconds[_sum|_count]{handler}` - Summary: request processing duration
+- `ipfs_http_request_size_bytes[_sum|_count]{handler}` - Summary: request body sizes
+- `ipfs_http_response_size_bytes[_sum|_count]{handler}` - Summary: response body sizes
+
+### HTTP Server metrics (`http_server_*`)
+
+Additional HTTP instrumentation for all handlers (Gateway, API commands, etc.):
+
+- `http_server_request_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of request body sizes
+- `http_server_request_duration_seconds_[bucket|count|sum]` - Histogram: distribution of request processing times
+- `http_server_response_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of response body sizes
+
+These metrics are automatically added to Gateway handlers, Hostname Gateway, Libp2p Gateway, and API command handlers.
+
+## OpenTelemetry Metadata
+
+Kubo uses Prometheus for metrics collection for historical reasons, but OpenTelemetry metrics are automatically exposed through the same Prometheus endpoint. These metadata metrics provide context about the instrumentation:
+
+- `otel_scope_info` - Information about instrumentation libraries producing metrics
+- `target_info` - Service metadata including version and instance information
\ No newline at end of file
diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go
index ece386bf757..bbd5cbc982e 100644
--- a/fuse/ipns/ipns_test.go
+++ b/fuse/ipns/ipns_test.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
-// +build !nofuse,!openbsd,!netbsd,!plan9
package ipns
diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go
index 9c36c9a26c6..f291c947042 100644
--- a/fuse/ipns/ipns_unix.go
+++ b/fuse/ipns/ipns_unix.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
-// +build !nofuse,!openbsd,!netbsd,!plan9
// package fuse/ipns implements a fuse filesystem that interfaces
// with ipns, the naming system for ipfs.
diff --git a/fuse/ipns/link_unix.go b/fuse/ipns/link_unix.go
index da810c8f947..f95894b1235 100644
--- a/fuse/ipns/link_unix.go
+++ b/fuse/ipns/link_unix.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
-// +build !nofuse,!openbsd,!netbsd,!plan9
package ipns
diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go
index 8c8ea8afeb2..da3a6ac0b31 100644
--- a/fuse/ipns/mount_unix.go
+++ b/fuse/ipns/mount_unix.go
@@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
-// +build linux darwin freebsd netbsd openbsd
-// +build !nofuse
package ipns
diff --git a/fuse/mfs/mfs_test.go b/fuse/mfs/mfs_test.go
index cedbe996723..a441246c71a 100644
--- a/fuse/mfs/mfs_test.go
+++ b/fuse/mfs/mfs_test.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
-// +build !nofuse,!openbsd,!netbsd,!plan9
package mfs
diff --git a/fuse/mfs/mfs_unix.go b/fuse/mfs/mfs_unix.go
index 91cad257d92..99ca5fe529e 100644
--- a/fuse/mfs/mfs_unix.go
+++ b/fuse/mfs/mfs_unix.go
@@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
-// +build linux darwin freebsd netbsd openbsd
-// +build !nofuse
package mfs
diff --git a/fuse/mfs/mount_unix.go b/fuse/mfs/mount_unix.go
index bd7021e28c8..92e0845bc36 100644
--- a/fuse/mfs/mount_unix.go
+++ b/fuse/mfs/mount_unix.go
@@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
-// +build linux darwin freebsd netbsd openbsd
-// +build !nofuse
package mfs
diff --git a/fuse/mount/fuse.go b/fuse/mount/fuse.go
index e18c0b4a9fa..313c4af6a30 100644
--- a/fuse/mount/fuse.go
+++ b/fuse/mount/fuse.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !windows && !openbsd && !netbsd && !plan9
-// +build !nofuse,!windows,!openbsd,!netbsd,!plan9
package mount
diff --git a/fuse/node/mount_darwin.go b/fuse/node/mount_darwin.go
index 88e1f248e46..57fbe4d901e 100644
--- a/fuse/node/mount_darwin.go
+++ b/fuse/node/mount_darwin.go
@@ -1,5 +1,4 @@
-//go:build !nofuse
-// +build !nofuse
+//go:build !nofuse && darwin
package node
diff --git a/fuse/node/mount_nofuse.go b/fuse/node/mount_nofuse.go
index 6d4e102e223..026f002ff08 100644
--- a/fuse/node/mount_nofuse.go
+++ b/fuse/node/mount_nofuse.go
@@ -1,5 +1,4 @@
//go:build !windows && nofuse
-// +build !windows,nofuse
package node
diff --git a/fuse/node/mount_notsupp.go b/fuse/node/mount_notsupp.go
index 15f98c40e89..d5f0d2cbe9c 100644
--- a/fuse/node/mount_notsupp.go
+++ b/fuse/node/mount_notsupp.go
@@ -1,5 +1,4 @@
//go:build (!nofuse && openbsd) || (!nofuse && netbsd) || (!nofuse && plan9)
-// +build !nofuse,openbsd !nofuse,netbsd !nofuse,plan9
package node
diff --git a/fuse/node/mount_test.go b/fuse/node/mount_test.go
index 1947f759ffa..b296e7e95b5 100644
--- a/fuse/node/mount_test.go
+++ b/fuse/node/mount_test.go
@@ -1,5 +1,4 @@
//go:build !openbsd && !nofuse && !netbsd && !plan9
-// +build !openbsd,!nofuse,!netbsd,!plan9
package node
diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go
index 6c63f6e5048..6864e363b17 100644
--- a/fuse/node/mount_unix.go
+++ b/fuse/node/mount_unix.go
@@ -1,5 +1,4 @@
//go:build !windows && !openbsd && !netbsd && !plan9 && !nofuse
-// +build !windows,!openbsd,!netbsd,!plan9,!nofuse
package node
diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go
index 348236737d9..8e7d6b34dcf 100644
--- a/fuse/readonly/ipfs_test.go
+++ b/fuse/readonly/ipfs_test.go
@@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
-// +build !nofuse,!openbsd,!netbsd,!plan9
package readonly
diff --git a/fuse/readonly/mount_unix.go b/fuse/readonly/mount_unix.go
index 0ee19840930..33565acd2a6 100644
--- a/fuse/readonly/mount_unix.go
+++ b/fuse/readonly/mount_unix.go
@@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd) && !nofuse
-// +build linux darwin freebsd
-// +build !nofuse
package readonly
diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go
index 573e80e2377..c042628403c 100644
--- a/fuse/readonly/readonly_unix.go
+++ b/fuse/readonly/readonly_unix.go
@@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd) && !nofuse
-// +build linux darwin freebsd
-// +build !nofuse
package readonly
diff --git a/gc/gc.go b/gc/gc.go
index 1d4805a66e3..ac3f3d08fda 100644
--- a/gc/gc.go
+++ b/gc/gc.go
@@ -165,7 +165,7 @@ func Descendants(ctx context.Context, getLinks dag.GetLinks, set *cid.Set, roots
}
verboseCidError := func(err error) error {
- if strings.Contains(err.Error(), verifcid.ErrBelowMinimumHashLength.Error()) ||
+ if strings.Contains(err.Error(), verifcid.ErrDigestTooSmall.Error()) ||
strings.Contains(err.Error(), verifcid.ErrPossiblyInsecureHashFunction.Error()) {
err = fmt.Errorf("\"%s\"\nPlease run 'ipfs pin verify'"+ // nolint
" to list insecure hashes. If you want to read them,"+
diff --git a/go.mod b/go.mod
index 0396b9e0f86..a1a2a3ab195 100644
--- a/go.mod
+++ b/go.mod
@@ -22,11 +22,11 @@ require (
github.com/hashicorp/go-version v1.7.0
github.com/ipfs-shipyard/nopfs v0.0.14
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0
- github.com/ipfs/boxo v0.34.0
- github.com/ipfs/go-block-format v0.2.2
+ github.com/ipfs/boxo v0.35.0
+ github.com/ipfs/go-block-format v0.2.3
github.com/ipfs/go-cid v0.5.0
github.com/ipfs/go-cidutil v0.1.0
- github.com/ipfs/go-datastore v0.8.3
+ github.com/ipfs/go-datastore v0.9.0
github.com/ipfs/go-detect-race v0.0.1
github.com/ipfs/go-ds-badger v0.3.4
github.com/ipfs/go-ds-flatfs v0.5.5
@@ -36,15 +36,15 @@ require (
github.com/ipfs/go-fs-lock v0.1.1
github.com/ipfs/go-ipfs-cmds v0.15.0
github.com/ipfs/go-ipld-cbor v0.2.1
- github.com/ipfs/go-ipld-format v0.6.2
+ github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-ipld-git v0.1.1
github.com/ipfs/go-ipld-legacy v0.2.2
github.com/ipfs/go-log/v2 v2.8.1
github.com/ipfs/go-metrics-interface v0.3.0
github.com/ipfs/go-metrics-prometheus v0.1.0
github.com/ipfs/go-test v0.2.3
- github.com/ipfs/go-unixfsnode v1.10.1
- github.com/ipld/go-car/v2 v2.14.3
+ github.com/ipfs/go-unixfsnode v1.10.2
+ github.com/ipld/go-car/v2 v2.15.0
github.com/ipld/go-codec-dagpb v1.7.0
github.com/ipld/go-ipld-prime v0.21.0
github.com/ipshipyard/p2p-forge v0.6.1
@@ -53,8 +53,8 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0
github.com/libp2p/go-libp2p v0.43.0
github.com/libp2p/go-libp2p-http v0.5.0
- github.com/libp2p/go-libp2p-kad-dht v0.34.0
- github.com/libp2p/go-libp2p-kbucket v0.7.0
+ github.com/libp2p/go-libp2p-kad-dht v0.35.0
+ github.com/libp2p/go-libp2p-kbucket v0.8.0
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/libp2p/go-libp2p-pubsub-router v0.6.0
github.com/libp2p/go-libp2p-record v0.3.1
@@ -69,28 +69,30 @@ require (
github.com/multiformats/go-multihash v0.2.3
github.com/opentracing/opentracing-go v1.2.0
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
- github.com/prometheus/client_golang v1.23.0
- github.com/stretchr/testify v1.10.0
+ github.com/prometheus/client_golang v1.23.2
+ github.com/stretchr/testify v1.11.1
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
github.com/tidwall/gjson v1.16.0
github.com/tidwall/sjson v1.2.5
github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
go.opencensus.io v0.24.0
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
go.opentelemetry.io/contrib/propagators/autoprop v0.46.1
- go.opentelemetry.io/otel v1.37.0
- go.opentelemetry.io/otel/sdk v1.37.0
- go.opentelemetry.io/otel/trace v1.37.0
+ go.opentelemetry.io/otel v1.38.0
+ go.opentelemetry.io/otel/exporters/prometheus v0.56.0
+ go.opentelemetry.io/otel/sdk v1.38.0
+ go.opentelemetry.io/otel/sdk/metric v1.38.0
+ go.opentelemetry.io/otel/trace v1.38.0
go.uber.org/dig v1.19.0
go.uber.org/fx v1.24.0
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.41.0
- golang.org/x/exp v0.0.0-20250813145105-42675adae3e6
- golang.org/x/mod v0.27.0
- golang.org/x/sync v0.16.0
- golang.org/x/sys v0.35.0
- google.golang.org/protobuf v1.36.7
+ golang.org/x/crypto v0.42.0
+ golang.org/x/exp v0.0.0-20250911091902-df9299821621
+ golang.org/x/mod v0.28.0
+ golang.org/x/sync v0.17.0
+ golang.org/x/sys v0.36.0
+ google.golang.org/protobuf v1.36.9
)
require (
@@ -102,7 +104,7 @@ require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/caddyserver/zerossl v0.1.3 // indirect
- github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect
@@ -124,11 +126,11 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
- github.com/gabriel-vasile/mimetype v1.4.9 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
- github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+ github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
@@ -140,13 +142,14 @@ require (
github.com/google/gopacket v1.1.19 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/guillaumemichel/reservedpool v0.3.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
- github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
+ github.com/ipfs/go-dsqueue v0.0.5 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
@@ -183,7 +186,7 @@ require (
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
- github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/gomega v1.36.3 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
@@ -210,15 +213,16 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
+ github.com/probe-lab/go-libdht v0.2.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/prometheus/statsd_exporter v0.27.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
- github.com/rogpeppe/go-internal v1.13.1 // indirect
+ github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/slok/go-http-metrics v0.13.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@@ -233,33 +237,35 @@ require (
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/propagators/aws v1.21.1 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 // indirect
go.opentelemetry.io/contrib/propagators/ot v1.21.1 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/zipkin v1.37.0 // indirect
- go.opentelemetry.io/otel/metric v1.37.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
+ go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/oauth2 v0.30.0 // indirect
- golang.org/x/term v0.34.0 // indirect
- golang.org/x/text v0.28.0 // indirect
+ golang.org/x/net v0.44.0 // indirect
+ golang.org/x/oauth2 v0.31.0 // indirect
+ golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 // indirect
+ golang.org/x/term v0.35.0 // indirect
+ golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
- golang.org/x/tools v0.36.0 // indirect
+ golang.org/x/tools v0.37.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
- google.golang.org/grpc v1.73.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
+ google.golang.org/grpc v1.75.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
diff --git a/go.sum b/go.sum
index a5c243b03d4..5dd61e2dc8d 100644
--- a/go.sum
+++ b/go.sum
@@ -94,8 +94,8 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
-github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk=
github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY=
@@ -197,8 +197,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
-github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
+github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
+github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
@@ -216,8 +216,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -329,8 +329,10 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw=
+github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
@@ -354,13 +356,13 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g=
-github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA=
+github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY=
+github.com/ipfs/boxo v0.35.0/go.mod h1:uhaF0DGnbgEiXDTmD249jCGbxVkMm6+Ew85q6Uub7lo=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
-github.com/ipfs/go-block-format v0.2.2 h1:uecCTgRwDIXyZPgYspaLXoMiMmxQpSx2aq34eNc4YvQ=
-github.com/ipfs/go-block-format v0.2.2/go.mod h1:vmuefuWU6b+9kIU0vZJgpiJt1yicQz9baHXE8qR+KB8=
+github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
+github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
@@ -370,8 +372,8 @@ github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
-github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4=
-github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M=
+github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
+github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
@@ -386,6 +388,8 @@ github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICk
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.1 h1:p0FAE0zw9J/3T1VkGB9s98jWmfKmw2t0iEwfMUv8iSQ=
github.com/ipfs/go-ds-pebble v0.5.1/go.mod h1:LsmQx4w+0o9znl4hTxYo1Y2lnBTzNCwc4kNpD3wWXM0=
+github.com/ipfs/go-dsqueue v0.0.5 h1:TUOk15TlCJ/NKV8Yk2W5wgkEjDa44Nem7a7FGIjsMNU=
+github.com/ipfs/go-dsqueue v0.0.5/go.mod h1:i/jAlpZjBbQJLioN+XKbFgnd+u9eAhGZs9IrqIzTd9g=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ=
@@ -405,8 +409,8 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E=
github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A=
-github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU=
-github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk=
+github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8=
+github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk=
github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y=
github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI=
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
@@ -424,10 +428,10 @@ github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
-github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM=
-github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY=
-github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8=
-github.com/ipld/go-car/v2 v2.14.3/go.mod h1:/vpSvPngOX8UnvmdFJ3o/mDgXa9LuyXsn7wxOzHDYQE=
+github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
+github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg=
+github.com/ipld/go-car/v2 v2.15.0 h1:RxtZcGXFx72zFESl+UUsCNQV2YMcy3gEMYx9M3uio24=
+github.com/ipld/go-car/v2 v2.15.0/go.mod h1:ovlq/n3xlVJDmoiN3Kd/Z7kIzQbdTIFSwltfOP+qIgk=
github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0=
github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM=
github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
@@ -512,11 +516,11 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk
github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc=
github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0 h1:pWRC4FKR9ptQjA9DuMSrAn2D3vABE8r58iAeoLoK1Ig=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0/go.mod h1:s70f017NjhsBx+SVl0/w+x//uyglrFpKLfvuQJj4QAU=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
-github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs=
-github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g=
+github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
+github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
@@ -635,8 +639,8 @@ github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuV
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
-github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI=
+github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -726,6 +730,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/probe-lab/go-libdht v0.2.1 h1:oBCsKBvS/OVirTO5+BT6/AOocWjdqwpfSfkTfBjUPJE=
+github.com/probe-lab/go-libdht v0.2.1/go.mod h1:q+WlGiqs/UIRfdhw9Gmc+fPoAYlOim7VvXTjOI6KJmQ=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -734,8 +740,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
-github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -749,8 +755,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
-github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -774,8 +780,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -848,8 +854,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
@@ -922,10 +928,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 h1:cXTYcMjY0dsYokAuo8LbNBQxpF8VgTHdiHJJ1zlIXl4=
go.opentelemetry.io/contrib/propagators/autoprop v0.46.1/go.mod h1:WZxgny1/6+j67B1s72PLJ4bGjidoWFzSmLNfJKVt2bo=
go.opentelemetry.io/contrib/propagators/aws v1.21.1 h1:uQIQIDWb0gzyvon2ICnghpLAf9w7ADOCUiIiwCQgR2o=
@@ -936,28 +942,30 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV
go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY=
go.opentelemetry.io/contrib/propagators/ot v1.21.1 h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk=
go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4=
-go.opentelemetry.io/otel/exporters/zipkin v1.37.0 h1:Z2apuaRnHEjzDAkpbWNPiksz1R0/FCIrJSjiMA43zwI=
-go.opentelemetry.io/otel/exporters/zipkin v1.37.0/go.mod h1:ofGu/7fG+bpmjZoiPUUmYDJ4vXWxMT57HmGoegx49uw=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
-go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
-go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
+go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
+go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
@@ -974,6 +982,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
@@ -997,8 +1007,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
+golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1009,8 +1019,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1034,8 +1044,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1087,8 +1097,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
+golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1098,8 +1108,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
-golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
+golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1115,8 +1125,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1184,8 +1194,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1194,8 +1206,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
+golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1207,8 +1219,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1266,8 +1278,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1337,10 +1349,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -1357,8 +1369,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
-google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
+google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
+google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1373,8 +1385,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
-google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/mk/golang.mk b/mk/golang.mk
index 5f691bc8543..4f4cd4fed34 100644
--- a/mk/golang.mk
+++ b/mk/golang.mk
@@ -42,15 +42,14 @@ define go-build
$(GOCC) build $(go-flags-with-tags) -o "$@" "$(1)"
endef
-define go-try-build
-$(GOCC) build $(go-flags-with-tags) -o /dev/null "$(call go-pkg-name,$<)"
-endef
-
test_go_test: $$(DEPS_GO)
$(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) ./...
.PHONY: test_go_test
-test_go_build: $$(TEST_GO_BUILD)
+# Build all platforms from .github/build-platforms.yml
+test_go_build:
+ bin/test-go-build-platforms
+.PHONY: test_go_build
test_go_short: GOTFLAGS += -test.short
test_go_short: test_go_test
diff --git a/mk/util.mk b/mk/util.mk
index 2ce48583f56..3eb9f76d075 100644
--- a/mk/util.mk
+++ b/mk/util.mk
@@ -9,26 +9,9 @@ else
PATH_SEP :=:
endif
-SUPPORTED_PLATFORMS += windows-386
-SUPPORTED_PLATFORMS += windows-amd64
-
-SUPPORTED_PLATFORMS += linux-arm
-SUPPORTED_PLATFORMS += linux-arm64
-SUPPORTED_PLATFORMS += linux-386
-SUPPORTED_PLATFORMS += linux-amd64
-
-SUPPORTED_PLATFORMS += darwin-amd64
-ifeq ($(shell bin/check_go_version "1.16.0" 2>/dev/null; echo $$?),0)
-SUPPORTED_PLATFORMS += darwin-arm64
-endif
-SUPPORTED_PLATFORMS += freebsd-386
-SUPPORTED_PLATFORMS += freebsd-amd64
-
-SUPPORTED_PLATFORMS += openbsd-386
-SUPPORTED_PLATFORMS += openbsd-amd64
-
-SUPPORTED_PLATFORMS += netbsd-386
-SUPPORTED_PLATFORMS += netbsd-amd64
+# Platforms are now defined in .github/build-platforms.yml
+# The cmd/ipfs-try-build target is deprecated in favor of GitHub Actions
+# Use 'make supported' to see the list of platforms
space:=$() $()
comma:=,
diff --git a/plugin/loader/load_nocgo.go b/plugin/loader/load_nocgo.go
index 9de31a9eb69..3e0f393377d 100644
--- a/plugin/loader/load_nocgo.go
+++ b/plugin/loader/load_nocgo.go
@@ -1,7 +1,4 @@
//go:build !cgo && !noplugin && (linux || darwin || freebsd)
-// +build !cgo
-// +build !noplugin
-// +build linux darwin freebsd
package loader
diff --git a/plugin/loader/load_noplugin.go b/plugin/loader/load_noplugin.go
index fc56b16a073..dddeac91d74 100644
--- a/plugin/loader/load_noplugin.go
+++ b/plugin/loader/load_noplugin.go
@@ -1,5 +1,4 @@
//go:build noplugin
-// +build noplugin
package loader
diff --git a/plugin/loader/load_unix.go b/plugin/loader/load_unix.go
index 4a5dccb40a7..05af3019719 100644
--- a/plugin/loader/load_unix.go
+++ b/plugin/loader/load_unix.go
@@ -1,7 +1,4 @@
//go:build cgo && !noplugin && (linux || darwin || freebsd)
-// +build cgo
-// +build !noplugin
-// +build linux darwin freebsd
package loader
diff --git a/plugin/plugins/telemetry/telemetry.go b/plugin/plugins/telemetry/telemetry.go
index bcb6c03e972..f96fc0805cf 100644
--- a/plugin/plugins/telemetry/telemetry.go
+++ b/plugin/plugins/telemetry/telemetry.go
@@ -9,7 +9,9 @@ import (
"os"
"path"
"runtime"
+ "slices"
"strings"
+ "sync"
"time"
"github.com/google/uuid"
@@ -27,6 +29,14 @@ import (
var log = logging.Logger("telemetry")
+// Caching for virtualization detection - these values never change during process lifetime
+var (
+ containerDetectionOnce sync.Once
+ vmDetectionOnce sync.Once
+ isContainerCached bool
+ isVMCached bool
+)
+
const (
modeEnvVar = "IPFS_TELEMETRY"
uuidFilename = "telemetry_uuid"
@@ -397,7 +407,7 @@ func (p *telemetryPlugin) collectBasicInfo() {
}
p.event.UptimeBucket = uptimeBucket
- p.event.ReproviderStrategy = p.config.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
+ p.event.ReproviderStrategy = p.config.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
}
func (p *telemetryPlugin) collectRoutingInfo() {
@@ -476,45 +486,135 @@ func (p *telemetryPlugin) collectPlatformInfo() {
}
func isRunningInContainer() bool {
- // Check for Docker container
+ containerDetectionOnce.Do(func() {
+ isContainerCached = detectContainer()
+ })
+ return isContainerCached
+}
+
+func detectContainer() bool {
+ // Docker creates /.dockerenv inside containers
if _, err := os.Stat("/.dockerenv"); err == nil {
return true
}
- // Check cgroup for container
- content, err := os.ReadFile("/proc/self/cgroup")
- if err == nil {
- if strings.Contains(string(content), "docker") || strings.Contains(string(content), "lxc") || strings.Contains(string(content), "/kubepods") {
- return true
- }
+ // Kubernetes mounts service account tokens inside pods
+ if _, err := os.Stat("/var/run/secrets/kubernetes.io"); err == nil {
+ return true
}
- content, err = os.ReadFile("/proc/self/mountinfo")
- if err == nil {
+ // systemd-nspawn creates this file inside containers
+ if _, err := os.Stat("/run/systemd/container"); err == nil {
+ return true
+ }
+
+ // Check if our process is running inside a container cgroup
+ // Look for container-specific patterns in the cgroup path after "::/"
+ if content, err := os.ReadFile("/proc/self/cgroup"); err == nil {
for line := range strings.Lines(string(content)) {
- if strings.Contains(line, "overlay") && strings.Contains(line, "/var/lib/containers/storage/overlay") {
+ // cgroup lines format: "ID:subsystem:/path"
+ // We want to check the path part after the last ":"
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) == 3 {
+ cgroupPath := parts[2]
+ // Check for container-specific paths
+ containerIndicators := []string{
+ "/docker/", // Docker containers
+ "/containerd/", // containerd runtime
+ "/cri-o/", // CRI-O runtime
+ "/lxc/", // LXC containers
+ "/podman/", // Podman containers
+ "/kubepods/", // Kubernetes pods
+ }
+ for _, indicator := range containerIndicators {
+ if strings.Contains(cgroupPath, indicator) {
+ return true
+ }
+ }
+ }
+ }
+ }
+
+ // WSL is technically a container-like environment
+ if runtime.GOOS == "linux" {
+ if content, err := os.ReadFile("/proc/sys/kernel/osrelease"); err == nil {
+ osrelease := strings.ToLower(string(content))
+ if strings.Contains(osrelease, "microsoft") || strings.Contains(osrelease, "wsl") {
return true
}
}
}
- // Also check for systemd-nspawn
- if _, err := os.Stat("/run/systemd/container"); err == nil {
- return true
+ // LXC sets container environment variable
+ if content, err := os.ReadFile("/proc/1/environ"); err == nil {
+ if strings.Contains(string(content), "container=lxc") {
+ return true
+ }
+ }
+
+ // Additional check: In containers, PID 1 is often not systemd/init
+ if content, err := os.ReadFile("/proc/1/comm"); err == nil {
+ pid1 := strings.TrimSpace(string(content))
+ // Common container init processes
+ containerInits := []string{"tini", "dumb-init", "s6-svscan", "runit"}
+ if slices.Contains(containerInits, pid1) {
+ return true
+ }
}
return false
}
func isRunningInVM() bool {
- // Check for VM
- if _, err := os.Stat("/sys/hypervisor/uuid"); err == nil {
- return true
+ vmDetectionOnce.Do(func() {
+ isVMCached = detectVM()
+ })
+ return isVMCached
+}
+
+func detectVM() bool {
+ // Check for VM-specific files and drivers that only exist inside VMs
+ vmIndicators := []string{
+ "/proc/xen", // Xen hypervisor guest
+ "/sys/hypervisor/uuid", // KVM/Xen hypervisor guest
+ "/dev/vboxguest", // VirtualBox guest additions
+ "/sys/module/vmw_balloon", // VMware balloon driver (guest only)
+ "/sys/module/hv_vmbus", // Hyper-V VM bus driver (guest only)
}
- // Check for other VM indicators
- if _, err := os.Stat("/dev/virt-0"); err == nil {
- return true
+ for _, path := range vmIndicators {
+ if _, err := os.Stat(path); err == nil {
+ return true
+ }
+ }
+
+ // Check DMI for VM vendors - these strings only appear inside VMs
+ // DMI (Desktop Management Interface) is populated by the hypervisor
+ dmiFiles := map[string][]string{
+ "/sys/class/dmi/id/sys_vendor": {
+ "qemu", "kvm", "vmware", "virtualbox", "xen",
+ "parallels", // Parallels Desktop
+ // Note: Removed "microsoft corporation" as it can match Surface devices
+ },
+ "/sys/class/dmi/id/product_name": {
+ "virtualbox", "vmware", "kvm", "qemu",
+ "hvm domu", // Xen HVM guest
+ // Note: Removed generic "virtual machine" to avoid false positives
+ },
+ "/sys/class/dmi/id/chassis_vendor": {
+ "qemu", "oracle", // Oracle for VirtualBox
+ },
+ }
+
+ for path, signatures := range dmiFiles {
+ if content, err := os.ReadFile(path); err == nil {
+ contentStr := strings.ToLower(strings.TrimSpace(string(content)))
+ for _, sig := range signatures {
+ if strings.Contains(contentStr, sig) {
+ return true
+ }
+ }
+ }
}
return false
diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go
index 671621ef39b..718d5614d82 100644
--- a/repo/fsrepo/fsrepo.go
+++ b/repo/fsrepo/fsrepo.go
@@ -393,6 +393,7 @@ func (r *FSRepo) SetAPIAddr(addr ma.Multiaddr) error {
}
if _, err = f.WriteString(addr.String()); err != nil {
+ f.Close()
return err
}
if err = f.Close(); err != nil {
diff --git a/repo/fsrepo/migrations/common/base.go b/repo/fsrepo/migrations/common/base.go
new file mode 100644
index 00000000000..9b9ef635d32
--- /dev/null
+++ b/repo/fsrepo/migrations/common/base.go
@@ -0,0 +1,97 @@
+package common
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+)
+
+// BaseMigration provides common functionality for migrations
+type BaseMigration struct {
+ FromVersion string
+ ToVersion string
+ Description string
+ Convert func(in io.ReadSeeker, out io.Writer) error
+}
+
+// Versions returns the version string for this migration
+func (m *BaseMigration) Versions() string {
+ return fmt.Sprintf("%s-to-%s", m.FromVersion, m.ToVersion)
+}
+
+// configBackupSuffix returns the backup suffix for the config file
+// e.g. ".16-to-17.bak" results in "config.16-to-17.bak"
+func (m *BaseMigration) configBackupSuffix() string {
+ return fmt.Sprintf(".%s-to-%s.bak", m.FromVersion, m.ToVersion)
+}
+
+// Reversible returns true as we keep backups
+func (m *BaseMigration) Reversible() bool {
+ return true
+}
+
+// Apply performs the migration
+func (m *BaseMigration) Apply(opts Options) error {
+ if opts.Verbose {
+ fmt.Printf("applying %s repo migration\n", m.Versions())
+ if m.Description != "" {
+ fmt.Printf("> %s\n", m.Description)
+ }
+ }
+
+ // Check version
+ if err := CheckVersion(opts.Path, m.FromVersion); err != nil {
+ return err
+ }
+
+ configPath := filepath.Join(opts.Path, "config")
+
+ // Perform migration with backup
+ if err := WithBackup(configPath, m.configBackupSuffix(), m.Convert); err != nil {
+ return err
+ }
+
+ // Update version
+ if err := WriteVersion(opts.Path, m.ToVersion); err != nil {
+ if opts.Verbose {
+ fmt.Printf("failed to update version file to %s\n", m.ToVersion)
+ }
+ return err
+ }
+
+ if opts.Verbose {
+ fmt.Println("updated version file")
+ fmt.Printf("Migration %s succeeded\n", m.Versions())
+ }
+
+ return nil
+}
+
+// Revert reverts the migration
+func (m *BaseMigration) Revert(opts Options) error {
+ if opts.Verbose {
+ fmt.Println("reverting migration")
+ }
+
+ // Check we're at the expected version
+ if err := CheckVersion(opts.Path, m.ToVersion); err != nil {
+ return err
+ }
+
+ // Restore backup
+ configPath := filepath.Join(opts.Path, "config")
+ if err := RevertBackup(configPath, m.configBackupSuffix()); err != nil {
+ return err
+ }
+
+ // Revert version
+ if err := WriteVersion(opts.Path, m.FromVersion); err != nil {
+ return err
+ }
+
+ if opts.Verbose {
+ fmt.Printf("lowered version number to %s\n", m.FromVersion)
+ }
+
+ return nil
+}
diff --git a/repo/fsrepo/migrations/common/config_helpers.go b/repo/fsrepo/migrations/common/config_helpers.go
new file mode 100644
index 00000000000..22b99f84d73
--- /dev/null
+++ b/repo/fsrepo/migrations/common/config_helpers.go
@@ -0,0 +1,353 @@
+package common
+
+import (
+ "fmt"
+ "maps"
+ "slices"
+ "strings"
+)
+
+// GetField retrieves a field from a nested config structure using a dot-separated path
+// Example: GetField(config, "DNS.Resolvers") returns config["DNS"]["Resolvers"]
+func GetField(config map[string]any, path string) (any, bool) {
+ parts := strings.Split(path, ".")
+ current := config
+
+ for i, part := range parts {
+ // Last part - return the value
+ if i == len(parts)-1 {
+ val, exists := current[part]
+ return val, exists
+ }
+
+ // Navigate deeper
+ next, exists := current[part]
+ if !exists {
+ return nil, false
+ }
+
+ // Ensure it's a map
+ nextMap, ok := next.(map[string]any)
+ if !ok {
+ return nil, false
+ }
+ current = nextMap
+ }
+
+ return nil, false
+}
+
+// SetField sets a field in a nested config structure using a dot-separated path
+// It creates intermediate maps as needed
+func SetField(config map[string]any, path string, value any) {
+ parts := strings.Split(path, ".")
+ current := config
+
+ for i, part := range parts {
+ // Last part - set the value
+ if i == len(parts)-1 {
+ current[part] = value
+ return
+ }
+
+ // Navigate or create intermediate maps
+ next, exists := current[part]
+ if !exists {
+ // Create new intermediate map
+ newMap := make(map[string]any)
+ current[part] = newMap
+ current = newMap
+ } else {
+ // Ensure it's a map
+ nextMap, ok := next.(map[string]any)
+ if !ok {
+ // Can't navigate further, replace with new map
+ newMap := make(map[string]any)
+ current[part] = newMap
+ current = newMap
+ } else {
+ current = nextMap
+ }
+ }
+ }
+}
+
+// DeleteField removes a field from a nested config structure
+func DeleteField(config map[string]any, path string) bool {
+ parts := strings.Split(path, ".")
+
+ // Handle simple case
+ if len(parts) == 1 {
+ _, exists := config[parts[0]]
+ delete(config, parts[0])
+ return exists
+ }
+
+ // Navigate to parent
+ parentPath := strings.Join(parts[:len(parts)-1], ".")
+ parent, exists := GetField(config, parentPath)
+ if !exists {
+ return false
+ }
+
+ parentMap, ok := parent.(map[string]any)
+ if !ok {
+ return false
+ }
+
+ fieldName := parts[len(parts)-1]
+ _, exists = parentMap[fieldName]
+ delete(parentMap, fieldName)
+ return exists
+}
+
+// MoveField moves a field from one location to another
+func MoveField(config map[string]any, from, to string) error {
+ value, exists := GetField(config, from)
+ if !exists {
+ return fmt.Errorf("source field %s does not exist", from)
+ }
+
+ SetField(config, to, value)
+ DeleteField(config, from)
+ return nil
+}
+
+// RenameField renames a field within the same parent
+func RenameField(config map[string]any, path, oldName, newName string) error {
+ var parent map[string]any
+ if path == "" {
+ parent = config
+ } else {
+ p, exists := GetField(config, path)
+ if !exists {
+ return fmt.Errorf("parent path %s does not exist", path)
+ }
+ var ok bool
+ parent, ok = p.(map[string]any)
+ if !ok {
+ return fmt.Errorf("parent path %s is not a map", path)
+ }
+ }
+
+ value, exists := parent[oldName]
+ if !exists {
+ return fmt.Errorf("field %s does not exist", oldName)
+ }
+
+ parent[newName] = value
+ delete(parent, oldName)
+ return nil
+}
+
+// SetDefault sets a field value only if it doesn't already exist
+func SetDefault(config map[string]any, path string, value any) {
+ if _, exists := GetField(config, path); !exists {
+ SetField(config, path, value)
+ }
+}
+
+// TransformField applies a transformation function to a field value
+func TransformField(config map[string]any, path string, transformer func(any) any) error {
+ value, exists := GetField(config, path)
+ if !exists {
+ return fmt.Errorf("field %s does not exist", path)
+ }
+
+ newValue := transformer(value)
+ SetField(config, path, newValue)
+ return nil
+}
+
+// EnsureFieldIs checks if a field equals expected value, sets it if missing
+func EnsureFieldIs(config map[string]any, path string, expected any) {
+ current, exists := GetField(config, path)
+ if !exists || current != expected {
+ SetField(config, path, expected)
+ }
+}
+
+// MergeInto merges multiple source fields into a destination map
+func MergeInto(config map[string]any, destination string, sources ...string) {
+ var destMap map[string]any
+
+ // Get existing destination if it exists
+ if existing, exists := GetField(config, destination); exists {
+ if m, ok := existing.(map[string]any); ok {
+ destMap = m
+ }
+ }
+
+ // Merge each source
+ for _, source := range sources {
+ if value, exists := GetField(config, source); exists {
+ if sourceMap, ok := value.(map[string]any); ok {
+ if destMap == nil {
+ destMap = make(map[string]any)
+ }
+ maps.Copy(destMap, sourceMap)
+ }
+ }
+ }
+
+ if destMap != nil {
+ SetField(config, destination, destMap)
+ }
+}
+
+// CopyField copies a field value to a new location (keeps original)
+func CopyField(config map[string]any, from, to string) error {
+ value, exists := GetField(config, from)
+ if !exists {
+ return fmt.Errorf("source field %s does not exist", from)
+ }
+
+ SetField(config, to, value)
+ return nil
+}
+
+// ConvertInterfaceSlice converts []interface{} to []string
+func ConvertInterfaceSlice(slice []interface{}) []string {
+ result := make([]string, 0, len(slice))
+ for _, item := range slice {
+ if str, ok := item.(string); ok {
+ result = append(result, str)
+ }
+ }
+ return result
+}
+
+// GetOrCreateSection gets or creates a map section in config
+func GetOrCreateSection(config map[string]any, path string) map[string]any {
+ existing, exists := GetField(config, path)
+ if exists {
+ if section, ok := existing.(map[string]any); ok {
+ return section
+ }
+ }
+
+ // Create new section
+ section := make(map[string]any)
+ SetField(config, path, section)
+ return section
+}
+
+// SafeCastMap safely casts to map[string]any with fallback to empty map
+func SafeCastMap(value any) map[string]any {
+ if m, ok := value.(map[string]any); ok {
+ return m
+ }
+ return make(map[string]any)
+}
+
+// SafeCastSlice safely casts to []interface{} with fallback to empty slice
+func SafeCastSlice(value any) []interface{} {
+ if s, ok := value.([]interface{}); ok {
+ return s
+ }
+ return []interface{}{}
+}
+
+// ReplaceDefaultsWithAuto replaces default values with "auto" in a map
+func ReplaceDefaultsWithAuto(values map[string]any, defaults map[string]string) map[string]string {
+ result := make(map[string]string)
+ for k, v := range values {
+ if vStr, ok := v.(string); ok {
+ if replacement, isDefault := defaults[vStr]; isDefault {
+ result[k] = replacement
+ } else {
+ result[k] = vStr
+ }
+ }
+ }
+ return result
+}
+
+// EnsureSliceContains ensures a slice field contains a value
+func EnsureSliceContains(config map[string]any, path string, value string) {
+ existing, exists := GetField(config, path)
+ if !exists {
+ SetField(config, path, []string{value})
+ return
+ }
+
+ if slice, ok := existing.([]interface{}); ok {
+ // Check if value already exists
+ for _, item := range slice {
+ if str, ok := item.(string); ok && str == value {
+ return // Already contains value
+ }
+ }
+ // Add value
+ SetField(config, path, append(slice, value))
+ } else if strSlice, ok := existing.([]string); ok {
+ if !slices.Contains(strSlice, value) {
+ SetField(config, path, append(strSlice, value))
+ }
+ } else {
+ // Replace with new slice containing value
+ SetField(config, path, []string{value})
+ }
+}
+
+// ReplaceInSlice replaces old values with new in a slice field
+func ReplaceInSlice(config map[string]any, path string, oldValue, newValue string) {
+ existing, exists := GetField(config, path)
+ if !exists {
+ return
+ }
+
+ if slice, ok := existing.([]interface{}); ok {
+ result := make([]string, 0, len(slice))
+ for _, item := range slice {
+ if str, ok := item.(string); ok {
+ if str == oldValue {
+ result = append(result, newValue)
+ } else {
+ result = append(result, str)
+ }
+ }
+ }
+ SetField(config, path, result)
+ }
+}
+
+// GetMapSection gets a map section with error handling
+func GetMapSection(config map[string]any, path string) (map[string]any, error) {
+ value, exists := GetField(config, path)
+ if !exists {
+ return nil, fmt.Errorf("section %s does not exist", path)
+ }
+
+ section, ok := value.(map[string]any)
+ if !ok {
+ return nil, fmt.Errorf("section %s is not a map", path)
+ }
+
+ return section, nil
+}
+
+// CloneStringMap clones a map[string]any to map[string]string
+func CloneStringMap(m map[string]any) map[string]string {
+ result := make(map[string]string, len(m))
+ for k, v := range m {
+ if str, ok := v.(string); ok {
+ result[k] = str
+ }
+ }
+ return result
+}
+
+// IsEmptySlice checks if a value is an empty slice
+func IsEmptySlice(value any) bool {
+ if value == nil {
+ return true
+ }
+ if slice, ok := value.([]interface{}); ok {
+ return len(slice) == 0
+ }
+ if slice, ok := value.([]string); ok {
+ return len(slice) == 0
+ }
+ return false
+}
diff --git a/repo/fsrepo/migrations/common/migration.go b/repo/fsrepo/migrations/common/migration.go
new file mode 100644
index 00000000000..7d72cfea3f7
--- /dev/null
+++ b/repo/fsrepo/migrations/common/migration.go
@@ -0,0 +1,16 @@
+// Package common contains common types and interfaces for file system repository migrations
+package common
+
+// Options contains migration options for embedded migrations
+type Options struct {
+ Path string
+ Verbose bool
+}
+
+// Migration is the interface that all migrations must implement
+type Migration interface {
+ Versions() string
+ Apply(opts Options) error
+ Revert(opts Options) error
+ Reversible() bool
+}
diff --git a/repo/fsrepo/migrations/common/testing_helpers.go b/repo/fsrepo/migrations/common/testing_helpers.go
new file mode 100644
index 00000000000..5ed08e18ec7
--- /dev/null
+++ b/repo/fsrepo/migrations/common/testing_helpers.go
@@ -0,0 +1,290 @@
+package common
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+// TestCase represents a single migration test case
+type TestCase struct {
+ Name string
+ InputConfig map[string]any
+ Assertions []ConfigAssertion
+}
+
+// ConfigAssertion represents an assertion about the migrated config
+type ConfigAssertion struct {
+ Path string
+ Expected any
+}
+
+// RunMigrationTest runs a migration test with the given test case
+func RunMigrationTest(t *testing.T, migration Migration, tc TestCase) {
+ t.Helper()
+
+ // Convert input to JSON
+ inputJSON, err := json.MarshalIndent(tc.InputConfig, "", " ")
+ if err != nil {
+ t.Fatalf("failed to marshal input config: %v", err)
+ }
+
+ // Run the migration's convert function
+ var output bytes.Buffer
+ if baseMig, ok := migration.(*BaseMigration); ok {
+ err = baseMig.Convert(bytes.NewReader(inputJSON), &output)
+ if err != nil {
+ t.Fatalf("migration failed: %v", err)
+ }
+ } else {
+ t.Skip("migration is not a BaseMigration")
+ }
+
+ // Parse output
+ var result map[string]any
+ err = json.Unmarshal(output.Bytes(), &result)
+ if err != nil {
+ t.Fatalf("failed to unmarshal output: %v", err)
+ }
+
+ // Run assertions
+ for _, assertion := range tc.Assertions {
+ AssertConfigField(t, result, assertion.Path, assertion.Expected)
+ }
+}
+
+// AssertConfigField asserts that a field in the config has the expected value
+func AssertConfigField(t *testing.T, config map[string]any, path string, expected any) {
+ t.Helper()
+
+ actual, exists := GetField(config, path)
+ if expected == nil {
+ if exists {
+ t.Errorf("expected field %s to not exist, but it has value: %v", path, actual)
+ }
+ return
+ }
+
+ if !exists {
+ t.Errorf("expected field %s to exist with value %v, but it doesn't exist", path, expected)
+ return
+ }
+
+ // Handle different types of comparisons
+ switch exp := expected.(type) {
+ case []string:
+ actualSlice, ok := actual.([]interface{})
+ if !ok {
+ t.Errorf("field %s: expected []string, got %T", path, actual)
+ return
+ }
+ if len(exp) != len(actualSlice) {
+ t.Errorf("field %s: expected slice of length %d, got %d", path, len(exp), len(actualSlice))
+ return
+ }
+ for i, expVal := range exp {
+ if actualSlice[i] != expVal {
+ t.Errorf("field %s[%d]: expected %v, got %v", path, i, expVal, actualSlice[i])
+ }
+ }
+ case map[string]string:
+ actualMap, ok := actual.(map[string]any)
+ if !ok {
+ t.Errorf("field %s: expected map, got %T", path, actual)
+ return
+ }
+ for k, v := range exp {
+ if actualMap[k] != v {
+ t.Errorf("field %s[%s]: expected %v, got %v", path, k, v, actualMap[k])
+ }
+ }
+ default:
+ if actual != expected {
+ t.Errorf("field %s: expected %v, got %v", path, expected, actual)
+ }
+ }
+}
+
+// GenerateTestConfig creates a basic test config with the given fields
+func GenerateTestConfig(fields map[string]any) map[string]any {
+ // Start with a minimal valid config
+ config := map[string]any{
+ "Identity": map[string]any{
+ "PeerID": "QmTest",
+ },
+ }
+
+ // Merge in the provided fields
+ maps.Copy(config, fields)
+
+ return config
+}
+
+// CreateTestRepo creates a temporary test repository with the given version and config
+func CreateTestRepo(t *testing.T, version int, config map[string]any) string {
+ t.Helper()
+
+ tempDir := t.TempDir()
+
+ // Write version file
+ versionPath := filepath.Join(tempDir, "version")
+ err := os.WriteFile(versionPath, []byte(fmt.Sprintf("%d", version)), 0644)
+ if err != nil {
+ t.Fatalf("failed to write version file: %v", err)
+ }
+
+ // Write config file
+ configPath := filepath.Join(tempDir, "config")
+ configData, err := json.MarshalIndent(config, "", " ")
+ if err != nil {
+ t.Fatalf("failed to marshal config: %v", err)
+ }
+ err = os.WriteFile(configPath, configData, 0644)
+ if err != nil {
+ t.Fatalf("failed to write config file: %v", err)
+ }
+
+ return tempDir
+}
+
+// AssertMigrationSuccess runs a full migration and checks that it succeeds
+func AssertMigrationSuccess(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) map[string]any {
+ t.Helper()
+
+ // Create test repo
+ repoPath := CreateTestRepo(t, fromVersion, inputConfig)
+
+ // Run migration
+ opts := Options{
+ Path: repoPath,
+ Verbose: false,
+ }
+
+ err := migration.Apply(opts)
+ if err != nil {
+ t.Fatalf("migration failed: %v", err)
+ }
+
+ // Check version was updated
+ versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version"))
+ if err != nil {
+ t.Fatalf("failed to read version file: %v", err)
+ }
+ actualVersion := string(versionBytes)
+ if actualVersion != fmt.Sprintf("%d", toVersion) {
+ t.Errorf("expected version %d, got %s", toVersion, actualVersion)
+ }
+
+ // Read and return the migrated config
+ configBytes, err := os.ReadFile(filepath.Join(repoPath, "config"))
+ if err != nil {
+ t.Fatalf("failed to read config file: %v", err)
+ }
+
+ var result map[string]any
+ err = json.Unmarshal(configBytes, &result)
+ if err != nil {
+ t.Fatalf("failed to unmarshal config: %v", err)
+ }
+
+ return result
+}
+
+// AssertMigrationReversible checks that a migration can be reverted
+func AssertMigrationReversible(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) {
+ t.Helper()
+
+ // Create test repo at target version
+ repoPath := CreateTestRepo(t, toVersion, inputConfig)
+
+ // Create backup file (simulating a previous migration)
+ backupPath := filepath.Join(repoPath, fmt.Sprintf("config.%d-to-%d.bak", fromVersion, toVersion))
+ originalConfig, err := json.MarshalIndent(inputConfig, "", " ")
+ if err != nil {
+ t.Fatalf("failed to marshal original config: %v", err)
+ }
+
+ if err := os.WriteFile(backupPath, originalConfig, 0644); err != nil {
+ t.Fatalf("failed to write backup file: %v", err)
+ }
+
+ // Run revert
+ if err := migration.Revert(Options{Path: repoPath}); err != nil {
+ t.Fatalf("revert failed: %v", err)
+ }
+
+ // Verify version was reverted
+ versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version"))
+ if err != nil {
+ t.Fatalf("failed to read version file: %v", err)
+ }
+
+ if actualVersion := string(versionBytes); actualVersion != fmt.Sprintf("%d", fromVersion) {
+ t.Errorf("expected version %d after revert, got %s", fromVersion, actualVersion)
+ }
+
+ // Verify config was reverted
+ configBytes, err := os.ReadFile(filepath.Join(repoPath, "config"))
+ if err != nil {
+ t.Fatalf("failed to read reverted config file: %v", err)
+ }
+
+ var revertedConfig map[string]any
+ if err := json.Unmarshal(configBytes, &revertedConfig); err != nil {
+ t.Fatalf("failed to unmarshal reverted config: %v", err)
+ }
+
+ // Compare reverted config with original
+ compareConfigs(t, inputConfig, revertedConfig, "")
+}
+
+// compareConfigs recursively compares two config maps and reports differences
+func compareConfigs(t *testing.T, expected, actual map[string]any, path string) {
+ t.Helper()
+
+ // Build current path helper
+ buildPath := func(key string) string {
+ if path == "" {
+ return key
+ }
+ return path + "." + key
+ }
+
+ // Check all expected fields exist and match
+ for key, expectedValue := range expected {
+ currentPath := buildPath(key)
+
+ actualValue, exists := actual[key]
+ if !exists {
+ t.Errorf("reverted config missing field %s", currentPath)
+ continue
+ }
+
+ switch exp := expectedValue.(type) {
+ case map[string]any:
+ act, ok := actualValue.(map[string]any)
+ if !ok {
+ t.Errorf("field %s: expected map, got %T", currentPath, actualValue)
+ continue
+ }
+ compareConfigs(t, exp, act, currentPath)
+ default:
+ if !reflect.DeepEqual(expectedValue, actualValue) {
+ t.Errorf("field %s: expected %v, got %v after revert",
+ currentPath, expectedValue, actualValue)
+ }
+ }
+ }
+
+ // Check for unexpected fields using maps.Keys (Go 1.23+)
+ for key := range actual {
+ if _, exists := expected[key]; !exists {
+ t.Errorf("reverted config has unexpected field %s", buildPath(key))
+ }
+ }
+}
diff --git a/repo/fsrepo/migrations/common/utils.go b/repo/fsrepo/migrations/common/utils.go
new file mode 100644
index 00000000000..217da609fcf
--- /dev/null
+++ b/repo/fsrepo/migrations/common/utils.go
@@ -0,0 +1,107 @@
+package common
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile"
+)
+
+// CheckVersion verifies the repo is at the expected version
+func CheckVersion(repoPath string, expectedVersion string) error {
+ versionPath := filepath.Join(repoPath, "version")
+ versionBytes, err := os.ReadFile(versionPath)
+ if err != nil {
+ return fmt.Errorf("could not read version file: %w", err)
+ }
+ version := strings.TrimSpace(string(versionBytes))
+ if version != expectedVersion {
+ return fmt.Errorf("expected version %s, got %s", expectedVersion, version)
+ }
+ return nil
+}
+
+// WriteVersion writes the version to the repo
+func WriteVersion(repoPath string, version string) error {
+ versionPath := filepath.Join(repoPath, "version")
+ return os.WriteFile(versionPath, []byte(version), 0644)
+}
+
+// Must panics if the error is not nil. Use only for errors that cannot be handled gracefully.
+func Must(err error) {
+ if err != nil {
+ panic(fmt.Errorf("error can't be dealt with transactionally: %w", err))
+ }
+}
+
+// WithBackup performs a config file operation with automatic backup and rollback on error
+func WithBackup(configPath string, backupSuffix string, fn func(in io.ReadSeeker, out io.Writer) error) error {
+ in, err := os.Open(configPath)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ // Create backup
+ backup, err := atomicfile.New(configPath+backupSuffix, 0600)
+ if err != nil {
+ return err
+ }
+
+ // Copy to backup
+ if _, err := backup.ReadFrom(in); err != nil {
+ Must(backup.Abort())
+ return err
+ }
+
+ // Reset input for reading
+ if _, err := in.Seek(0, io.SeekStart); err != nil {
+ Must(backup.Abort())
+ return err
+ }
+
+ // Create output file
+ out, err := atomicfile.New(configPath, 0600)
+ if err != nil {
+ Must(backup.Abort())
+ return err
+ }
+
+ // Run the conversion function
+ if err := fn(in, out); err != nil {
+ Must(out.Abort())
+ Must(backup.Abort())
+ return err
+ }
+
+ // Close everything on success
+ Must(out.Close())
+ Must(backup.Close())
+
+ return nil
+}
+
+// RevertBackup restores a backup file
+func RevertBackup(configPath string, backupSuffix string) error {
+ return os.Rename(configPath+backupSuffix, configPath)
+}
+
+// ReadConfig reads and unmarshals a JSON config file into a map
+func ReadConfig(r io.Reader) (map[string]any, error) {
+ confMap := make(map[string]any)
+ if err := json.NewDecoder(r).Decode(&confMap); err != nil {
+ return nil, err
+ }
+ return confMap, nil
+}
+
+// WriteConfig marshals and writes a config map as indented JSON
+func WriteConfig(w io.Writer, config map[string]any) error {
+ enc := json.NewEncoder(w)
+ enc.SetIndent("", " ")
+ return enc.Encode(config)
+}
diff --git a/repo/fsrepo/migrations/embedded.go b/repo/fsrepo/migrations/embedded.go
index 6c839ff1fa5..a2aa4d2523f 100644
--- a/repo/fsrepo/migrations/embedded.go
+++ b/repo/fsrepo/migrations/embedded.go
@@ -6,25 +6,30 @@ import (
"log"
"os"
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration"
+ mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration"
)
-// EmbeddedMigration represents an embedded migration that can be run directly
-type EmbeddedMigration interface {
- Versions() string
- Apply(opts mg16.Options) error
- Revert(opts mg16.Options) error
- Reversible() bool
+// embeddedMigrations contains all embedded migrations
+// Using a slice to maintain order and allow for future range-based operations
+var embeddedMigrations = []common.Migration{
+ mg16.Migration,
+ mg17.Migration,
}
-// embeddedMigrations contains all embedded migrations
-var embeddedMigrations = map[string]EmbeddedMigration{
- "fs-repo-16-to-17": &mg16.Migration{},
+// migrationsByName provides quick lookup by name
+var migrationsByName = make(map[string]common.Migration)
+
+func init() {
+ for _, m := range embeddedMigrations {
+ migrationsByName["fs-repo-"+m.Versions()] = m
+ }
}
// RunEmbeddedMigration runs an embedded migration if available
func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir string, revert bool) error {
- migration, exists := embeddedMigrations[migrationName]
+ migration, exists := migrationsByName[migrationName]
if !exists {
return fmt.Errorf("embedded migration %s not found", migrationName)
}
@@ -36,7 +41,7 @@ func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir str
logger := log.New(os.Stdout, "", 0)
logger.Printf("Running embedded migration %s...", migrationName)
- opts := mg16.Options{
+ opts := common.Options{
Path: ipfsDir,
Verbose: true,
}
@@ -58,7 +63,7 @@ func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir str
// HasEmbeddedMigration checks if a migration is available as embedded
func HasEmbeddedMigration(migrationName string) bool {
- _, exists := embeddedMigrations[migrationName]
+ _, exists := migrationsByName[migrationName]
return exists
}
diff --git a/repo/fsrepo/migrations/fetch_test.go b/repo/fsrepo/migrations/fetch_test.go
index 6e87c966bbe..c09b3444a7b 100644
--- a/repo/fsrepo/migrations/fetch_test.go
+++ b/repo/fsrepo/migrations/fetch_test.go
@@ -20,10 +20,7 @@ func TestGetDistPath(t *testing.T) {
}
testDist := "/unit/test/dist"
- err := os.Setenv(envIpfsDistPath, testDist)
- if err != nil {
- panic(err)
- }
+ t.Setenv(envIpfsDistPath, testDist)
defer func() {
os.Unsetenv(envIpfsDistPath)
}()
@@ -139,18 +136,12 @@ func TestFetchBinary(t *testing.T) {
if err != nil {
panic(err)
}
- err = os.Setenv("TMPDIR", tmpDir)
- if err != nil {
- panic(err)
- }
+ t.Setenv("TMPDIR", tmpDir)
_, err = FetchBinary(ctx, fetcher, "go-ipfs", "v1.0.0", "ipfs", tmpDir)
if !os.IsPermission(err) {
t.Error("expected 'permission' error, got:", err)
}
- err = os.Setenv("TMPDIR", "/tmp")
- if err != nil {
- panic(err)
- }
+ t.Setenv("TMPDIR", "/tmp")
err = os.Chmod(tmpDir, 0o755)
if err != nil {
panic(err)
diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/main.go b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go
index df0963f3bca..835b002fbaa 100644
--- a/repo/fsrepo/migrations/fs-repo-16-to-17/main.go
+++ b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go
@@ -28,6 +28,7 @@ import (
"fmt"
"os"
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration"
)
@@ -43,17 +44,16 @@ func main() {
os.Exit(1)
}
- m := mg16.Migration{}
- opts := mg16.Options{
+ opts := common.Options{
Path: *path,
Verbose: *verbose,
}
var err error
if *revert {
- err = m.Revert(opts)
+ err = mg16.Migration.Revert(opts)
} else {
- err = m.Apply(opts)
+ err = mg16.Migration.Apply(opts)
}
if err != nil {
diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go
index 01cab8932c4..248423b2893 100644
--- a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go
+++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go
@@ -7,27 +7,13 @@
package mg16
import (
- "encoding/json"
- "fmt"
"io"
- "os"
- "path/filepath"
- "reflect"
"slices"
- "strings"
"github.com/ipfs/kubo/config"
- "github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile"
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)
-// Options contains migration options for embedded migrations
-type Options struct {
- Path string
- Verbose bool
-}
-
-const backupSuffix = ".16-to-17.bak"
-
// DefaultBootstrapAddresses are the hardcoded bootstrap addresses from Kubo 0.36
// for IPFS. they are nodes run by the IPFS team. docs on these later.
// As with all p2p networks, bootstrap is an important security concern.
@@ -42,148 +28,23 @@ var DefaultBootstrapAddresses = []string{
"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
}
-// Migration implements the migration described above.
-type Migration struct{}
-
-// Versions returns the current version string for this migration.
-func (m Migration) Versions() string {
- return "16-to-17"
-}
-
-// Reversible returns true, as we keep old config around
-func (m Migration) Reversible() bool {
- return true
-}
-
-// Apply update the config.
-func (m Migration) Apply(opts Options) error {
- if opts.Verbose {
- fmt.Printf("applying %s repo migration\n", m.Versions())
- }
-
- // Check version
- if err := checkVersion(opts.Path, "16"); err != nil {
- return err
- }
-
- if opts.Verbose {
- fmt.Println("> Upgrading config to use AutoConf system")
- }
-
- path := filepath.Join(opts.Path, "config")
- in, err := os.Open(path)
- if err != nil {
- return err
- }
-
- // make backup
- backup, err := atomicfile.New(path+backupSuffix, 0600)
- if err != nil {
- return err
- }
- if _, err := backup.ReadFrom(in); err != nil {
- panicOnError(backup.Abort())
- return err
- }
- if _, err := in.Seek(0, io.SeekStart); err != nil {
- panicOnError(backup.Abort())
- return err
- }
-
- // Create a temp file to write the output to on success
- out, err := atomicfile.New(path, 0600)
- if err != nil {
- panicOnError(backup.Abort())
- panicOnError(in.Close())
- return err
- }
-
- if err := convert(in, out, opts.Path); err != nil {
- panicOnError(out.Abort())
- panicOnError(backup.Abort())
- panicOnError(in.Close())
- return err
- }
-
- if err := in.Close(); err != nil {
- panicOnError(out.Abort())
- panicOnError(backup.Abort())
- }
-
- if err := writeVersion(opts.Path, "17"); err != nil {
- fmt.Println("failed to update version file to 17")
- // There was an error so abort writing the output and clean up temp file
- panicOnError(out.Abort())
- panicOnError(backup.Abort())
- return err
- } else {
- // Write the output and clean up temp file
- panicOnError(out.Close())
- panicOnError(backup.Close())
- }
-
- if opts.Verbose {
- fmt.Println("updated version file")
- fmt.Println("Migration 16 to 17 succeeded")
- }
- return nil
-}
-
-// panicOnError is reserved for checks we can't solve transactionally if an error occurs
-func panicOnError(e error) {
- if e != nil {
- panic(fmt.Errorf("error can't be dealt with transactionally: %w", e))
- }
-}
-
-func (m Migration) Revert(opts Options) error {
- if opts.Verbose {
- fmt.Println("reverting migration")
- }
-
- if err := checkVersion(opts.Path, "17"); err != nil {
- return err
- }
-
- cfg := filepath.Join(opts.Path, "config")
- if err := os.Rename(cfg+backupSuffix, cfg); err != nil {
- return err
- }
-
- if err := writeVersion(opts.Path, "16"); err != nil {
- return err
- }
- if opts.Verbose {
- fmt.Println("lowered version number to 16")
- }
-
- return nil
-}
-
-// checkVersion verifies the repo is at the expected version
-func checkVersion(repoPath string, expectedVersion string) error {
- versionPath := filepath.Join(repoPath, "version")
- versionBytes, err := os.ReadFile(versionPath)
- if err != nil {
- return fmt.Errorf("could not read version file: %w", err)
- }
- version := strings.TrimSpace(string(versionBytes))
- if version != expectedVersion {
- return fmt.Errorf("expected version %s, got %s", expectedVersion, version)
- }
- return nil
+// Migration is the main exported migration for 16-to-17
+var Migration = &common.BaseMigration{
+ FromVersion: "16",
+ ToVersion: "17",
+ Description: "Upgrading config to use AutoConf system",
+ Convert: convert,
}
-// writeVersion writes the version to the repo
-func writeVersion(repoPath string, version string) error {
- versionPath := filepath.Join(repoPath, "version")
- return os.WriteFile(versionPath, []byte(version), 0644)
+// NewMigration creates a new migration instance (for compatibility)
+func NewMigration() common.Migration {
+ return Migration
}
// convert converts the config from version 16 to 17
-func convert(in io.Reader, out io.Writer, repoPath string) error {
- confMap := make(map[string]any)
- if err := json.NewDecoder(in).Decode(&confMap); err != nil {
+func convert(in io.ReadSeeker, out io.Writer) error {
+ confMap, err := common.ReadConfig(in)
+ if err != nil {
return err
}
@@ -193,7 +54,7 @@ func convert(in io.Reader, out io.Writer, repoPath string) error {
}
// Migrate Bootstrap peers
- if err := migrateBootstrap(confMap, repoPath); err != nil {
+ if err := migrateBootstrap(confMap); err != nil {
return err
}
@@ -213,88 +74,62 @@ func convert(in io.Reader, out io.Writer, repoPath string) error {
}
// Save new config
- fixed, err := json.MarshalIndent(confMap, "", " ")
- if err != nil {
- return err
- }
-
- if _, err := out.Write(fixed); err != nil {
- return err
- }
- _, err = out.Write([]byte("\n"))
- return err
+ return common.WriteConfig(out, confMap)
}
// enableAutoConf adds AutoConf section to config
func enableAutoConf(confMap map[string]any) error {
- // Check if AutoConf already exists
- if _, exists := confMap["AutoConf"]; exists {
- return nil
- }
-
- // Add empty AutoConf section - all fields will use implicit defaults:
+ // Add empty AutoConf section if it doesn't exist - all fields will use implicit defaults:
// - Enabled defaults to true (via DefaultAutoConfEnabled)
// - URL defaults to mainnet URL (via DefaultAutoConfURL)
// - RefreshInterval defaults to 24h (via DefaultAutoConfRefreshInterval)
// - TLSInsecureSkipVerify defaults to false (no WithDefault, but false is zero value)
- confMap["AutoConf"] = map[string]any{}
-
+ common.SetDefault(confMap, "AutoConf", map[string]any{})
return nil
}
// migrateBootstrap migrates bootstrap peers to use "auto"
-func migrateBootstrap(confMap map[string]any, repoPath string) error {
+func migrateBootstrap(confMap map[string]any) error {
bootstrap, exists := confMap["Bootstrap"]
if !exists {
// No bootstrap section, add "auto"
- confMap["Bootstrap"] = []string{"auto"}
+ confMap["Bootstrap"] = []string{config.AutoPlaceholder}
return nil
}
- bootstrapSlice, ok := bootstrap.([]interface{})
- if !ok {
+ // Convert to string slice using helper
+ bootstrapPeers := common.ConvertInterfaceSlice(common.SafeCastSlice(bootstrap))
+ if len(bootstrapPeers) == 0 && bootstrap != nil {
// Invalid bootstrap format, replace with "auto"
- confMap["Bootstrap"] = []string{"auto"}
+ confMap["Bootstrap"] = []string{config.AutoPlaceholder}
return nil
}
- // Convert to string slice
- var bootstrapPeers []string
- for _, peer := range bootstrapSlice {
- if peerStr, ok := peer.(string); ok {
- bootstrapPeers = append(bootstrapPeers, peerStr)
- }
- }
-
- // Check if we should replace with "auto"
- newBootstrap := processBootstrapPeers(bootstrapPeers, repoPath)
+ // Process bootstrap peers according to migration rules
+ newBootstrap := processBootstrapPeers(bootstrapPeers)
confMap["Bootstrap"] = newBootstrap
return nil
}
// processBootstrapPeers processes bootstrap peers according to migration rules
-func processBootstrapPeers(peers []string, repoPath string) []string {
+func processBootstrapPeers(peers []string) []string {
// If empty, use "auto"
if len(peers) == 0 {
- return []string{"auto"}
+ return []string{config.AutoPlaceholder}
}
- // Separate default peers from custom ones
- var customPeers []string
- var hasDefaultPeers bool
+ // Filter out default peers to get only custom ones
+ customPeers := slices.DeleteFunc(slices.Clone(peers), func(peer string) bool {
+ return slices.Contains(DefaultBootstrapAddresses, peer)
+ })
- for _, peer := range peers {
- if slices.Contains(DefaultBootstrapAddresses, peer) {
- hasDefaultPeers = true
- } else {
- customPeers = append(customPeers, peer)
- }
- }
+ // Check if any default peers were removed
+ hasDefaultPeers := len(customPeers) < len(peers)
// If we have default peers, replace them with "auto"
if hasDefaultPeers {
- return append([]string{"auto"}, customPeers...)
+ return append([]string{config.AutoPlaceholder}, customPeers...)
}
// No default peers found, keep as is
@@ -303,68 +138,25 @@ func processBootstrapPeers(peers []string, repoPath string) []string {
// migrateDNSResolvers migrates DNS resolvers to use "auto" for "." eTLD
func migrateDNSResolvers(confMap map[string]any) error {
- dnsSection, exists := confMap["DNS"]
- if !exists {
- // No DNS section, create it with "auto"
- confMap["DNS"] = map[string]any{
- "Resolvers": map[string]string{
- ".": config.AutoPlaceholder,
- },
- }
- return nil
- }
-
- dns, ok := dnsSection.(map[string]any)
- if !ok {
- // Invalid DNS format, replace with "auto"
- confMap["DNS"] = map[string]any{
- "Resolvers": map[string]string{
- ".": config.AutoPlaceholder,
- },
- }
- return nil
- }
+ // Get or create DNS section
+ dns := common.GetOrCreateSection(confMap, "DNS")
- resolvers, exists := dns["Resolvers"]
- if !exists {
- // No resolvers, add "auto"
- dns["Resolvers"] = map[string]string{
- ".": config.AutoPlaceholder,
- }
- return nil
- }
+ // Get existing resolvers or create empty map
+ resolvers := common.SafeCastMap(dns["Resolvers"])
- resolversMap, ok := resolvers.(map[string]any)
- if !ok {
- // Invalid resolvers format, replace with "auto"
- dns["Resolvers"] = map[string]string{
- ".": config.AutoPlaceholder,
- }
- return nil
- }
-
- // Convert to string map and replace default resolvers with "auto"
- stringResolvers := make(map[string]string)
+ // Define default resolvers that should be replaced with "auto"
defaultResolvers := map[string]string{
- "https://dns.eth.limo/dns-query": "auto",
- "https://dns.eth.link/dns-query": "auto",
- "https://resolver.cloudflare-eth.com/dns-query": "auto",
+ "https://dns.eth.limo/dns-query": config.AutoPlaceholder,
+ "https://dns.eth.link/dns-query": config.AutoPlaceholder,
+ "https://resolver.cloudflare-eth.com/dns-query": config.AutoPlaceholder,
}
- for k, v := range resolversMap {
- if vStr, ok := v.(string); ok {
- // Check if this is a default resolver that should be replaced
- if replacement, isDefault := defaultResolvers[vStr]; isDefault {
- stringResolvers[k] = replacement
- } else {
- stringResolvers[k] = vStr
- }
- }
- }
+ // Replace default resolvers with "auto"
+ stringResolvers := common.ReplaceDefaultsWithAuto(resolvers, defaultResolvers)
- // If "." is not set or empty, set it to "auto"
+ // Ensure "." is set to "auto" if not already set
if _, exists := stringResolvers["."]; !exists {
- stringResolvers["."] = "auto"
+ stringResolvers["."] = config.AutoPlaceholder
}
dns["Resolvers"] = stringResolvers
@@ -373,120 +165,57 @@ func migrateDNSResolvers(confMap map[string]any) error {
// migrateDelegatedRouters migrates DelegatedRouters to use "auto"
func migrateDelegatedRouters(confMap map[string]any) error {
- routing, exists := confMap["Routing"]
- if !exists {
- // No routing section, create it with "auto"
- confMap["Routing"] = map[string]any{
- "DelegatedRouters": []string{"auto"},
- }
- return nil
- }
+ // Get or create Routing section
+ routing := common.GetOrCreateSection(confMap, "Routing")
- routingMap, ok := routing.(map[string]any)
- if !ok {
- // Invalid routing format, replace with "auto"
- confMap["Routing"] = map[string]any{
- "DelegatedRouters": []string{"auto"},
- }
- return nil
- }
-
- delegatedRouters, exists := routingMap["DelegatedRouters"]
- if !exists {
- // No delegated routers, add "auto"
- routingMap["DelegatedRouters"] = []string{"auto"}
- return nil
- }
+ // Get existing delegated routers
+ delegatedRouters, exists := routing["DelegatedRouters"]
// Check if it's empty or nil
- if shouldReplaceWithAuto(delegatedRouters) {
- routingMap["DelegatedRouters"] = []string{"auto"}
+ if !exists || common.IsEmptySlice(delegatedRouters) {
+ routing["DelegatedRouters"] = []string{config.AutoPlaceholder}
return nil
}
// Process the list to replace cid.contact with "auto" and preserve others
- if slice, ok := delegatedRouters.([]interface{}); ok {
- var newRouters []string
- hasAuto := false
-
- for _, router := range slice {
- if routerStr, ok := router.(string); ok {
- if routerStr == "https://cid.contact" {
- if !hasAuto {
- newRouters = append(newRouters, "auto")
- hasAuto = true
- }
- } else {
- newRouters = append(newRouters, routerStr)
- }
+ routers := common.ConvertInterfaceSlice(common.SafeCastSlice(delegatedRouters))
+ var newRouters []string
+ hasAuto := false
+
+ for _, router := range routers {
+ if router == "https://cid.contact" {
+ if !hasAuto {
+ newRouters = append(newRouters, config.AutoPlaceholder)
+ hasAuto = true
}
+ } else {
+ newRouters = append(newRouters, router)
}
+ }
- // If empty after processing, add "auto"
- if len(newRouters) == 0 {
- newRouters = []string{"auto"}
- }
-
- routingMap["DelegatedRouters"] = newRouters
+ // If empty after processing, add "auto"
+ if len(newRouters) == 0 {
+ newRouters = []string{config.AutoPlaceholder}
}
+ routing["DelegatedRouters"] = newRouters
return nil
}
// migrateDelegatedPublishers migrates DelegatedPublishers to use "auto"
func migrateDelegatedPublishers(confMap map[string]any) error {
- ipns, exists := confMap["Ipns"]
- if !exists {
- // No IPNS section, create it with "auto"
- confMap["Ipns"] = map[string]any{
- "DelegatedPublishers": []string{"auto"},
- }
- return nil
- }
-
- ipnsMap, ok := ipns.(map[string]any)
- if !ok {
- // Invalid IPNS format, replace with "auto"
- confMap["Ipns"] = map[string]any{
- "DelegatedPublishers": []string{"auto"},
- }
- return nil
- }
+ // Get or create Ipns section
+ ipns := common.GetOrCreateSection(confMap, "Ipns")
- delegatedPublishers, exists := ipnsMap["DelegatedPublishers"]
- if !exists {
- // No delegated publishers, add "auto"
- ipnsMap["DelegatedPublishers"] = []string{"auto"}
- return nil
- }
+ // Get existing delegated publishers
+ delegatedPublishers, exists := ipns["DelegatedPublishers"]
// Check if it's empty or nil - only then replace with "auto"
// Otherwise preserve custom publishers
- if shouldReplaceWithAuto(delegatedPublishers) {
- ipnsMap["DelegatedPublishers"] = []string{"auto"}
+ if !exists || common.IsEmptySlice(delegatedPublishers) {
+ ipns["DelegatedPublishers"] = []string{config.AutoPlaceholder}
}
// If there are custom publishers, leave them as is
return nil
}
-
-// shouldReplaceWithAuto checks if a field should be replaced with "auto"
-func shouldReplaceWithAuto(field any) bool {
- // If it's nil, replace with "auto"
- if field == nil {
- return true
- }
-
- // If it's an empty slice, replace with "auto"
- if slice, ok := field.([]interface{}); ok {
- return len(slice) == 0
- }
-
- // If it's an empty array, replace with "auto"
- if reflect.TypeOf(field).Kind() == reflect.Slice {
- v := reflect.ValueOf(field)
- return v.Len() == 0
- }
-
- return false
-}
diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go
index 2e80809a4fb..ef13eb92a5c 100644
--- a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go
+++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go
@@ -7,6 +7,7 @@ import (
"path/filepath"
"testing"
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -15,9 +16,7 @@ import (
func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} {
t.Helper()
var output bytes.Buffer
- // Use t.TempDir() for test isolation and parallel execution support
- tempDir := t.TempDir()
- err := convert(bytes.NewReader([]byte(input)), &output, tempDir)
+ err := convert(bytes.NewReader([]byte(input)), &output)
require.NoError(t, err)
var result map[string]interface{}
@@ -137,13 +136,12 @@ func TestMigration(t *testing.T) {
require.NoError(t, err)
// Run migration
- migration := &Migration{}
- opts := Options{
+ opts := common.Options{
Path: tempDir,
Verbose: true,
}
- err = migration.Apply(opts)
+ err = Migration.Apply(opts)
require.NoError(t, err)
// Verify version was updated
@@ -191,7 +189,7 @@ func TestMigration(t *testing.T) {
assert.Equal(t, "auto", delegatedPublishers[0], "Expected DelegatedPublishers to be ['auto']")
// Test revert
- err = migration.Revert(opts)
+ err = Migration.Revert(opts)
require.NoError(t, err)
// Verify version was reverted
@@ -273,7 +271,7 @@ func TestBootstrapMigration(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- result := processBootstrapPeers(tt.peers, "")
+ result := processBootstrapPeers(tt.peers)
require.Equal(t, len(tt.expected), len(result), "Expected %d peers, got %d", len(tt.expected), len(result))
for i, expected := range tt.expected {
assert.Equal(t, expected, result[i], "Expected peer %d to be %s", i, expected)
diff --git a/repo/fsrepo/migrations/fs-repo-17-to-18/main.go b/repo/fsrepo/migrations/fs-repo-17-to-18/main.go
new file mode 100644
index 00000000000..777c242d27e
--- /dev/null
+++ b/repo/fsrepo/migrations/fs-repo-17-to-18/main.go
@@ -0,0 +1,60 @@
+// Package main implements fs-repo-17-to-18 migration for IPFS repositories.
+//
+// This migration consolidates the Provider and Reprovider configurations into
+// a unified Provide configuration section.
+//
+// Changes made:
+// - Migrates Provider.Enabled to Provide.Enabled
+// - Migrates Provider.WorkerCount to Provide.DHT.MaxWorkers
+// - Migrates Reprovider.Strategy to Provide.Strategy (converts "flat" to "all")
+// - Migrates Reprovider.Interval to Provide.DHT.Interval
+// - Removes deprecated Provider and Reprovider sections
+//
+// The migration is reversible and creates config.17-to-18.bak for rollback.
+//
+// Usage:
+//
+// fs-repo-17-to-18 -path /path/to/ipfs/repo [-verbose] [-revert]
+//
+// This migration is embedded in Kubo and runs automatically during daemon startup.
+// This standalone binary is provided for manual migration scenarios.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
+ mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration"
+)
+
+func main() {
+ var path = flag.String("path", "", "Path to IPFS repository")
+ var verbose = flag.Bool("verbose", false, "Enable verbose output")
+ var revert = flag.Bool("revert", false, "Revert migration")
+ flag.Parse()
+
+ if *path == "" {
+ fmt.Fprintf(os.Stderr, "Error: -path flag is required\n")
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ opts := common.Options{
+ Path: *path,
+ Verbose: *verbose,
+ }
+
+ var err error
+ if *revert {
+ err = mg17.Migration.Revert(opts)
+ } else {
+ err = mg17.Migration.Apply(opts)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Migration failed: %v\n", err)
+ os.Exit(1)
+ }
+}
diff --git a/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go
new file mode 100644
index 00000000000..27fd9a7de20
--- /dev/null
+++ b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go
@@ -0,0 +1,121 @@
+// package mg17 contains the code to perform 17-18 repository migration in Kubo.
+// This handles the following:
+// - Migrate Provider and Reprovider configs to unified Provide config
+// - Clear deprecated Provider and Reprovider fields
+// - Increment repo version to 18
+package mg17
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
+)
+
+// Migration is the main exported migration for 17-to-18
+var Migration = &common.BaseMigration{
+ FromVersion: "17",
+ ToVersion: "18",
+ Description: "Migrating Provider and Reprovider configuration to unified Provide configuration",
+ Convert: convert,
+}
+
+// NewMigration creates a new migration instance (for compatibility)
+func NewMigration() common.Migration {
+ return Migration
+}
+
+// convert performs the actual configuration transformation
+func convert(in io.ReadSeeker, out io.Writer) error {
+ // Read the configuration
+ confMap, err := common.ReadConfig(in)
+ if err != nil {
+ return err
+ }
+
+ // Create new Provide section with DHT subsection from Provider and Reprovider
+ provide := make(map[string]any)
+ dht := make(map[string]any)
+ hasNonDefaultValues := false
+
+ // Migrate Provider fields if they exist
+ provider := common.SafeCastMap(confMap["Provider"])
+ if enabled, exists := provider["Enabled"]; exists {
+ provide["Enabled"] = enabled
+ // Log migration for non-default values
+ if enabledBool, ok := enabled.(bool); ok && !enabledBool {
+ fmt.Printf(" Migrated Provider.Enabled=%v to Provide.Enabled=%v\n", enabledBool, enabledBool)
+ hasNonDefaultValues = true
+ }
+ }
+ if workerCount, exists := provider["WorkerCount"]; exists {
+ dht["MaxWorkers"] = workerCount
+ // Log migration for all worker count values
+ if count, ok := workerCount.(float64); ok {
+ fmt.Printf(" Migrated Provider.WorkerCount=%v to Provide.DHT.MaxWorkers=%v\n", int(count), int(count))
+ hasNonDefaultValues = true
+
+ // Additional guidance for high WorkerCount
+ if count > 5 {
+ fmt.Printf(" ⚠️ For better resource utilization, consider enabling Provide.DHT.SweepEnabled=true\n")
+ fmt.Printf(" and adjusting Provide.DHT.DedicatedBurstWorkers if announcement of new CIDs\n")
+ fmt.Printf(" should take priority over periodic reprovide interval.\n")
+ }
+ }
+ }
+ // Note: Skip Provider.Strategy as it was unused
+
+ // Migrate Reprovider fields if they exist
+ reprovider := common.SafeCastMap(confMap["Reprovider"])
+ if strategy, exists := reprovider["Strategy"]; exists {
+ if strategyStr, ok := strategy.(string); ok {
+ // Convert deprecated "flat" strategy to "all"
+ if strategyStr == "flat" {
+ provide["Strategy"] = "all"
+ fmt.Printf(" Migrated deprecated Reprovider.Strategy=\"flat\" to Provide.Strategy=\"all\"\n")
+ } else {
+ // Migrate any other strategy value as-is
+ provide["Strategy"] = strategyStr
+ fmt.Printf(" Migrated Reprovider.Strategy=\"%s\" to Provide.Strategy=\"%s\"\n", strategyStr, strategyStr)
+ }
+ hasNonDefaultValues = true
+ } else {
+ // Not a string, set to default "all" to ensure valid config
+ provide["Strategy"] = "all"
+ fmt.Printf(" Warning: Reprovider.Strategy was not a string, setting Provide.Strategy=\"all\"\n")
+ hasNonDefaultValues = true
+ }
+ }
+ if interval, exists := reprovider["Interval"]; exists {
+ dht["Interval"] = interval
+ // Log migration for non-default intervals
+ if intervalStr, ok := interval.(string); ok && intervalStr != "22h" && intervalStr != "" {
+ fmt.Printf(" Migrated Reprovider.Interval=\"%s\" to Provide.DHT.Interval=\"%s\"\n", intervalStr, intervalStr)
+ hasNonDefaultValues = true
+ }
+ }
+ // Note: Sweep is a new field introduced in v0.38, not present in v0.37
+ // So we don't need to migrate it from Reprovider
+
+ // Set the DHT section if we have any DHT fields to migrate
+ if len(dht) > 0 {
+ provide["DHT"] = dht
+ }
+
+ // Set the new Provide section if we have any fields to migrate
+ if len(provide) > 0 {
+ confMap["Provide"] = provide
+ }
+
+ // Clear old Provider and Reprovider sections
+ delete(confMap, "Provider")
+ delete(confMap, "Reprovider")
+
+ // Print documentation link if we migrated any non-default values
+ if hasNonDefaultValues {
+ fmt.Printf(" See: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide\n")
+ }
+
+ // Write the updated config
+ return common.WriteConfig(out, confMap)
+}
diff --git a/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration_test.go b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration_test.go
new file mode 100644
index 00000000000..2987a407a4b
--- /dev/null
+++ b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration_test.go
@@ -0,0 +1,176 @@
+package mg17
+
+import (
+ "testing"
+
+ "github.com/ipfs/kubo/repo/fsrepo/migrations/common"
+)
+
+func TestMigration17to18(t *testing.T) {
+ migration := NewMigration()
+
+ testCases := []common.TestCase{
+ {
+ Name: "Migrate Provider and Reprovider to Provide",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Provider": map[string]any{
+ "Enabled": true,
+ "WorkerCount": 8,
+ "Strategy": "unused", // This field was unused and should be ignored
+ },
+ "Reprovider": map[string]any{
+ "Strategy": "pinned",
+ "Interval": "12h",
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide.Enabled", Expected: true},
+ {Path: "Provide.DHT.MaxWorkers", Expected: float64(8)}, // JSON unmarshals to float64
+ {Path: "Provide.Strategy", Expected: "pinned"},
+ {Path: "Provide.DHT.Interval", Expected: "12h"},
+ {Path: "Provider", Expected: nil}, // Should be deleted
+ {Path: "Reprovider", Expected: nil}, // Should be deleted
+ },
+ },
+ {
+ Name: "Convert flat strategy to all",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Provider": map[string]any{
+ "Enabled": false,
+ },
+ "Reprovider": map[string]any{
+ "Strategy": "flat", // Deprecated, should be converted to "all"
+ "Interval": "24h",
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide.Enabled", Expected: false},
+ {Path: "Provide.Strategy", Expected: "all"}, // "flat" converted to "all"
+ {Path: "Provide.DHT.Interval", Expected: "24h"},
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ },
+ },
+ {
+ Name: "Handle missing Provider section",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Reprovider": map[string]any{
+ "Strategy": "roots",
+ "Interval": "6h",
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide.Strategy", Expected: "roots"},
+ {Path: "Provide.DHT.Interval", Expected: "6h"},
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ },
+ },
+ {
+ Name: "Handle missing Reprovider section",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Provider": map[string]any{
+ "Enabled": true,
+ "WorkerCount": 16,
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide.Enabled", Expected: true},
+ {Path: "Provide.DHT.MaxWorkers", Expected: float64(16)},
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ },
+ },
+ {
+ Name: "Handle empty Provider and Reprovider sections",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Provider": map[string]any{},
+ "Reprovider": map[string]any{},
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide", Expected: nil}, // No fields to migrate
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ },
+ },
+ {
+ Name: "Handle missing both sections",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Datastore": map[string]any{
+ "StorageMax": "10GB",
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide", Expected: nil}, // No Provider/Reprovider to migrate
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ {Path: "Datastore.StorageMax", Expected: "10GB"}, // Other config preserved
+ },
+ },
+ {
+ Name: "Preserve other config sections",
+ InputConfig: common.GenerateTestConfig(map[string]any{
+ "Provider": map[string]any{
+ "Enabled": true,
+ },
+ "Reprovider": map[string]any{
+ "Strategy": "all",
+ },
+ "Swarm": map[string]any{
+ "ConnMgr": map[string]any{
+ "Type": "basic",
+ },
+ },
+ }),
+ Assertions: []common.ConfigAssertion{
+ {Path: "Provide.Enabled", Expected: true},
+ {Path: "Provide.Strategy", Expected: "all"},
+ {Path: "Swarm.ConnMgr.Type", Expected: "basic"}, // Other config preserved
+ {Path: "Provider", Expected: nil},
+ {Path: "Reprovider", Expected: nil},
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ common.RunMigrationTest(t, migration, tc)
+ })
+ }
+}
+
+func TestMigration17to18Reversible(t *testing.T) {
+ migration := NewMigration()
+
+ // Test that migration is reversible
+ inputConfig := common.GenerateTestConfig(map[string]any{
+ "Provide": map[string]any{
+ "Enabled": true,
+ "WorkerCount": 8,
+ "Strategy": "pinned",
+ "Interval": "12h",
+ },
+ })
+
+ // Test full migration and revert
+ migratedConfig := common.AssertMigrationSuccess(t, migration, 17, 18, inputConfig)
+
+ // Check that Provide section exists after migration
+ common.AssertConfigField(t, migratedConfig, "Provide.Enabled", true)
+
+ // Test revert
+ common.AssertMigrationReversible(t, migration, 17, 18, migratedConfig)
+}
+
+func TestMigration17to18Integration(t *testing.T) {
+ migration := NewMigration()
+
+ // Test that the migration properly integrates with the common framework
+ if migration.Versions() != "17-to-18" {
+ t.Errorf("expected versions '17-to-18', got '%s'", migration.Versions())
+ }
+
+ if !migration.Reversible() {
+ t.Error("migration should be reversible")
+ }
+}
diff --git a/repo/fsrepo/migrations/ipfsdir.go b/repo/fsrepo/migrations/ipfsdir.go
index 8cb087d5345..88b39459b79 100644
--- a/repo/fsrepo/migrations/ipfsdir.go
+++ b/repo/fsrepo/migrations/ipfsdir.go
@@ -8,12 +8,11 @@ import (
"strconv"
"strings"
+ "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/misc/fsutil"
)
const (
- envIpfsPath = "IPFS_PATH"
- defIpfsDir = ".ipfs"
versionFile = "version"
)
@@ -24,25 +23,16 @@ const (
func IpfsDir(dir string) (string, error) {
var err error
if dir == "" {
- dir = os.Getenv(envIpfsPath)
- }
- if dir != "" {
- dir, err = fsutil.ExpandHome(dir)
+ dir, err = config.PathRoot()
if err != nil {
return "", err
}
- return dir, nil
}
-
- home, err := os.UserHomeDir()
+ dir, err = fsutil.ExpandHome(dir)
if err != nil {
return "", err
}
- if home == "" {
- return "", errors.New("could not determine IPFS_PATH, home dir not set")
- }
-
- return filepath.Join(home, defIpfsDir), nil
+ return dir, nil
}
// CheckIpfsDir gets the ipfs directory and checks that the directory exists.
diff --git a/repo/fsrepo/migrations/ipfsdir_test.go b/repo/fsrepo/migrations/ipfsdir_test.go
index e4e6267943b..c94ebc58671 100644
--- a/repo/fsrepo/migrations/ipfsdir_test.go
+++ b/repo/fsrepo/migrations/ipfsdir_test.go
@@ -4,24 +4,28 @@ import (
"os"
"path/filepath"
"testing"
-)
-var (
- fakeHome string
- fakeIpfs string
+ "github.com/ipfs/kubo/config"
)
func TestRepoDir(t *testing.T) {
- fakeHome = t.TempDir()
- os.Setenv("HOME", fakeHome)
- fakeIpfs = filepath.Join(fakeHome, ".ipfs")
-
- t.Run("testIpfsDir", testIpfsDir)
- t.Run("testCheckIpfsDir", testCheckIpfsDir)
- t.Run("testRepoVersion", testRepoVersion)
+ fakeHome := t.TempDir()
+ t.Setenv("HOME", fakeHome)
+ fakeIpfs := filepath.Join(fakeHome, ".ipfs")
+ t.Setenv(config.EnvDir, fakeIpfs)
+
+ t.Run("testIpfsDir", func(t *testing.T) {
+ testIpfsDir(t, fakeIpfs)
+ })
+ t.Run("testCheckIpfsDir", func(t *testing.T) {
+ testCheckIpfsDir(t, fakeIpfs)
+ })
+ t.Run("testRepoVersion", func(t *testing.T) {
+ testRepoVersion(t, fakeIpfs)
+ })
}
-func testIpfsDir(t *testing.T) {
+func testIpfsDir(t *testing.T, fakeIpfs string) {
_, err := CheckIpfsDir("")
if err == nil {
t.Fatal("expected error when no .ipfs directory to find")
@@ -37,16 +41,16 @@ func testIpfsDir(t *testing.T) {
t.Fatal(err)
}
if dir != fakeIpfs {
- t.Fatal("wrong ipfs directory:", dir)
+ t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs)
}
- os.Setenv(envIpfsPath, "~/.ipfs")
+ t.Setenv(config.EnvDir, "~/.ipfs")
dir, err = IpfsDir("")
if err != nil {
t.Fatal(err)
}
if dir != fakeIpfs {
- t.Fatal("wrong ipfs directory:", dir)
+ t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs)
}
_, err = IpfsDir("~somesuer/foo")
@@ -54,15 +58,12 @@ func testIpfsDir(t *testing.T) {
t.Fatal("expected error with user-specific home dir")
}
- err = os.Setenv(envIpfsPath, "~somesuer/foo")
- if err != nil {
- panic(err)
- }
+ t.Setenv(config.EnvDir, "~somesuer/foo")
_, err = IpfsDir("~somesuer/foo")
if err == nil {
t.Fatal("expected error with user-specific home dir")
}
- err = os.Unsetenv(envIpfsPath)
+ err = os.Unsetenv(config.EnvDir)
if err != nil {
panic(err)
}
@@ -72,7 +73,7 @@ func testIpfsDir(t *testing.T) {
t.Fatal(err)
}
if dir != fakeIpfs {
- t.Fatal("wrong ipfs directory:", dir)
+ t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs)
}
_, err = IpfsDir("")
@@ -81,7 +82,7 @@ func testIpfsDir(t *testing.T) {
}
}
-func testCheckIpfsDir(t *testing.T) {
+func testCheckIpfsDir(t *testing.T, fakeIpfs string) {
_, err := CheckIpfsDir("~somesuer/foo")
if err == nil {
t.Fatal("expected error with user-specific home dir")
@@ -101,7 +102,7 @@ func testCheckIpfsDir(t *testing.T) {
}
}
-func testRepoVersion(t *testing.T) {
+func testRepoVersion(t *testing.T, fakeIpfs string) {
badDir := "~somesuer/foo"
_, err := RepoVersion(badDir)
if err == nil {
diff --git a/repo/fsrepo/migrations/migrations_test.go b/repo/fsrepo/migrations/migrations_test.go
index f690290f861..c84e2d22812 100644
--- a/repo/fsrepo/migrations/migrations_test.go
+++ b/repo/fsrepo/migrations/migrations_test.go
@@ -33,9 +33,7 @@ func TestFindMigrations(t *testing.T) {
createFakeBin(i-1, i, tmpDir)
}
- origPath := os.Getenv("PATH")
- os.Setenv("PATH", tmpDir)
- defer os.Setenv("PATH", origPath)
+ t.Setenv("PATH", tmpDir)
migs, bins, err = findMigrations(ctx, 0, 5)
if err != nil {
@@ -80,9 +78,7 @@ func TestFindMigrationsReverse(t *testing.T) {
createFakeBin(i-1, i, tmpDir)
}
- origPath := os.Getenv("PATH")
- os.Setenv("PATH", tmpDir)
- defer os.Setenv("PATH", origPath)
+ t.Setenv("PATH", tmpDir)
migs, bins, err = findMigrations(ctx, 5, 0)
if err != nil {
@@ -144,10 +140,8 @@ func TestFetchMigrations(t *testing.T) {
}
func TestRunMigrations(t *testing.T) {
- fakeHome := t.TempDir()
-
- os.Setenv("HOME", fakeHome)
- fakeIpfs := filepath.Join(fakeHome, ".ipfs")
+ fakeIpfs := filepath.Join(t.TempDir(), ".ipfs")
+ t.Setenv(config.EnvDir, fakeIpfs)
err := os.Mkdir(fakeIpfs, os.ModePerm)
if err != nil {
diff --git a/test/cli/agent_version_unicode_test.go b/test/cli/agent_version_unicode_test.go
new file mode 100644
index 00000000000..732f13466e4
--- /dev/null
+++ b/test/cli/agent_version_unicode_test.go
@@ -0,0 +1,220 @@
+package cli
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/ipfs/kubo/core/commands/cmdutils"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCleanAndTrimUnicode(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "Basic ASCII",
+ input: "kubo/1.0.0",
+ expected: "kubo/1.0.0",
+ },
+ {
+ name: "Polish characters preserved",
+ input: "test-ąęćłńóśźż",
+ expected: "test-ąęćłńóśźż",
+ },
+ {
+ name: "Chinese characters preserved",
+ input: "版本-中文测试",
+ expected: "版本-中文测试",
+ },
+ {
+ name: "Arabic text preserved",
+ input: "اختبار-العربية",
+ expected: "اختبار-العربية",
+ },
+ {
+ name: "Emojis preserved",
+ input: "version-1.0-🚀-🎉",
+ expected: "version-1.0-🚀-🎉",
+ },
+ {
+ name: "Complex Unicode with combining marks preserved",
+ input: "h̸̢̢̢̢̢̢̢̢̢̢e̵̵̵̵̵̵̵̵̵̵l̷̷̷̷̷̷̷̷̷̷l̶̶̶̶̶̶̶̶̶̶o̴̴̴̴̴̴̴̴̴̴",
+ expected: "h̸̢̢̢̢̢̢̢̢̢̢e̵̵̵̵̵̵̵̵̵̵l̷̷̷̷̷̷̷̷̷̷l̶̶̶̶̶̶̶̶̶̶o̴̴̴̴̴̴̴̴̴̴", // Preserved as-is (only 50 runes)
+ },
+ {
+ name: "Long text with combining marks truncated at 128",
+ input: strings.Repeat("ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́", 10), // Very long text (260 runes)
+ expected: "ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂", // Truncated at 128 runes
+ },
+ {
+ name: "Zero-width characters replaced with U+FFFD",
+ input: "test\u200Bzero\u200Cwidth\u200D\uFEFFchars",
+ expected: "test�zero�width��chars",
+ },
+ {
+ name: "RTL/LTR override replaced with U+FFFD",
+ input: "test\u202Drtl\u202Eltr\u202Aoverride",
+ expected: "test�rtl�ltr�override",
+ },
+ {
+ name: "Bidi isolates replaced with U+FFFD",
+ input: "test\u2066bidi\u2067isolate\u2068text\u2069end",
+ expected: "test�bidi�isolate�text�end",
+ },
+ {
+ name: "Control characters replaced with U+FFFD",
+ input: "test\x00null\x1Fescape\x7Fdelete",
+ expected: "test�null�escape�delete",
+ },
+ {
+ name: "Combining marks preserved",
+ input: "e\u0301\u0302\u0303\u0304\u0305", // e with 5 combining marks
+ expected: "e\u0301\u0302\u0303\u0304\u0305", // All preserved
+ },
+ {
+ name: "No truncation at 70 characters",
+ input: "123456789012345678901234567890123456789012345678901234567890123456789",
+ expected: "123456789012345678901234567890123456789012345678901234567890123456789",
+ },
+ {
+ name: "No truncation with Unicode - 70 rockets preserved",
+ input: strings.Repeat("🚀", 70),
+ expected: strings.Repeat("🚀", 70),
+ },
+ {
+ name: "Empty string",
+ input: "",
+ expected: "",
+ },
+ {
+ name: "Only whitespace with control chars",
+ input: " \t\n ",
+ expected: "\uFFFD\uFFFD", // Tab and newline become U+FFFD, spaces trimmed
+ },
+ {
+ name: "Leading and trailing whitespace",
+ input: " test ",
+ expected: "test",
+ },
+ {
+ name: "Complex mix - invisible chars replaced with U+FFFD, Unicode preserved",
+ input: "kubo/1.0-🚀\u200B h̸̢̏̔ḛ̶̽̀s̵t\u202E-ąęł-中文",
+ expected: "kubo/1.0-🚀� h̸̢̏̔ḛ̶̽̀s̵t�-ąęł-中文",
+ },
+ {
+ name: "Emoji with skin tone preserved",
+ input: "👍🏽", // Thumbs up with skin tone modifier
+ expected: "👍🏽", // Preserved as-is
+ },
+ {
+ name: "Mixed scripts preserved",
+ input: "Hello-你好-مرحبا-Здравствуйте",
+ expected: "Hello-你好-مرحبا-Здравствуйте",
+ },
+ {
+ name: "Format characters replaced with U+FFFD",
+ input: "test\u00ADsoft\u2060word\u206Fnom\u200Ebreak",
+ expected: "test�soft�word�nom�break", // Soft hyphen, word joiner, etc replaced
+ },
+ {
+ name: "Complex Unicode text with many combining marks (91 runes, no truncation)",
+ input: "ț̸̢͙̞̖̏̔ȩ̶̰͓̪͎̱̠̥̳͔̽̀̃̿̌̾̀͗̕̕͜s̵̢̛̖̬͈͉͖͇͈̭̥̃́̓̌̾͊̊̂̄̍̅̂͌́ͅţ̴̯̹̪͖͓̘̊́̑̄̋̈́͐̈́̔̇̄̂́̎̓͛͠ͅ test",
+ expected: "ț̸̢͙̞̖̏̔ȩ̶̰͓̪͎̱̠̥̳͔̽̀̃̿̌̾̀͗̕̕͜s̵̢̛̖̬͈͉͖͇͈̭̥̃́̓̌̾͊̊̂̄̍̅̂͌́ͅţ̴̯̹̪͖͓̘̊́̑̄̋̈́͐̈́̔̇̄̂́̎̓͛͠ͅ test", // Not truncated (91 < 128)
+ },
+ {
+ name: "Truncation at 128 characters",
+ input: strings.Repeat("a", 150),
+ expected: strings.Repeat("a", 128),
+ },
+ {
+ name: "Truncation with Unicode at 128",
+ input: strings.Repeat("🚀", 150),
+ expected: strings.Repeat("🚀", 128),
+ },
+ {
+ name: "Private use characters preserved (per spec)",
+ input: "test\uE000\uF8FF", // Private use area characters
+ expected: "test\uE000\uF8FF", // Should be preserved
+ },
+ {
+ name: "U+FFFD replacement for multiple categories",
+ input: "a\x00b\u200Cc\u202Ed", // control, format chars
+ expected: "a\uFFFDb\uFFFDc\uFFFDd", // All replaced with U+FFFD
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := cmdutils.CleanAndTrim(tt.input)
+ assert.Equal(t, tt.expected, result, "CleanAndTrim(%q) = %q, want %q", tt.input, result, tt.expected)
+ })
+ }
+}
+
+func TestCleanAndTrimIdempotent(t *testing.T) {
+ // Test that applying CleanAndTrim twice gives the same result
+ inputs := []string{
+ "test-ąęćłńóśźż",
+ "版本-中文测试",
+ "version-1.0-🚀-🎉",
+ "h̸e̵l̷l̶o̴ w̸o̵r̷l̶d̴",
+ "test\u200Bzero\u200Cwidth",
+ }
+
+ for _, input := range inputs {
+ once := cmdutils.CleanAndTrim(input)
+ twice := cmdutils.CleanAndTrim(once)
+ assert.Equal(t, once, twice, "CleanAndTrim should be idempotent for %q", input)
+ }
+}
+
+func TestCleanAndTrimSecurity(t *testing.T) {
+ // Test that all invisible/dangerous characters are removed
+ tests := []struct {
+ name string
+ input string
+ check func(string) bool
+ }{
+ {
+ name: "No zero-width spaces",
+ input: "test\u200B\u200C\u200Dtest",
+ check: func(s string) bool {
+ return !strings.Contains(s, "\u200B") && !strings.Contains(s, "\u200C") && !strings.Contains(s, "\u200D")
+ },
+ },
+ {
+ name: "No bidi overrides",
+ input: "test\u202A\u202B\u202C\u202D\u202Etest",
+ check: func(s string) bool {
+ for _, r := range []rune{0x202A, 0x202B, 0x202C, 0x202D, 0x202E} {
+ if strings.ContainsRune(s, r) {
+ return false
+ }
+ }
+ return true
+ },
+ },
+ {
+ name: "No control characters",
+ input: "test\x00\x01\x02\x1F\x7Ftest",
+ check: func(s string) bool {
+ for _, r := range s {
+ if r < 0x20 || r == 0x7F {
+ return false
+ }
+ }
+ return true
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := cmdutils.CleanAndTrim(tt.input)
+ assert.True(t, tt.check(result), "Security check failed for %q -> %q", tt.input, result)
+ })
+ }
+}
diff --git a/test/cli/autoconf/expand_test.go b/test/cli/autoconf/expand_test.go
index 45a46560f5b..253c8000b15 100644
--- a/test/cli/autoconf/expand_test.go
+++ b/test/cli/autoconf/expand_test.go
@@ -337,8 +337,8 @@ func testExpandAutoFiltersUnsupportedPathsDelegated(t *testing.T) {
node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"})
node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"})
// Disable content providing when using delegated routing
- node.SetIPFSConfig("Provider.Enabled", false)
- node.SetIPFSConfig("Reprovider.Interval", "0")
+ node.SetIPFSConfig("Provide.Enabled", false)
+ node.SetIPFSConfig("Provide.DHT.Interval", "0")
// Load test autoconf data with unsupported paths
autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json")
@@ -421,8 +421,8 @@ func testExpandAutoWithoutCacheDelegated(t *testing.T) {
node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"})
node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"})
// Disable content providing when using delegated routing
- node.SetIPFSConfig("Provider.Enabled", false)
- node.SetIPFSConfig("Reprovider.Interval", "0")
+ node.SetIPFSConfig("Provide.Enabled", false)
+ node.SetIPFSConfig("Provide.DHT.Interval", "0")
// Load test autoconf data with unsupported paths (this won't be used since no daemon)
autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json")
diff --git a/test/cli/autoconf/ipns_test.go b/test/cli/autoconf/ipns_test.go
index ce5b20bd499..043841e49c4 100644
--- a/test/cli/autoconf/ipns_test.go
+++ b/test/cli/autoconf/ipns_test.go
@@ -200,8 +200,8 @@ func setupNodeWithAutoconf(t *testing.T, publisherURL string, routingType string
// Additional config for delegated routing mode
if routingType == "delegated" {
- node.SetIPFSConfig("Provider.Enabled", false)
- node.SetIPFSConfig("Reprovider.Interval", "0s")
+ node.SetIPFSConfig("Provide.Enabled", false)
+ node.SetIPFSConfig("Provide.DHT.Interval", "0s")
}
// Add bootstrap peers for connectivity
diff --git a/test/cli/files_test.go b/test/cli/files_test.go
index 27526189785..ece87850e19 100644
--- a/test/cli/files_test.go
+++ b/test/cli/files_test.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"testing"
+ "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -118,3 +119,223 @@ func TestFilesCp(t *testing.T) {
assert.Equal(t, data, catRes.Stdout.Trimmed())
})
}
+
+func TestFilesRm(t *testing.T) {
+ t.Parallel()
+
+ t.Run("files rm with --flush=false returns error", func(t *testing.T) {
+ // Test that files rm rejects --flush=false so user does not assume disabling flush works
+ // (rm ignored it before, better to explicitly error)
+ // See https://github.com/ipfs/kubo/issues/10842
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+
+ // Create a file to remove
+ node.IPFS("files", "mkdir", "/test-dir")
+
+ // Try to remove with --flush=false, should error
+ res := node.RunIPFS("files", "rm", "-r", "--flush=false", "/test-dir")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "files rm always flushes for safety")
+ assert.Contains(t, res.Stderr.String(), "cannot be set to false")
+
+ // Verify the directory still exists (wasn't removed due to error)
+ lsRes := node.IPFS("files", "ls", "/")
+ assert.Contains(t, lsRes.Stdout.String(), "test-dir")
+ })
+
+ t.Run("files rm with --flush=true works", func(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+
+ // Create a file to remove
+ node.IPFS("files", "mkdir", "/test-dir")
+
+ // Remove with explicit --flush=true, should work
+ res := node.IPFS("files", "rm", "-r", "--flush=true", "/test-dir")
+ assert.NoError(t, res.Err)
+
+ // Verify the directory was removed
+ lsRes := node.IPFS("files", "ls", "/")
+ assert.NotContains(t, lsRes.Stdout.String(), "test-dir")
+ })
+
+ t.Run("files rm without flush flag works (default behavior)", func(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+
+ // Create a file to remove
+ node.IPFS("files", "mkdir", "/test-dir")
+
+ // Remove without flush flag (should use default which is true)
+ res := node.IPFS("files", "rm", "-r", "/test-dir")
+ assert.NoError(t, res.Err)
+
+ // Verify the directory was removed
+ lsRes := node.IPFS("files", "ls", "/")
+ assert.NotContains(t, lsRes.Stdout.String(), "test-dir")
+ })
+}
+
+func TestFilesNoFlushLimit(t *testing.T) {
+ t.Parallel()
+
+ t.Run("reaches default limit of 256 operations", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+
+ // Perform 256 operations with --flush=false (should succeed)
+ for i := 0; i < 256; i++ {
+ res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i))
+ assert.NoError(t, res.Err, "operation %d should succeed", i+1)
+ }
+
+ // 257th operation should fail
+ res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir256")
+ require.NotNil(t, res.ExitErr, "command should have failed")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "reached limit of 256 unflushed MFS operations")
+ assert.Contains(t, res.Stderr.String(), "run 'ipfs files flush'")
+ assert.Contains(t, res.Stderr.String(), "use --flush=true")
+ assert.Contains(t, res.Stderr.String(), "increase Internal.MFSNoFlushLimit")
+ })
+
+ t.Run("custom limit via config", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ // Set custom limit to 5
+ node.UpdateConfig(func(cfg *config.Config) {
+ limit := config.NewOptionalInteger(5)
+ cfg.Internal.MFSNoFlushLimit = limit
+ })
+
+ node.StartDaemon()
+
+ // Perform 5 operations (should succeed)
+ for i := 0; i < 5; i++ {
+ res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i))
+ assert.NoError(t, res.Err, "operation %d should succeed", i+1)
+ }
+
+ // 6th operation should fail
+ res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir5")
+ require.NotNil(t, res.ExitErr, "command should have failed")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "reached limit of 5 unflushed MFS operations")
+ })
+
+ t.Run("flush=true resets counter", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ // Set limit to 3 for faster testing
+ node.UpdateConfig(func(cfg *config.Config) {
+ limit := config.NewOptionalInteger(3)
+ cfg.Internal.MFSNoFlushLimit = limit
+ })
+
+ node.StartDaemon()
+
+ // Do 2 operations with --flush=false
+ node.IPFS("files", "mkdir", "--flush=false", "/dir1")
+ node.IPFS("files", "mkdir", "--flush=false", "/dir2")
+
+ // Operation with --flush=true should reset counter
+ node.IPFS("files", "mkdir", "--flush=true", "/dir3")
+
+ // Now we should be able to do 3 more operations with --flush=false
+ for i := 4; i <= 6; i++ {
+ res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i))
+ assert.NoError(t, res.Err, "operation after flush should succeed")
+ }
+
+ // 4th operation after reset should fail
+ res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir7")
+ require.NotNil(t, res.ExitErr, "command should have failed")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "reached limit of 3 unflushed MFS operations")
+ })
+
+ t.Run("explicit flush command resets counter", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ // Set limit to 3 for faster testing
+ node.UpdateConfig(func(cfg *config.Config) {
+ limit := config.NewOptionalInteger(3)
+ cfg.Internal.MFSNoFlushLimit = limit
+ })
+
+ node.StartDaemon()
+
+ // Do 2 operations with --flush=false
+ node.IPFS("files", "mkdir", "--flush=false", "/dir1")
+ node.IPFS("files", "mkdir", "--flush=false", "/dir2")
+
+ // Explicit flush should reset counter
+ node.IPFS("files", "flush")
+
+ // Now we should be able to do 3 more operations
+ for i := 3; i <= 5; i++ {
+ res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i))
+ assert.NoError(t, res.Err, "operation after flush should succeed")
+ }
+
+ // 4th operation should fail
+ res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir6")
+ require.NotNil(t, res.ExitErr, "command should have failed")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "reached limit of 3 unflushed MFS operations")
+ })
+
+ t.Run("limit=0 disables the feature", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ // Set limit to 0 (disabled)
+ node.UpdateConfig(func(cfg *config.Config) {
+ limit := config.NewOptionalInteger(0)
+ cfg.Internal.MFSNoFlushLimit = limit
+ })
+
+ node.StartDaemon()
+
+ // Should be able to do many operations without error
+ for i := 0; i < 300; i++ {
+ res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i))
+ assert.NoError(t, res.Err, "operation %d should succeed with limit disabled", i+1)
+ }
+ })
+
+ t.Run("different MFS commands count towards limit", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ // Set limit to 5 for testing
+ node.UpdateConfig(func(cfg *config.Config) {
+ limit := config.NewOptionalInteger(5)
+ cfg.Internal.MFSNoFlushLimit = limit
+ })
+
+ node.StartDaemon()
+
+ // Mix of different MFS operations (5 operations to hit the limit)
+ node.IPFS("files", "mkdir", "--flush=false", "/testdir")
+ // Create a file first, then copy it
+ testCid := node.IPFSAddStr("test content")
+ node.IPFS("files", "cp", "--flush=false", fmt.Sprintf("/ipfs/%s", testCid), "/testfile")
+ node.IPFS("files", "cp", "--flush=false", "/testfile", "/testfile2")
+ node.IPFS("files", "mv", "--flush=false", "/testfile2", "/testfile3")
+ node.IPFS("files", "mkdir", "--flush=false", "/anotherdir")
+
+ // 6th operation should fail
+ res := node.RunIPFS("files", "mkdir", "--flush=false", "/another")
+ require.NotNil(t, res.ExitErr, "command should have failed")
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), "reached limit of 5 unflushed MFS operations")
+ })
+}
diff --git a/test/cli/identity_cid_test.go b/test/cli/identity_cid_test.go
new file mode 100644
index 00000000000..61a464ac5f7
--- /dev/null
+++ b/test/cli/identity_cid_test.go
@@ -0,0 +1,310 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/ipfs/boxo/verifcid"
+ "github.com/ipfs/kubo/config"
+ "github.com/ipfs/kubo/test/cli/harness"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIdentityCIDOverflowProtection(t *testing.T) {
+ t.Parallel()
+
+ t.Run("ipfs add --hash=identity with small data succeeds", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // small data that fits in identity CID
+ smallData := "small data"
+ tempFile := filepath.Join(node.Dir, "small.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ res := node.IPFS("add", "--hash=identity", tempFile)
+ assert.NoError(t, res.Err)
+ cid := strings.Fields(res.Stdout.String())[1]
+
+ // verify it's actually using identity hash
+ res = node.IPFS("cid", "format", "-f", "%h", cid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+ })
+
+ t.Run("ipfs add --hash=identity with large data fails", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // data larger than verifcid.DefaultMaxIdentityDigestSize
+ largeData := strings.Repeat("x", verifcid.DefaultMaxIdentityDigestSize+50)
+ tempFile := filepath.Join(node.Dir, "large.txt")
+ err := os.WriteFile(tempFile, []byte(largeData), 0644)
+ require.NoError(t, err)
+
+ res := node.RunIPFS("add", "--hash=identity", tempFile)
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ // should error with digest too large message
+ assert.Contains(t, res.Stderr.String(), "digest too large")
+ })
+
+ t.Run("ipfs add --inline with valid --inline-limit succeeds", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ smallData := "small inline data"
+ tempFile := filepath.Join(node.Dir, "inline.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ // use limit just under the maximum
+ limit := verifcid.DefaultMaxIdentityDigestSize - 10
+ res := node.IPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", limit), tempFile)
+ assert.NoError(t, res.Err)
+ cid := strings.Fields(res.Stdout.String())[1]
+
+ // verify the CID is using identity hash (inline)
+ res = node.IPFS("cid", "format", "-f", "%h", cid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+
+ // verify the codec (may be dag-pb or raw depending on kubo version)
+ res = node.IPFS("cid", "format", "-f", "%c", cid)
+ assert.NoError(t, res.Err)
+ // Accept either raw or dag-pb as both are valid for inline data
+ codec := res.Stdout.Trimmed()
+ assert.True(t, codec == "raw" || codec == "dag-pb", "expected raw or dag-pb codec, got %s", codec)
+ })
+
+ t.Run("ipfs add --inline with excessive --inline-limit fails", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ smallData := "data"
+ tempFile := filepath.Join(node.Dir, "inline2.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ excessiveLimit := verifcid.DefaultMaxIdentityDigestSize + 50
+ res := node.RunIPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", excessiveLimit), tempFile)
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), fmt.Sprintf("inline-limit %d exceeds maximum allowed size of %d bytes", excessiveLimit, verifcid.DefaultMaxIdentityDigestSize))
+ })
+
+ t.Run("ipfs files write --hash=identity appending to identity CID switches to configured hash", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // create initial small file with identity CID
+ initialData := "initial"
+ tempFile := filepath.Join(node.Dir, "initial.txt")
+ err := os.WriteFile(tempFile, []byte(initialData), 0644)
+ require.NoError(t, err)
+
+ res := node.IPFS("add", "--hash=identity", tempFile)
+ assert.NoError(t, res.Err)
+ cid1 := strings.Fields(res.Stdout.String())[1]
+
+ // verify initial CID uses identity
+ res = node.IPFS("cid", "format", "-f", "%h", cid1)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+
+ // copy to MFS
+ res = node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid1), "/identity-file")
+ assert.NoError(t, res.Err)
+
+ // append data that would exceed identity CID limit
+ appendData := strings.Repeat("a", verifcid.DefaultMaxIdentityDigestSize)
+ appendFile := filepath.Join(node.Dir, "append.txt")
+ err = os.WriteFile(appendFile, []byte(appendData), 0644)
+ require.NoError(t, err)
+
+ // append to the end of the file
+ // get the current data size
+ res = node.IPFS("files", "stat", "--format", "", "/identity-file")
+ assert.NoError(t, res.Err)
+ size := res.Stdout.Trimmed()
+ // this should succeed because DagModifier in boxo handles the overflow
+ res = node.IPFS("files", "write", "--hash=identity", "--offset="+size, "/identity-file", appendFile)
+ assert.NoError(t, res.Err)
+
+ // check that the file now uses non-identity hash
+ res = node.IPFS("files", "stat", "--hash", "/identity-file")
+ assert.NoError(t, res.Err)
+ newCid := res.Stdout.Trimmed()
+
+ // verify new CID does NOT use identity
+ res = node.IPFS("cid", "format", "-f", "%h", newCid)
+ assert.NoError(t, res.Err)
+ assert.NotEqual(t, "identity", res.Stdout.Trimmed())
+
+ // verify it switched to a cryptographic hash
+ assert.Equal(t, config.DefaultHashFunction, res.Stdout.Trimmed())
+ })
+
+ t.Run("ipfs files write --hash=identity with small write creates identity CID", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // create a small file with identity hash directly in MFS
+ smallData := "small"
+ tempFile := filepath.Join(node.Dir, "small.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ // write to MFS with identity hash
+ res := node.IPFS("files", "write", "--create", "--hash=identity", "/mfs-identity", tempFile)
+ assert.NoError(t, res.Err)
+
+ // verify using identity CID
+ res = node.IPFS("files", "stat", "--hash", "/mfs-identity")
+ assert.NoError(t, res.Err)
+ cid := res.Stdout.Trimmed()
+
+ // verify CID uses identity hash
+ res = node.IPFS("cid", "format", "-f", "%h", cid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+
+ // verify content
+ res = node.IPFS("files", "read", "/mfs-identity")
+ assert.NoError(t, res.Err)
+ assert.Equal(t, smallData, res.Stdout.Trimmed())
+ })
+
+ t.Run("raw node with identity CID converts to UnixFS when appending", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // create raw block with identity CID
+ rawData := "raw"
+ tempFile := filepath.Join(node.Dir, "raw.txt")
+ err := os.WriteFile(tempFile, []byte(rawData), 0644)
+ require.NoError(t, err)
+
+ res := node.IPFS("block", "put", "--format=raw", "--mhtype=identity", tempFile)
+ assert.NoError(t, res.Err)
+ rawCid := res.Stdout.Trimmed()
+
+ // verify initial CID uses identity hash and raw codec
+ res = node.IPFS("cid", "format", "-f", "%h", rawCid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+
+ res = node.IPFS("cid", "format", "-f", "%c", rawCid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "raw", res.Stdout.Trimmed())
+
+ // copy to MFS
+ res = node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", rawCid), "/raw-identity")
+ assert.NoError(t, res.Err)
+
+ // append data
+ appendData := "appended"
+ appendFile := filepath.Join(node.Dir, "append-raw.txt")
+ err = os.WriteFile(appendFile, []byte(appendData), 0644)
+ require.NoError(t, err)
+
+ // get current data size for appending
+ res = node.IPFS("files", "stat", "--format", "", "/raw-identity")
+ assert.NoError(t, res.Err)
+ size := res.Stdout.Trimmed()
+ res = node.IPFS("files", "write", "--hash=identity", "--offset="+size, "/raw-identity", appendFile)
+ assert.NoError(t, res.Err)
+
+ // verify content
+ res = node.IPFS("files", "read", "/raw-identity")
+ assert.NoError(t, res.Err)
+ assert.Equal(t, rawData+appendData, res.Stdout.Trimmed())
+
+ // check that it's now a UnixFS structure (dag-pb)
+ res = node.IPFS("files", "stat", "--hash", "/raw-identity")
+ assert.NoError(t, res.Err)
+ newCid := res.Stdout.Trimmed()
+
+ res = node.IPFS("cid", "format", "-f", "%c", newCid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "dag-pb", res.Stdout.Trimmed())
+
+ res = node.IPFS("files", "stat", "/raw-identity")
+ assert.NoError(t, res.Err)
+ assert.Contains(t, res.Stdout.String(), "Type: file")
+ })
+
+ t.Run("ipfs add --inline-limit at exactly max size succeeds", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // create small data that will be inlined
+ smallData := "test data for inline"
+ tempFile := filepath.Join(node.Dir, "exact.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ // exactly at the limit should succeed
+ res := node.IPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", verifcid.DefaultMaxIdentityDigestSize), tempFile)
+ assert.NoError(t, res.Err)
+ cid := strings.Fields(res.Stdout.String())[1]
+
+ // verify it uses identity hash (inline) since data is small enough
+ res = node.IPFS("cid", "format", "-f", "%h", cid)
+ assert.NoError(t, res.Err)
+ assert.Equal(t, "identity", res.Stdout.Trimmed())
+ })
+
+ t.Run("ipfs add --inline-limit one byte over max fails", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ smallData := "test"
+ tempFile := filepath.Join(node.Dir, "oneover.txt")
+ err := os.WriteFile(tempFile, []byte(smallData), 0644)
+ require.NoError(t, err)
+
+ // one byte over should fail
+ overLimit := verifcid.DefaultMaxIdentityDigestSize + 1
+ res := node.RunIPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", overLimit), tempFile)
+ assert.NotEqual(t, 0, res.ExitErr.ExitCode())
+ assert.Contains(t, res.Stderr.String(), fmt.Sprintf("inline-limit %d exceeds maximum allowed size of %d bytes", overLimit, verifcid.DefaultMaxIdentityDigestSize))
+ })
+
+ t.Run("ipfs add --inline with data larger than limit uses configured hash", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init().StartDaemon()
+ defer node.StopDaemon()
+
+ // data larger than inline limit
+ largeData := strings.Repeat("y", 100)
+ tempFile := filepath.Join(node.Dir, "toolarge.txt")
+ err := os.WriteFile(tempFile, []byte(largeData), 0644)
+ require.NoError(t, err)
+
+ // set inline limit smaller than data
+ res := node.IPFS("add", "--inline", "--inline-limit=50", tempFile)
+ assert.NoError(t, res.Err)
+ cid := strings.Fields(res.Stdout.String())[1]
+
+ // verify it's NOT using identity hash (data too large for inline)
+ res = node.IPFS("cid", "format", "-f", "%h", cid)
+ assert.NoError(t, res.Err)
+ assert.NotEqual(t, "identity", res.Stdout.Trimmed())
+
+ // should use configured hash
+ assert.Equal(t, config.DefaultHashFunction, res.Stdout.Trimmed())
+ })
+}
diff --git a/test/cli/migrations/migration_16_to_17_test.go b/test/cli/migrations/migration_16_to_latest_test.go
similarity index 83%
rename from test/cli/migrations/migration_16_to_17_test.go
rename to test/cli/migrations/migration_16_to_latest_test.go
index e4d75bffdda..521b3164671 100644
--- a/test/cli/migrations/migration_16_to_17_test.go
+++ b/test/cli/migrations/migration_16_to_latest_test.go
@@ -1,8 +1,6 @@
package migrations
// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH.
-// The tests migrate from repo version 16 to 17, which requires Kubo version 0.37.0+ (expects repo v17).
-// If using system ipfs binary v0.36.0 or older (expects repo v16), no migration will be triggered.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
@@ -12,6 +10,7 @@ import (
"bufio"
"context"
"encoding/json"
+ "fmt"
"io"
"os"
"os/exec"
@@ -20,11 +19,28 @@ import (
"testing"
"time"
+ ipfs "github.com/ipfs/kubo"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/require"
)
-func TestMigration16To17(t *testing.T) {
+// TestMigration16ToLatest tests migration from repo version 16 to the latest version.
+//
+// This test uses a real IPFS repository snapshot from Kubo v0.36.0 (the last version that used repo v16).
+// The intention is to confirm that users can upgrade from Kubo v0.36.0 to the latest version by applying
+// all intermediate migrations successfully.
+//
+// NOTE: This test comprehensively tests all migration methods (daemon --migrate, repo migrate,
+// and reverse migration) because 16-to-17 was the first embedded migration that did not fetch
+// external files. It serves as a reference implementation for migration testing.
+//
+// Future migrations can have simplified tests (like 17-to-18 in migration_17_to_latest_test.go)
+// that focus on specific migration logic rather than testing all migration methods.
+//
+// If you need to test migration of configuration keys that appeared in later repo versions,
+// create a new test file migration_N_to_latest_test.go with a separate IPFS repository test vector
+// from the appropriate Kubo version.
+func TestMigration16ToLatest(t *testing.T) {
t.Parallel()
// Primary tests using 'ipfs daemon --migrate' command (default in Docker)
@@ -71,12 +87,13 @@ func testDaemonMigrationWithAuto(t *testing.T) {
// Verify migration was successful based on monitoring
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
- require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+ require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")
- // Verify version was updated to 17
+ // Verify version was updated to latest
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+ expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+ require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)
// Verify migration results using DRY helper
helper := NewMigrationTestHelper(t, configPath)
@@ -131,7 +148,7 @@ func testDaemonMigrationWithoutAuto(t *testing.T) {
// Verify migration was successful based on monitoring
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
- require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+ require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")
// Verify migration results: custom values preserved alongside "auto"
helper := NewMigrationTestHelper(t, configPath)
@@ -487,12 +504,13 @@ func testDaemonMissingFieldsHandling(t *testing.T) {
// Verify migration was successful
require.True(t, migrationSuccess, "Migration should have been successful")
require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
- require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully")
+ require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")
- // Verify version was updated
+ // Verify version was updated to latest
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+ expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+ require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)
// Verify migration adds all required fields to minimal config
NewMigrationTestHelper(t, configPath).
@@ -543,10 +561,11 @@ func testRepoBackwardMigration(t *testing.T) {
result := node.RunIPFS("repo", "migrate")
require.Empty(t, result.Stderr.String(), "Forward migration should succeed")
- // Verify we're at v17
+ // Verify we're at the latest version
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration")
+ expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+ require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Should be at version %s (latest) after forward migration", expectedVersion)
// Now run reverse migration back to v16
result = node.RunIPFS("repo", "migrate", "--to=16", "--allow-downgrade")
@@ -565,18 +584,40 @@ func testRepoBackwardMigration(t *testing.T) {
// runDaemonMigrationWithMonitoring starts daemon --migrate, monitors output until "Daemon is ready",
// then gracefully shuts down the daemon and returns the captured output and success status.
-// This is a generic helper that can monitor for any migration patterns.
+// This monitors for all expected migrations from version 16 to latest.
func runDaemonMigrationWithMonitoring(t *testing.T, node *harness.Node) (string, bool) {
- // Use specific patterns for 16-to-17 migration
- return runDaemonWithMigrationMonitoring(t, node, "applying 16-to-17 repo migration", "Migration 16 to 17 succeeded")
+ // Monitor migrations from repo v16 to latest
+ return runDaemonWithExpectedMigrations(t, node, 16, ipfs.RepoVersion)
+}
+
+// runDaemonWithExpectedMigrations monitors daemon startup for a sequence of migrations from startVersion to endVersion
+func runDaemonWithExpectedMigrations(t *testing.T, node *harness.Node, startVersion, endVersion int) (string, bool) {
+ // Build list of expected migrations
+ var expectedMigrations []struct {
+ pattern string
+ success string
+ }
+
+ for v := startVersion; v < endVersion; v++ {
+ from := v
+ to := v + 1
+ expectedMigrations = append(expectedMigrations, struct {
+ pattern string
+ success string
+ }{
+ pattern: fmt.Sprintf("applying %d-to-%d repo migration", from, to),
+ success: fmt.Sprintf("Migration %d-to-%d succeeded", from, to),
+ })
+ }
+
+ return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations)
}
-// runDaemonWithMigrationMonitoring is a generic helper for running daemon --migrate and monitoring output.
-// It waits for the daemon to be ready, then shuts it down gracefully.
-// migrationPattern: pattern to detect migration started (e.g., "applying X-to-Y repo migration")
-// successPattern: pattern to detect migration succeeded (e.g., "Migration X to Y succeeded")
-// Returns the stdout output and whether both patterns were detected.
-func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migrationPattern, successPattern string) (string, bool) {
+// runDaemonWithMultipleMigrationMonitoring monitors daemon startup for multiple sequential migrations
+func runDaemonWithMultipleMigrationMonitoring(t *testing.T, node *harness.Node, expectedMigrations []struct {
+ pattern string
+ success string
+}) (string, bool) {
// Create context with timeout as safety net
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@@ -601,7 +642,11 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
require.NoError(t, err)
var allOutput strings.Builder
- var migrationDetected, migrationSucceeded, daemonReady bool
+ var daemonReady bool
+
+ // Track which migrations have been detected
+ migrationsDetected := make([]bool, len(expectedMigrations))
+ migrationsSucceeded := make([]bool, len(expectedMigrations))
// Monitor stdout for completion signals
scanner := bufio.NewScanner(stdout)
@@ -611,11 +656,13 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
allOutput.WriteString(line + "\n")
// Check for migration messages
- if migrationPattern != "" && strings.Contains(line, migrationPattern) {
- migrationDetected = true
- }
- if successPattern != "" && strings.Contains(line, successPattern) {
- migrationSucceeded = true
+ for i, migration := range expectedMigrations {
+ if strings.Contains(line, migration.pattern) {
+ migrationsDetected[i] = true
+ }
+ if strings.Contains(line, migration.success) {
+ migrationsSucceeded[i] = true
+ }
}
if strings.Contains(line, "Daemon is ready") {
daemonReady = true
@@ -667,17 +714,41 @@ func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migratio
// Wait for process to exit
_ = cmd.Wait()
- // Return success if we detected migration
- success := migrationDetected && migrationSucceeded
- return allOutput.String(), success
+ // Check all migrations were detected and succeeded
+ allDetected := true
+ allSucceeded := true
+ for i := range expectedMigrations {
+ if !migrationsDetected[i] {
+ allDetected = false
+ t.Logf("Migration %s was not detected", expectedMigrations[i].pattern)
+ }
+ if !migrationsSucceeded[i] {
+ allSucceeded = false
+ t.Logf("Migration %s did not succeed", expectedMigrations[i].success)
+ }
+ }
+
+ return allOutput.String(), allDetected && allSucceeded
}
// Check if process has exited (e.g., due to startup failure after migration)
if cmd.ProcessState != nil && cmd.ProcessState.Exited() {
// Process exited - migration may have completed but daemon failed to start
// This is expected for corrupted config tests
- success := migrationDetected && migrationSucceeded
- return allOutput.String(), success
+
+ // Check all migrations status
+ allDetected := true
+ allSucceeded := true
+ for i := range expectedMigrations {
+ if !migrationsDetected[i] {
+ allDetected = false
+ }
+ if !migrationsSucceeded[i] {
+ allSucceeded = false
+ }
+ }
+
+ return allOutput.String(), allDetected && allSucceeded
}
}
}
diff --git a/test/cli/migrations/migration_17_to_latest_test.go b/test/cli/migrations/migration_17_to_latest_test.go
new file mode 100644
index 00000000000..635573461e4
--- /dev/null
+++ b/test/cli/migrations/migration_17_to_latest_test.go
@@ -0,0 +1,360 @@
+package migrations
+
+// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH.
+//
+// To run these tests successfully:
+// export PATH="$(pwd)/cmd/ipfs:$PATH"
+// go test ./test/cli/migrations/
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ ipfs "github.com/ipfs/kubo"
+ "github.com/ipfs/kubo/test/cli/harness"
+ "github.com/stretchr/testify/require"
+)
+
+// TestMigration17ToLatest tests migration from repo version 17 to the latest version.
+//
+// Since we don't have a v17 repo fixture, we start with v16 and migrate it to v17 first,
+// then test the 17-to-18 migration specifically.
+//
+// This test focuses on the Provider/Reprovider to Provide consolidation that happens in 17-to-18.
+func TestMigration17ToLatest(t *testing.T) {
+ t.Parallel()
+
+ // Tests for Provider/Reprovider to Provide migration (17-to-18)
+ t.Run("daemon migrate: Provider/Reprovider to Provide consolidation", testProviderReproviderMigration)
+ t.Run("daemon migrate: flat strategy conversion", testFlatStrategyConversion)
+ t.Run("daemon migrate: empty Provider/Reprovider sections", testEmptyProviderReproviderMigration)
+ t.Run("daemon migrate: partial configuration (Provider only)", testProviderOnlyMigration)
+ t.Run("daemon migrate: partial configuration (Reprovider only)", testReproviderOnlyMigration)
+ t.Run("repo migrate: invalid strategy values preserved", testInvalidStrategyMigration)
+ t.Run("repo migrate: Provider/Reprovider to Provide consolidation", testRepoProviderReproviderMigration)
+}
+
+// =============================================================================
+// MIGRATION 17-to-18 SPECIFIC TESTS: Provider/Reprovider to Provide consolidation
+// =============================================================================
+
+func testProviderReproviderMigration(t *testing.T) {
+ // TEST: 17-to-18 migration with explicit Provider/Reprovider configuration
+ node := setupV17RepoWithProviderConfig(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+ versionPath := filepath.Join(node.Dir, "version")
+
+ // Run migration using daemon --migrate command
+ stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
+
+ // Debug: Print the actual output
+ t.Logf("Daemon output:\n%s", stdoutOutput)
+
+ // Verify migration was successful
+ require.True(t, migrationSuccess, "Migration should have been successful")
+ require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
+ require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")
+
+ // Verify version was updated to latest
+ versionData, err := os.ReadFile(versionPath)
+ require.NoError(t, err)
+ expectedVersion := fmt.Sprint(ipfs.RepoVersion)
+ require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)
+
+ // =============================================================================
+ // MIGRATION 17-to-18 ASSERTIONS: Provider/Reprovider to Provide consolidation
+ // =============================================================================
+ helper := NewMigrationTestHelper(t, configPath)
+
+ // Verify Provider/Reprovider migration to Provide
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Enabled", true). // Migrated from Provider.Enabled
+ RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)). // Migrated from Provider.WorkerCount
+ RequireFieldEquals("Provide.Strategy", "roots"). // Migrated from Reprovider.Strategy
+ RequireFieldEquals("Provide.DHT.Interval", "24h") // Migrated from Reprovider.Interval
+
+ // Verify old sections are removed
+ helper.RequireFieldAbsent("Provider").
+ RequireFieldAbsent("Reprovider")
+}
+
+func testFlatStrategyConversion(t *testing.T) {
+ // TEST: 17-to-18 migration with "flat" strategy that should convert to "all"
+ node := setupV17RepoWithFlatStrategy(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run migration using daemon --migrate command
+ stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
+
+ // Verify migration was successful
+ require.True(t, migrationSuccess, "Migration should have been successful")
+ require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
+ require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")
+
+ // =============================================================================
+ // MIGRATION 17-to-18 ASSERTIONS: "flat" to "all" strategy conversion
+ // =============================================================================
+ helper := NewMigrationTestHelper(t, configPath)
+
+ // Verify "flat" was converted to "all"
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Strategy", "all"). // "flat" converted to "all"
+ RequireFieldEquals("Provide.DHT.Interval", "12h")
+}
+
+func testEmptyProviderReproviderMigration(t *testing.T) {
+ // TEST: 17-to-18 migration with empty Provider and Reprovider sections
+ node := setupV17RepoWithEmptySections(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run migration
+ stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
+
+ // Verify migration was successful
+ require.True(t, migrationSuccess, "Migration should have been successful")
+ require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
+
+ // Verify empty sections are removed and no Provide section is created
+ helper := NewMigrationTestHelper(t, configPath)
+ helper.RequireFieldAbsent("Provider").
+ RequireFieldAbsent("Reprovider").
+ RequireFieldAbsent("Provide") // No Provide section should be created for empty configs
+}
+
+func testProviderOnlyMigration(t *testing.T) {
+ // TEST: 17-to-18 migration with only Provider configuration
+ node := setupV17RepoWithProviderOnly(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run migration
+ stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
+
+ // Verify migration was successful
+ require.True(t, migrationSuccess, "Migration should have been successful")
+ require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
+
+ // Verify only Provider fields are migrated
+ helper := NewMigrationTestHelper(t, configPath)
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Enabled", false).
+ RequireFieldEquals("Provide.DHT.MaxWorkers", float64(32)).
+ RequireFieldAbsent("Provide.Strategy"). // No Reprovider.Strategy to migrate
+ RequireFieldAbsent("Provide.DHT.Interval") // No Reprovider.Interval to migrate
+}
+
+func testReproviderOnlyMigration(t *testing.T) {
+ // TEST: 17-to-18 migration with only Reprovider configuration
+ node := setupV17RepoWithReproviderOnly(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run migration
+ stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)
+
+ // Verify migration was successful
+ require.True(t, migrationSuccess, "Migration should have been successful")
+ require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")
+
+ // Verify only Reprovider fields are migrated
+ helper := NewMigrationTestHelper(t, configPath)
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Strategy", "pinned").
+ RequireFieldEquals("Provide.DHT.Interval", "48h").
+ RequireFieldAbsent("Provide.Enabled"). // No Provider.Enabled to migrate
+ RequireFieldAbsent("Provide.DHT.MaxWorkers") // No Provider.WorkerCount to migrate
+}
+
+func testInvalidStrategyMigration(t *testing.T) {
+ // TEST: 17-to-18 migration with invalid strategy values (should be preserved as-is)
+ // The migration itself should succeed, but daemon start will fail due to invalid strategy
+ node := setupV17RepoWithInvalidStrategy(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run the migration using 'ipfs repo migrate' (not daemon --migrate)
+ // because daemon would fail to start with invalid strategy after migration
+ result := node.RunIPFS("repo", "migrate")
+ require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")
+
+ // Verify invalid strategy is preserved as-is (not validated during migration)
+ helper := NewMigrationTestHelper(t, configPath)
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Strategy", "invalid-strategy") // Should be preserved
+
+ // Now verify that daemon fails to start with invalid strategy
+ // Note: We cannot use --offline as it skips provider validation
+ // Use a context with timeout to avoid hanging
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon")
+ cmd.Dir = node.Dir
+ for k, v := range node.Runner.Env {
+ cmd.Env = append(cmd.Env, k+"="+v)
+ }
+
+ output, err := cmd.CombinedOutput()
+
+ // The daemon should fail (either with error or timeout if it's hanging)
+ require.Error(t, err, "Daemon should fail to start with invalid strategy")
+
+ // Check if we got the expected error message
+ outputStr := string(output)
+ t.Logf("Daemon output with invalid strategy: %s", outputStr)
+
+ // The error should mention unknown strategy
+ require.Contains(t, outputStr, "unknown strategy", "Should report unknown strategy error")
+}
+
+func testRepoProviderReproviderMigration(t *testing.T) {
+ // TEST: 17-to-18 migration using 'ipfs repo migrate' command
+ node := setupV17RepoWithProviderConfig(t)
+
+ configPath := filepath.Join(node.Dir, "config")
+
+ // Run migration using 'ipfs repo migrate' command
+ result := node.RunIPFS("repo", "migrate")
+ require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")
+
+ // Verify same results as daemon migrate
+ helper := NewMigrationTestHelper(t, configPath)
+ helper.RequireProviderMigration().
+ RequireFieldEquals("Provide.Enabled", true).
+ RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)).
+ RequireFieldEquals("Provide.Strategy", "roots").
+ RequireFieldEquals("Provide.DHT.Interval", "24h")
+}
+
+// =============================================================================
+// HELPER FUNCTIONS
+// =============================================================================
+
+// setupV17RepoWithProviderConfig creates a v17 repo with Provider/Reprovider configuration
+func setupV17RepoWithProviderConfig(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{
+ "Enabled": true,
+ "WorkerCount": 8,
+ },
+ map[string]interface{}{
+ "Strategy": "roots",
+ "Interval": "24h",
+ })
+}
+
+// setupV17RepoWithFlatStrategy creates a v17 repo with "flat" strategy for testing conversion
+func setupV17RepoWithFlatStrategy(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{
+ "Enabled": false,
+ },
+ map[string]interface{}{
+ "Strategy": "flat", // This should be converted to "all"
+ "Interval": "12h",
+ })
+}
+
+// setupV17RepoWithConfig is a helper that creates a v17 repo with specified Provider/Reprovider config
+func setupV17RepoWithConfig(t *testing.T, providerConfig, reproviderConfig map[string]interface{}) *harness.Node {
+ node := setupStaticV16Repo(t)
+
+ // First migrate to v17
+ result := node.RunIPFS("repo", "migrate", "--to=17")
+ require.Empty(t, result.Stderr.String(), "Migration to v17 should succeed")
+
+ // Update config with specified Provider and Reprovider settings
+ configPath := filepath.Join(node.Dir, "config")
+ var config map[string]interface{}
+ configData, err := os.ReadFile(configPath)
+ require.NoError(t, err)
+ require.NoError(t, json.Unmarshal(configData, &config))
+
+ if providerConfig != nil {
+ config["Provider"] = providerConfig
+ } else {
+ config["Provider"] = map[string]interface{}{}
+ }
+
+ if reproviderConfig != nil {
+ config["Reprovider"] = reproviderConfig
+ } else {
+ config["Reprovider"] = map[string]interface{}{}
+ }
+
+ modifiedConfigData, err := json.MarshalIndent(config, "", " ")
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(configPath, modifiedConfigData, 0644))
+
+ return node
+}
+
+// setupV17RepoWithEmptySections creates a v17 repo with empty Provider/Reprovider sections
+func setupV17RepoWithEmptySections(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{},
+ map[string]interface{}{})
+}
+
+// setupV17RepoWithProviderOnly creates a v17 repo with only Provider configuration
+func setupV17RepoWithProviderOnly(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{
+ "Enabled": false,
+ "WorkerCount": 32,
+ },
+ map[string]interface{}{})
+}
+
+// setupV17RepoWithReproviderOnly creates a v17 repo with only Reprovider configuration
+func setupV17RepoWithReproviderOnly(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{},
+ map[string]interface{}{
+ "Strategy": "pinned",
+ "Interval": "48h",
+ })
+}
+
+// setupV17RepoWithInvalidStrategy creates a v17 repo with an invalid strategy value
+func setupV17RepoWithInvalidStrategy(t *testing.T) *harness.Node {
+ return setupV17RepoWithConfig(t,
+ map[string]interface{}{},
+ map[string]interface{}{
+ "Strategy": "invalid-strategy", // This is not a valid strategy
+ "Interval": "24h",
+ })
+}
+
+// runDaemonMigrationFromV17 monitors daemon startup for 17-to-18 migration only
+func runDaemonMigrationFromV17(t *testing.T, node *harness.Node) (string, bool) {
+ // Monitor only the 17-to-18 migration
+ expectedMigrations := []struct {
+ pattern string
+ success string
+ }{
+ {
+ pattern: "applying 17-to-18 repo migration",
+ success: "Migration 17-to-18 succeeded",
+ },
+ }
+
+ return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations)
+}
+
+// RequireProviderMigration verifies that Provider/Reprovider have been migrated to Provide section
+func (h *MigrationTestHelper) RequireProviderMigration() *MigrationTestHelper {
+ return h.RequireFieldExists("Provide").
+ RequireFieldAbsent("Provider").
+ RequireFieldAbsent("Reprovider")
+}
diff --git a/test/cli/migrations/migration_legacy_15_to_17_test.go b/test/cli/migrations/migration_mixed_15_to_latest_test.go
similarity index 73%
rename from test/cli/migrations/migration_legacy_15_to_17_test.go
rename to test/cli/migrations/migration_mixed_15_to_latest_test.go
index 1471cab1f42..9f1a482f81d 100644
--- a/test/cli/migrations/migration_legacy_15_to_17_test.go
+++ b/test/cli/migrations/migration_mixed_15_to_latest_test.go
@@ -1,8 +1,14 @@
package migrations
-// NOTE: These legacy migration tests require the local Kubo binary (built with 'make build') to be in PATH.
-// The tests migrate from repo version 15 to 17, which requires both external (15→16) and embedded (16→17) migrations.
-// This validates the transition from legacy external binaries to modern embedded migrations.
+// NOTE: These mixed migration tests validate the transition from old Kubo versions that used external
+// migration binaries to the latest version with embedded migrations. This ensures users can upgrade
+// from very old installations (v15) to the latest version seamlessly.
+//
+// The tests verify hybrid migration paths:
+// - Forward: external binary (15→16) + embedded migrations (16→latest)
+// - Backward: embedded migrations (latest→16) + external binary (16→15)
+//
+// This confirms compatibility between the old external migration system and the new embedded system.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
@@ -22,30 +28,36 @@ import (
"testing"
"time"
+ ipfs "github.com/ipfs/kubo"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/require"
)
-func TestMigration15To17(t *testing.T) {
+// TestMixedMigration15ToLatest tests migration from old Kubo (v15 with external migrations)
+// to the latest version using a hybrid approach: external binary for 15→16, then embedded
+// migrations for 16→latest. This ensures backward compatibility for users upgrading from
+// very old Kubo installations.
+func TestMixedMigration15ToLatest(t *testing.T) {
t.Parallel()
- // Test legacy migration from v15 to v17 (combines external 15→16 + embedded 16→17)
- t.Run("daemon migrate: legacy 15 to 17", testDaemonMigration15To17)
- t.Run("repo migrate: legacy 15 to 17", testRepoMigration15To17)
+ // Test mixed migration from v15 to latest (combines external 15→16 + embedded 16→latest)
+ t.Run("daemon migrate: mixed 15 to latest", testDaemonMigration15ToLatest)
+ t.Run("repo migrate: mixed 15 to latest", testRepoMigration15ToLatest)
}
-func TestMigration17To15Downgrade(t *testing.T) {
+// TestMixedMigrationLatestTo15Downgrade tests downgrading from the latest version back to v15
+// using a hybrid approach: embedded migrations for latest→16, then external binary for 16→15.
+// This ensures the migration system works bidirectionally for recovery scenarios.
+func TestMixedMigrationLatestTo15Downgrade(t *testing.T) {
t.Parallel()
- // Test reverse hybrid migration from v17 to v15 (embedded 17→16 + external 16→15)
- t.Run("repo migrate: reverse hybrid 17 to 15", testRepoReverseHybridMigration17To15)
+ // Test reverse hybrid migration from latest to v15 (embedded latest→16 + external 16→15)
+ t.Run("repo migrate: reverse hybrid latest to 15", testRepoReverseHybridMigrationLatestTo15)
}
-func testDaemonMigration15To17(t *testing.T) {
- // TEST: Migration from v15 to v17 using 'ipfs daemon --migrate'
- // This tests the dual migration path: external binary (15→16) + embedded (16→17)
- // NOTE: This test may need to be revised/updated once repo version 18 is released,
- // at that point only keep tests that use 'ipfs repo migrate'
+func testDaemonMigration15ToLatest(t *testing.T) {
+ // TEST: Migration from v15 to latest using 'ipfs daemon --migrate'
+ // This tests the mixed migration path: external binary (15→16) + embedded (16→latest)
node := setupStaticV15Repo(t)
// Create mock migration binary for 15→16 (16→17 will use embedded migration)
@@ -76,13 +88,16 @@ func testDaemonMigration15To17(t *testing.T) {
// Verify hybrid migration was successful
require.True(t, migrationSuccess, "Hybrid migration should have been successful")
require.Contains(t, stdoutOutput, "Phase 1: External migration from v15 to v16", "Should detect external migration phase")
- require.Contains(t, stdoutOutput, "Phase 2: Embedded migration from v16 to v17", "Should detect embedded migration phase")
+ // Verify each embedded migration step from 16 to latest
+ verifyMigrationSteps(t, stdoutOutput, 16, ipfs.RepoVersion, true)
+ require.Contains(t, stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion), "Should detect embedded migration phase")
require.Contains(t, stdoutOutput, "Hybrid migration completed successfully", "Should confirm hybrid migration completion")
- // Verify final version is 17
+ // Verify final version is latest
versionData, err = os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+ latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+ require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")
// Verify config is still valid JSON and key fields preserved
var finalConfig map[string]interface{}
@@ -103,8 +118,8 @@ func testDaemonMigration15To17(t *testing.T) {
require.NotNil(t, autoConf, "AutoConf should be added by 16→17 migration")
}
-func testRepoMigration15To17(t *testing.T) {
- // TEST: Migration from v15 to v17 using 'ipfs repo migrate'
+func testRepoMigration15ToLatest(t *testing.T) {
+ // TEST: Migration from v15 to latest using 'ipfs repo migrate'
// Comparison test to verify repo migrate produces same results as daemon migrate
node := setupStaticV15Repo(t)
@@ -132,10 +147,11 @@ func testRepoMigration15To17(t *testing.T) {
})
require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")
- // Verify final version is 17
+ // Verify final version is latest
versionData, err = os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17")
+ latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+ require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")
// Verify config is valid JSON
var finalConfig map[string]interface{}
@@ -177,7 +193,7 @@ func runDaemonWithLegacyMigrationMonitoring(t *testing.T, node *harness.Node) (s
// Check for hybrid migration patterns in output
hasHybridStart := strings.Contains(stdoutOutput, "Using hybrid migration strategy")
hasPhase1 := strings.Contains(stdoutOutput, "Phase 1: External migration from v15 to v16")
- hasPhase2 := strings.Contains(stdoutOutput, "Phase 2: Embedded migration from v16 to v17")
+ hasPhase2 := strings.Contains(stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion))
hasHybridSuccess := strings.Contains(stdoutOutput, "Hybrid migration completed successfully")
// Success requires daemon to start and hybrid migration patterns to be detected
@@ -342,6 +358,37 @@ func main() {
require.NoError(t, err, "Mock binary should exist")
}
+// expectedMigrationSteps generates the expected migration step strings for a version range.
+// For forward migrations (from < to), it returns strings like "Running embedded migration fs-repo-16-to-17"
+// For reverse migrations (from > to), it returns strings for the reverse path.
+func expectedMigrationSteps(from, to int, forward bool) []string {
+ var steps []string
+
+ if forward {
+ // Forward migration: increment by 1 each step
+ for v := from; v < to; v++ {
+ migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v+1)
+ steps = append(steps, fmt.Sprintf("Running embedded migration %s", migrationName))
+ }
+ } else {
+ // Reverse migration: decrement by 1 each step
+ for v := from; v > to; v-- {
+ migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v-1)
+ steps = append(steps, fmt.Sprintf("Running reverse migration %s", migrationName))
+ }
+ }
+
+ return steps
+}
+
+// verifyMigrationSteps checks that all expected migration steps appear in the output
+func verifyMigrationSteps(t *testing.T, output string, from, to int, forward bool) {
+ steps := expectedMigrationSteps(from, to, forward)
+ for _, step := range steps {
+ require.Contains(t, output, step, "Migration output should contain: %s", step)
+ }
+}
+
// getNestedValue retrieves a nested value from a config map using dot notation
func getNestedValue(config map[string]interface{}, path string) interface{} {
parts := strings.Split(path, ".")
@@ -362,11 +409,11 @@ func getNestedValue(config map[string]interface{}, path string) interface{} {
return current
}
-func testRepoReverseHybridMigration17To15(t *testing.T) {
- // TEST: Reverse hybrid migration from v17 to v15 using 'ipfs repo migrate --to=15 --allow-downgrade'
+func testRepoReverseHybridMigrationLatestTo15(t *testing.T) {
+ // TEST: Reverse hybrid migration from latest to v15 using 'ipfs repo migrate --to=15 --allow-downgrade'
// This tests reverse hybrid migration: embedded (17→16) + external (16→15)
- // Start with v15 fixture and migrate forward to v17 to create proper backup files
+ // Start with v15 fixture and migrate forward to latest to create proper backup files
node := setupStaticV15Repo(t)
// Create mock migration binary for 15→16 (needed for forward migration)
@@ -377,8 +424,8 @@ func testRepoReverseHybridMigration17To15(t *testing.T) {
configPath := filepath.Join(node.Dir, "config")
versionPath := filepath.Join(node.Dir, "version")
- // Step 1: Forward migration from v15 to v17 to create backup files
- t.Log("Step 1: Forward migration v15 → v17")
+ // Step 1: Forward migration from v15 to latest to create backup files
+ t.Logf("Step 1: Forward migration v15 → v%d", ipfs.RepoVersion)
result := node.Runner.Run(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"repo", "migrate"},
@@ -396,21 +443,22 @@ func testRepoReverseHybridMigration17To15(t *testing.T) {
require.Empty(t, result.Stderr.String(), "Forward migration should succeed without errors")
- // Verify we're at v17 after forward migration
+ // Verify we're at latest version after forward migration
versionData, err := os.ReadFile(versionPath)
require.NoError(t, err)
- require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration")
+ latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
+ require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Should be at latest version after forward migration")
// Read config after forward migration to use as baseline for downgrade
- var v17Config map[string]interface{}
+ var latestConfig map[string]interface{}
configData, err := os.ReadFile(configPath)
require.NoError(t, err)
- require.NoError(t, json.Unmarshal(configData, &v17Config))
+ require.NoError(t, json.Unmarshal(configData, &latestConfig))
- originalPeerID := getNestedValue(v17Config, "Identity.PeerID")
+ originalPeerID := getNestedValue(latestConfig, "Identity.PeerID")
- // Step 2: Reverse hybrid migration from v17 to v15
- t.Log("Step 2: Reverse hybrid migration v17 → v15")
+ // Step 2: Reverse hybrid migration from latest to v15
+ t.Logf("Step 2: Reverse hybrid migration v%d → v15", ipfs.RepoVersion)
result = node.Runner.Run(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"repo", "migrate", "--to=15", "--allow-downgrade"},
diff --git a/test/cli/pin_ls_names_test.go b/test/cli/pin_ls_names_test.go
new file mode 100644
index 00000000000..54532b6b2d6
--- /dev/null
+++ b/test/cli/pin_ls_names_test.go
@@ -0,0 +1,534 @@
+package cli
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/ipfs/kubo/test/cli/harness"
+ "github.com/stretchr/testify/require"
+)
+
+// pinInfo represents the JSON structure for pin ls output
+type pinInfo struct {
+ Type string `json:"Type"`
+ Name string `json:"Name"`
+}
+
+// pinLsJSON represents the JSON output structure for pin ls command
+type pinLsJSON struct {
+ Keys map[string]pinInfo `json:"Keys"`
+}
+
+// Helper function to initialize a test node with daemon
+func setupTestNode(t *testing.T) *harness.Node {
+ t.Helper()
+ node := harness.NewT(t).NewNode().Init()
+ node.StartDaemon("--offline")
+ return node
+}
+
+// Helper function to assert pin name and CID are present in output
+func assertPinOutput(t *testing.T, output, cid, pinName string) {
+ t.Helper()
+ require.Contains(t, output, pinName, "pin name '%s' not found in output: %s", pinName, output)
+ require.Contains(t, output, cid, "CID %s not found in output: %s", cid, output)
+}
+
+// Helper function to assert CID is present but name is not
+func assertCIDOnly(t *testing.T, output, cid string) {
+ t.Helper()
+ require.Contains(t, output, cid, "CID %s not found in output: %s", cid, output)
+}
+
+// Helper function to assert neither CID nor name are present
+func assertNotPresent(t *testing.T, output, cid, pinName string) {
+ t.Helper()
+ require.NotContains(t, output, cid, "CID %s should not be present in output: %s", cid, output)
+ require.NotContains(t, output, pinName, "pin name '%s' should not be present in output: %s", pinName, output)
+}
+
+// Test that pin ls returns names when querying specific CIDs with --names flag
+func TestPinLsWithNamesForSpecificCIDs(t *testing.T) {
+ t.Parallel()
+
+ t.Run("pin ls with specific CID returns name", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add content without pinning
+ cidA := node.IPFSAddStr("content A", "--pin=false")
+ cidB := node.IPFSAddStr("content B", "--pin=false")
+ cidC := node.IPFSAddStr("content C", "--pin=false")
+
+ // Pin with names
+ node.IPFS("pin", "add", "--name=pin-a", cidA)
+ node.IPFS("pin", "add", "--name=pin-b", cidB)
+ node.IPFS("pin", "add", cidC) // No name
+
+ // Test: pin ls --names should return the name
+ res := node.IPFS("pin", "ls", cidA, "--names")
+ assertPinOutput(t, res.Stdout.String(), cidA, "pin-a")
+
+ res = node.IPFS("pin", "ls", cidB, "--names")
+ assertPinOutput(t, res.Stdout.String(), cidB, "pin-b")
+
+ // Test: pin without name should work
+ res = node.IPFS("pin", "ls", cidC, "--names")
+ output := res.Stdout.String()
+ assertCIDOnly(t, output, cidC)
+ require.Contains(t, output, "recursive", "pin type 'recursive' not found for CID %s in output: %s", cidC, output)
+
+ // Test: without --names flag, no names returned
+ res = node.IPFS("pin", "ls", cidA)
+ output = res.Stdout.String()
+ require.NotContains(t, output, "pin-a", "pin name 'pin-a' should not be present without --names flag, but found in: %s", output)
+ assertCIDOnly(t, output, cidA)
+ })
+
+ t.Run("pin ls with multiple CIDs returns names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create test content
+ cidA := node.IPFSAddStr("multi A", "--pin=false")
+ cidB := node.IPFSAddStr("multi B", "--pin=false")
+
+ // Pin with names
+ node.IPFS("pin", "add", "--name=multi-pin-a", cidA)
+ node.IPFS("pin", "add", "--name=multi-pin-b", cidB)
+
+ // Test multiple CIDs at once
+ res := node.IPFS("pin", "ls", cidA, cidB, "--names")
+ output := res.Stdout.String()
+ assertPinOutput(t, output, cidA, "multi-pin-a")
+ assertPinOutput(t, output, cidB, "multi-pin-b")
+ })
+
+ t.Run("pin ls without CID lists all pins with names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create and pin content with names
+ cidA := node.IPFSAddStr("list all A", "--pin=false")
+ cidB := node.IPFSAddStr("list all B", "--pin=false")
+ cidC := node.IPFSAddStr("list all C", "--pin=false")
+
+ node.IPFS("pin", "add", "--name=all-pin-a", cidA)
+ node.IPFS("pin", "add", "--name=all-pin-b", "--recursive=false", cidB)
+ node.IPFS("pin", "add", cidC) // No name
+
+ // Test: pin ls --names (without CID) should list all pins with their names
+ res := node.IPFS("pin", "ls", "--names")
+ output := res.Stdout.String()
+
+ // Should contain all pins with their names
+ assertPinOutput(t, output, cidA, "all-pin-a")
+ assertPinOutput(t, output, cidB, "all-pin-b")
+ assertCIDOnly(t, output, cidC)
+
+ // Pin C should appear but without a name (just type)
+ lines := strings.Split(output, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, cidC) {
+ // Should have CID and type but no name
+ require.Contains(t, line, "recursive", "pin type 'recursive' not found for unnamed pin %s in line: %s", cidC, line)
+ require.NotContains(t, line, "all-pin", "pin name should not be present for unnamed pin %s, but found in line: %s", cidC, line)
+ }
+ }
+ })
+
+ t.Run("pin ls --type with --names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create test content
+ cidDirect := node.IPFSAddStr("direct content", "--pin=false")
+ cidRecursive := node.IPFSAddStr("recursive content", "--pin=false")
+
+ // Create a DAG for indirect testing
+ childCid := node.IPFSAddStr("child for indirect", "--pin=false")
+ parentContent := fmt.Sprintf(`{"link": "/ipfs/%s"}`, childCid)
+ parentCid := node.PipeStrToIPFS(parentContent, "dag", "put", "--input-codec=json", "--store-codec=dag-cbor").Stdout.Trimmed()
+
+ // Pin with different types and names
+ node.IPFS("pin", "add", "--name=direct-pin", "--recursive=false", cidDirect)
+ node.IPFS("pin", "add", "--name=recursive-pin", cidRecursive)
+ node.IPFS("pin", "add", "--name=parent-pin", parentCid)
+
+ // Test: --type=direct --names
+ res := node.IPFS("pin", "ls", "--type=direct", "--names")
+ output := res.Stdout.String()
+ assertPinOutput(t, output, cidDirect, "direct-pin")
+ assertNotPresent(t, output, cidRecursive, "recursive-pin")
+
+ // Test: --type=recursive --names
+ res = node.IPFS("pin", "ls", "--type=recursive", "--names")
+ output = res.Stdout.String()
+ assertPinOutput(t, output, cidRecursive, "recursive-pin")
+ assertPinOutput(t, output, parentCid, "parent-pin")
+ assertNotPresent(t, output, cidDirect, "direct-pin")
+
+ // Test: --type=indirect with proper directory structure
+ // Create a directory with a file for indirect pin testing
+ dirPath := t.TempDir()
+ require.NoError(t, os.WriteFile(filepath.Join(dirPath, "file.txt"), []byte("test content"), 0644))
+
+ // Add directory recursively
+ dirAddRes := node.IPFS("add", "-r", "-q", dirPath)
+ dirCidStr := strings.TrimSpace(dirAddRes.Stdout.Lines()[len(dirAddRes.Stdout.Lines())-1])
+
+ // Add file separately without pinning to get its CID
+ fileAddRes := node.IPFS("add", "-q", "--pin=false", filepath.Join(dirPath, "file.txt"))
+ fileCidStr := strings.TrimSpace(fileAddRes.Stdout.String())
+
+ // Check if file shows as indirect
+ res = node.IPFS("pin", "ls", "--type=indirect", fileCidStr)
+ output = res.Stdout.String()
+ require.Contains(t, output, fileCidStr, "indirect pin CID %s not found in output: %s", fileCidStr, output)
+ require.Contains(t, output, "indirect through "+dirCidStr, "indirect relationship not found for CID %s through %s in output: %s", fileCidStr, dirCidStr, output)
+
+ // Test: --type=all --names
+ res = node.IPFS("pin", "ls", "--type=all", "--names")
+ output = res.Stdout.String()
+ assertPinOutput(t, output, cidDirect, "direct-pin")
+ assertPinOutput(t, output, cidRecursive, "recursive-pin")
+ assertPinOutput(t, output, parentCid, "parent-pin")
+ // Indirect pins are included in --type=all output
+ })
+
+ t.Run("pin ls JSON output with names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add and pin content with name
+ cidA := node.IPFSAddStr("json content", "--pin=false")
+ node.IPFS("pin", "add", "--name=json-pin", cidA)
+
+ // Test JSON output with specific CID
+ res := node.IPFS("pin", "ls", cidA, "--names", "--enc=json")
+ var pinOutput pinLsJSON
+ err := json.Unmarshal([]byte(res.Stdout.String()), &pinOutput)
+ require.NoError(t, err, "failed to unmarshal JSON output: %s", res.Stdout.String())
+
+ pinData, ok := pinOutput.Keys[cidA]
+ require.True(t, ok, "CID %s should be in Keys map, got: %+v", cidA, pinOutput.Keys)
+ require.Equal(t, "recursive", pinData.Type, "expected pin type 'recursive', got '%s'", pinData.Type)
+ require.Equal(t, "json-pin", pinData.Name, "expected pin name 'json-pin', got '%s'", pinData.Name)
+
+ // Without names flag
+ res = node.IPFS("pin", "ls", cidA, "--enc=json")
+ err = json.Unmarshal([]byte(res.Stdout.String()), &pinOutput)
+ require.NoError(t, err, "failed to unmarshal JSON output: %s", res.Stdout.String())
+
+ pinData, ok = pinOutput.Keys[cidA]
+ require.True(t, ok, "CID %s should be in Keys map, got: %+v", cidA, pinOutput.Keys)
+ // Name should be empty without --names flag
+ require.Equal(t, "", pinData.Name, "pin name should be empty without --names flag, got '%s'", pinData.Name)
+
+ // Test JSON output without CID (list all)
+ res = node.IPFS("pin", "ls", "--names", "--enc=json")
+ var listOutput pinLsJSON
+ err = json.Unmarshal([]byte(res.Stdout.String()), &listOutput)
+ require.NoError(t, err, "failed to unmarshal JSON list output: %s", res.Stdout.String())
+ // Should have at least one pin (the one we just added)
+ require.NotEmpty(t, listOutput.Keys, "pin list should not be empty")
+ // Check that our pin is in the list
+ pinData, ok = listOutput.Keys[cidA]
+ require.True(t, ok, "our pin with CID %s should be in the list, got: %+v", cidA, listOutput.Keys)
+ require.Equal(t, "json-pin", pinData.Name, "expected pin name 'json-pin' in list, got '%s'", pinData.Name)
+ })
+
+ t.Run("direct and indirect pins with names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create a small DAG: parent -> child
+ childCid := node.IPFSAddStr("child content", "--pin=false")
+
+ // Create parent that references child
+ parentContent := fmt.Sprintf(`{"link": "/ipfs/%s"}`, childCid)
+ parentCid := node.PipeStrToIPFS(parentContent, "dag", "put", "--input-codec=json", "--store-codec=dag-cbor").Stdout.Trimmed()
+
+ // Pin child directly with a name
+ node.IPFS("pin", "add", "--name=direct-child", "--recursive=false", childCid)
+
+ // Pin parent recursively with a name
+ node.IPFS("pin", "add", "--name=recursive-parent", parentCid)
+
+ // Check direct pin with specific CID
+ res := node.IPFS("pin", "ls", "--type=direct", childCid, "--names")
+ output := res.Stdout.String()
+ require.Contains(t, output, "direct-child", "pin name 'direct-child' not found in output: %s", output)
+ require.Contains(t, output, "direct", "pin type 'direct' not found in output: %s", output)
+
+ // Check recursive pin with specific CID
+ res = node.IPFS("pin", "ls", "--type=recursive", parentCid, "--names")
+ output = res.Stdout.String()
+ require.Contains(t, output, "recursive-parent", "pin name 'recursive-parent' not found in output: %s", output)
+ require.Contains(t, output, "recursive", "pin type 'recursive' not found in output: %s", output)
+
+ // Child is both directly pinned and indirectly pinned through parent
+ // Both relationships are valid and can be checked
+ })
+
+ t.Run("pin update preserves name", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create two pieces of content
+ cidOld := node.IPFSAddStr("old content", "--pin=false")
+ cidNew := node.IPFSAddStr("new content", "--pin=false")
+
+ // Pin with name
+ node.IPFS("pin", "add", "--name=my-pin", cidOld)
+
+ // Update pin
+ node.IPFS("pin", "update", cidOld, cidNew)
+
+ // Check that new pin has the same name
+ res := node.IPFS("pin", "ls", cidNew, "--names")
+ require.Contains(t, res.Stdout.String(), "my-pin", "pin name 'my-pin' not preserved after update, output: %s", res.Stdout.String())
+
+ // Old pin should not exist
+ res = node.RunIPFS("pin", "ls", cidOld)
+ require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for unpinned CID, got %d", res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "is not pinned", "expected 'is not pinned' error for old CID %s, got: %s", cidOld, res.Stderr.String())
+ })
+
+ t.Run("pin ls with invalid CID returns error", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ res := node.RunIPFS("pin", "ls", "invalid-cid")
+ require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for invalid CID, got %d", res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "invalid", "expected 'invalid' in error message, got: %s", res.Stderr.String())
+ })
+
+ t.Run("pin ls with unpinned CID returns error", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add content without pinning
+ cid := node.IPFSAddStr("unpinned content", "--pin=false")
+
+ res := node.RunIPFS("pin", "ls", cid)
+ require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for unpinned CID, got %d", res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "is not pinned", "expected 'is not pinned' error for CID %s, got: %s", cid, res.Stderr.String())
+ })
+
+ t.Run("pin with special characters in name", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ testCases := []struct {
+ name string
+ pinName string
+ }{
+ {"unicode", "test-📌-pin"},
+ {"spaces", "test pin name"},
+ {"special chars", "test!@#$%"},
+ {"path-like", "test/pin/name"},
+ {"dots", "test.pin.name"},
+ {"long name", strings.Repeat("a", 255)},
+ {"empty name", ""},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ cid := node.IPFSAddStr("content for "+tc.name, "--pin=false")
+ node.IPFS("pin", "add", "--name="+tc.pinName, cid)
+
+ res := node.IPFS("pin", "ls", cid, "--names")
+ if tc.pinName != "" {
+ require.Contains(t, res.Stdout.String(), tc.pinName,
+ "pin name '%s' not found in output for test case '%s'", tc.pinName, tc.name)
+ }
+ })
+ }
+ })
+
+ t.Run("concurrent pin operations with names", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Create multiple goroutines adding pins with names
+ numPins := 10
+ done := make(chan struct{}, numPins)
+
+ for i := 0; i < numPins; i++ {
+ go func(idx int) {
+ defer func() { done <- struct{}{} }()
+
+ content := fmt.Sprintf("concurrent content %d", idx)
+ cid := node.IPFSAddStr(content, "--pin=false")
+ pinName := fmt.Sprintf("concurrent-pin-%d", idx)
+ node.IPFS("pin", "add", "--name="+pinName, cid)
+ }(i)
+ }
+
+ // Wait for all goroutines
+ for i := 0; i < numPins; i++ {
+ <-done
+ }
+
+ // Verify all pins have correct names
+ res := node.IPFS("pin", "ls", "--names")
+ output := res.Stdout.String()
+ for i := 0; i < numPins; i++ {
+ pinName := fmt.Sprintf("concurrent-pin-%d", i)
+ require.Contains(t, output, pinName,
+ "concurrent pin name '%s' not found in output", pinName)
+ }
+ })
+
+ t.Run("pin rm removes name association", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add and pin with name
+ cid := node.IPFSAddStr("content to remove", "--pin=false")
+ node.IPFS("pin", "add", "--name=to-be-removed", cid)
+
+ // Verify pin exists with name
+ res := node.IPFS("pin", "ls", cid, "--names")
+ require.Contains(t, res.Stdout.String(), "to-be-removed")
+
+ // Remove pin
+ node.IPFS("pin", "rm", cid)
+
+ // Verify pin and name are gone
+ res = node.RunIPFS("pin", "ls", cid)
+ require.Equal(t, 1, res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "is not pinned")
+ })
+
+ t.Run("garbage collection preserves named pins", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add content with and without pin names
+ cidNamed := node.IPFSAddStr("named content", "--pin=false")
+ cidUnnamed := node.IPFSAddStr("unnamed content", "--pin=false")
+ cidUnpinned := node.IPFSAddStr("unpinned content", "--pin=false")
+
+ node.IPFS("pin", "add", "--name=important-data", cidNamed)
+ node.IPFS("pin", "add", cidUnnamed)
+
+ // Run garbage collection
+ node.IPFS("repo", "gc")
+
+ // Named and unnamed pins should still exist
+ res := node.IPFS("pin", "ls", cidNamed, "--names")
+ require.Contains(t, res.Stdout.String(), "important-data")
+
+ res = node.IPFS("pin", "ls", cidUnnamed)
+ require.Contains(t, res.Stdout.String(), cidUnnamed)
+
+ // Unpinned content should be gone (cat should fail)
+ res = node.RunIPFS("cat", cidUnpinned)
+ require.NotEqual(t, 0, res.ExitCode(), "unpinned content should be garbage collected")
+ })
+
+ t.Run("pin add with same name can be used for multiple pins", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+
+ // Add two different pieces of content
+ cid1 := node.IPFSAddStr("first content", "--pin=false")
+ cid2 := node.IPFSAddStr("second content", "--pin=false")
+
+ // Pin both with the same name - this is allowed
+ node.IPFS("pin", "add", "--name=shared-name", cid1)
+ node.IPFS("pin", "add", "--name=shared-name", cid2)
+
+ // List all pins with names
+ res := node.IPFS("pin", "ls", "--names")
+ output := res.Stdout.String()
+
+ // Both CIDs should be pinned
+ require.Contains(t, output, cid1)
+ require.Contains(t, output, cid2)
+
+ // Both pins can have the same name
+ lines := strings.Split(output, "\n")
+ foundCid1WithName := false
+ foundCid2WithName := false
+ for _, line := range lines {
+ if strings.Contains(line, cid1) && strings.Contains(line, "shared-name") {
+ foundCid1WithName = true
+ }
+ if strings.Contains(line, cid2) && strings.Contains(line, "shared-name") {
+ foundCid2WithName = true
+ }
+ }
+ require.True(t, foundCid1WithName, "first pin should have the name")
+ require.True(t, foundCid2WithName, "second pin should have the name")
+ })
+
+ t.Run("pin names persist across daemon restarts", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+ node.StartDaemon("--offline")
+
+ // Add content with pin name
+ cid := node.IPFSAddStr("persistent content")
+ node.IPFS("pin", "add", "--name=persistent-pin", cid)
+
+ // Restart daemon
+ node.StopDaemon()
+ node.StartDaemon("--offline")
+
+ // Check pin name persisted
+ res := node.IPFS("pin", "ls", cid, "--names")
+ require.Contains(t, res.Stdout.String(), "persistent-pin",
+ "pin name should persist across daemon restarts")
+
+ node.StopDaemon()
+ })
+}
+
+// TestPinLsEdgeCases tests edge cases for pin ls command
+func TestPinLsEdgeCases(t *testing.T) {
+ t.Parallel()
+
+ t.Run("invalid pin type returns error", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+ defer node.StopDaemon()
+
+ // Try to list pins with invalid type
+ res := node.RunIPFS("pin", "ls", "--type=invalid")
+ require.NotEqual(t, 0, res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "invalid type 'invalid'")
+ require.Contains(t, res.Stderr.String(), "must be one of {direct, indirect, recursive, all}")
+ })
+
+ t.Run("non-existent path returns proper error", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+ defer node.StopDaemon()
+
+ // Try to list a non-existent CID
+ fakeCID := "QmNonExistent123456789"
+ res := node.RunIPFS("pin", "ls", fakeCID)
+ require.NotEqual(t, 0, res.ExitCode())
+ })
+
+ t.Run("unpinned CID returns not pinned error", func(t *testing.T) {
+ t.Parallel()
+ node := setupTestNode(t)
+ defer node.StopDaemon()
+
+ // Add content but don't pin it explicitly (it's just in blockstore)
+ unpinnedCID := node.IPFSAddStr("unpinned content", "--pin=false")
+
+ // Try to list specific unpinned CID
+ res := node.RunIPFS("pin", "ls", unpinnedCID)
+ require.NotEqual(t, 0, res.ExitCode())
+ require.Contains(t, res.Stderr.String(), "is not pinned")
+ })
+}
diff --git a/test/cli/pin_name_validation_test.go b/test/cli/pin_name_validation_test.go
new file mode 100644
index 00000000000..049118642e1
--- /dev/null
+++ b/test/cli/pin_name_validation_test.go
@@ -0,0 +1,184 @@
+package cli
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/ipfs/kubo/test/cli/harness"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPinNameValidation(t *testing.T) {
+ t.Parallel()
+
+ // Create a test node and add a test file
+ node := harness.NewT(t).NewNode().Init().StartDaemon("--offline")
+ defer node.StopDaemon()
+
+ // Add a test file to get a CID
+ testContent := "test content for pin name validation"
+ testCID := node.IPFSAddStr(testContent, "--pin=false")
+
+ t.Run("pin add accepts valid names", func(t *testing.T) {
+ testCases := []struct {
+ name string
+ pinName string
+ description string
+ }{
+ {
+ name: "empty_name",
+ pinName: "",
+ description: "Empty name should be allowed",
+ },
+ {
+ name: "short_name",
+ pinName: "test",
+ description: "Short ASCII name should be allowed",
+ },
+ {
+ name: "max_255_bytes",
+ pinName: strings.Repeat("a", 255),
+ description: "Exactly 255 bytes should be allowed",
+ },
+ {
+ name: "unicode_within_limit",
+ pinName: "测试名称🔥", // Chinese characters and emoji
+ description: "Unicode characters within 255 bytes should be allowed",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var args []string
+ if tc.pinName != "" {
+ args = []string{"pin", "add", "--name", tc.pinName, testCID}
+ } else {
+ args = []string{"pin", "add", testCID}
+ }
+
+ res := node.RunIPFS(args...)
+ require.Equal(t, 0, res.ExitCode(), tc.description)
+
+ // Clean up - unpin
+ node.RunIPFS("pin", "rm", testCID)
+ })
+ }
+ })
+
+ t.Run("pin add rejects names exceeding 255 bytes", func(t *testing.T) {
+ testCases := []struct {
+ name string
+ pinName string
+ description string
+ }{
+ {
+ name: "256_bytes",
+ pinName: strings.Repeat("a", 256),
+ description: "256 bytes should be rejected",
+ },
+ {
+ name: "300_bytes",
+ pinName: strings.Repeat("b", 300),
+ description: "300 bytes should be rejected",
+ },
+ {
+ name: "unicode_exceeding_limit",
+ pinName: strings.Repeat("测", 100), // Each Chinese character is 3 bytes, total 300 bytes
+ description: "Unicode string exceeding 255 bytes should be rejected",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res := node.RunIPFS("pin", "add", "--name", tc.pinName, testCID)
+ require.NotEqual(t, 0, res.ExitCode(), tc.description)
+ require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit")
+ })
+ }
+ })
+
+ t.Run("pin ls with name filter validates length", func(t *testing.T) {
+ // Test valid filter
+ res := node.RunIPFS("pin", "ls", "--name", strings.Repeat("a", 255))
+ require.Equal(t, 0, res.ExitCode(), "255-byte name filter should be accepted")
+
+ // Test invalid filter
+ res = node.RunIPFS("pin", "ls", "--name", strings.Repeat("a", 256))
+ require.NotEqual(t, 0, res.ExitCode(), "256-byte name filter should be rejected")
+ require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit")
+ })
+}
+
+func TestAddPinNameValidation(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init().StartDaemon("--offline")
+ defer node.StopDaemon()
+
+ // Create a test file
+ testFile := "test.txt"
+ node.WriteBytes(testFile, []byte("test content for add command"))
+
+ t.Run("ipfs add with --pin-name accepts valid names", func(t *testing.T) {
+ testCases := []struct {
+ name string
+ pinName string
+ description string
+ }{
+ {
+ name: "short_name",
+ pinName: "test-add",
+ description: "Short ASCII name should be allowed",
+ },
+ {
+ name: "max_255_bytes",
+ pinName: strings.Repeat("x", 255),
+ description: "Exactly 255 bytes should be allowed",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res := node.RunIPFS("add", fmt.Sprintf("--pin-name=%s", tc.pinName), "-q", testFile)
+ require.Equal(t, 0, res.ExitCode(), tc.description)
+ cid := strings.TrimSpace(res.Stdout.String())
+
+ // Verify pin exists with name
+ lsRes := node.RunIPFS("pin", "ls", "--names", "--type=recursive", cid)
+ require.Equal(t, 0, lsRes.ExitCode())
+ require.Contains(t, lsRes.Stdout.String(), tc.pinName, "Pin should have the specified name")
+
+ // Clean up
+ node.RunIPFS("pin", "rm", cid)
+ })
+ }
+ })
+
+ t.Run("ipfs add with --pin-name rejects names exceeding 255 bytes", func(t *testing.T) {
+ testCases := []struct {
+ name string
+ pinName string
+ description string
+ }{
+ {
+ name: "256_bytes",
+ pinName: strings.Repeat("y", 256),
+ description: "256 bytes should be rejected",
+ },
+ {
+ name: "500_bytes",
+ pinName: strings.Repeat("z", 500),
+ description: "500 bytes should be rejected",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res := node.RunIPFS("add", fmt.Sprintf("--pin-name=%s", tc.pinName), testFile)
+ require.NotEqual(t, 0, res.ExitCode(), tc.description)
+ require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit")
+ })
+ }
+ })
+}
diff --git a/test/cli/provider_test.go b/test/cli/provider_test.go
index f0d04e1d28d..debeddcd016 100644
--- a/test/cli/provider_test.go
+++ b/test/cli/provider_test.go
@@ -3,6 +3,9 @@ package cli
import (
"bytes"
"encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "strings"
"testing"
"time"
@@ -12,17 +15,28 @@ import (
"github.com/stretchr/testify/require"
)
-func TestProvider(t *testing.T) {
- t.Parallel()
+const (
+ timeStep = 20 * time.Millisecond
+ timeout = time.Second
+)
+
+type cfgApplier func(*harness.Node)
+
+func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
+ t.Helper()
initNodes := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes {
nodes := harness.NewT(t).NewNodes(n).Init()
+ nodes.ForEachPar(apply)
nodes.ForEachPar(fn)
- return nodes.StartDaemons().Connect()
+ nodes = nodes.StartDaemons().Connect()
+ time.Sleep(500 * time.Millisecond) // wait for DHT clients to be bootstrapped
+ return nodes
}
initNodesWithoutStart := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes {
nodes := harness.NewT(t).NewNodes(n).Init()
+ nodes.ForEachPar(apply)
nodes.ForEachPar(fn)
return nodes
}
@@ -35,17 +49,23 @@ func TestProvider(t *testing.T) {
}
expectProviders := func(t *testing.T, cid, expectedProvider string, nodes ...*harness.Node) {
+ outerLoop:
for _, node := range nodes {
- res := node.IPFS("routing", "findprovs", "-n=1", cid)
- require.Equal(t, expectedProvider, res.Stdout.Trimmed())
+ for i := time.Duration(0); i*timeStep < timeout; i++ {
+ res := node.IPFS("routing", "findprovs", "-n=1", cid)
+ if res.Stdout.Trimmed() == expectedProvider {
+ continue outerLoop
+ }
+ }
+ require.FailNowf(t, "found no providers", "expected a provider for %s", cid)
}
}
- t.Run("Provider.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) {
+ t.Run("Provide.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
+ n.SetIPFSConfig("Provide.Enabled", true)
})
defer nodes.StopDaemons()
@@ -53,11 +73,11 @@ func TestProvider(t *testing.T) {
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- t.Run("Provider.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) {
+ t.Run("Provide.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
+ n.SetIPFSConfig("Provide.Enabled", true)
// Default strategy is "all" which should provide even unpinned content
})
defer nodes.StopDaemons()
@@ -66,11 +86,11 @@ func TestProvider(t *testing.T) {
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- t.Run("Provider.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) {
+ t.Run("Provide.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
+ n.SetIPFSConfig("Provide.Enabled", true)
// Default strategy is "all" which should provide unpinned content from block put
})
defer nodes.StopDaemons()
@@ -80,11 +100,11 @@ func TestProvider(t *testing.T) {
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- t.Run("Provider.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) {
+ t.Run("Provide.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
+ n.SetIPFSConfig("Provide.Enabled", true)
// Default strategy is "all" which should provide unpinned content from dag put
})
defer nodes.StopDaemons()
@@ -94,11 +114,11 @@ func TestProvider(t *testing.T) {
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- t.Run("Provider.Enabled=false disables announcement of new CID from ipfs add", func(t *testing.T) {
+ t.Run("Provide.Enabled=false disables announcement of new CID from ipfs add", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", false)
+ n.SetIPFSConfig("Provide.Enabled", false)
})
defer nodes.StopDaemons()
@@ -106,28 +126,99 @@ func TestProvider(t *testing.T) {
expectNoProviders(t, cid, nodes[1:]...)
})
- t.Run("Provider.Enabled=false disables manual announcement via RPC command", func(t *testing.T) {
+ t.Run("Provide.Enabled=false disables manual announcement via RPC command", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", false)
+ n.SetIPFSConfig("Provide.Enabled", false)
})
defer nodes.StopDaemons()
cid := nodes[0].IPFSAddStr(time.Now().String())
res := nodes[0].RunIPFS("routing", "provide", cid)
- assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provider.Enabled is set to 'false'")
+ assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'")
assert.Equal(t, 1, res.ExitCode())
expectNoProviders(t, cid, nodes[1:]...)
})
+ t.Run("manual provide fails when no libp2p peers and no custom HTTP router", func(t *testing.T) {
+ t.Parallel()
+
+ h := harness.NewT(t)
+ node := h.NewNode().Init()
+ apply(node)
+ node.SetIPFSConfig("Provide.Enabled", true)
+ node.StartDaemon()
+ defer node.StopDaemon()
+
+ cid := node.IPFSAddStr(time.Now().String())
+ res := node.RunIPFS("routing", "provide", cid)
+ assert.Contains(t, res.Stderr.Trimmed(), "cannot provide, no connected peers")
+ assert.Equal(t, 1, res.ExitCode())
+ })
+
+ t.Run("manual provide succeeds via custom HTTP router when no libp2p peers", func(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock HTTP server that accepts provide requests.
+ // This simulates the undocumented API behavior described in
+ // https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9
+ // Note: This is NOT IPIP-378, which was not implemented.
+ mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Accept both PUT and POST requests to /routing/v1/providers and /routing/v1/ipns
+ if (r.Method == http.MethodPut || r.Method == http.MethodPost) &&
+ (strings.HasPrefix(r.URL.Path, "/routing/v1/providers") || strings.HasPrefix(r.URL.Path, "/routing/v1/ipns")) {
+ // Return HTTP 200 to indicate successful publishing
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer mockServer.Close()
+
+ h := harness.NewT(t)
+ node := h.NewNode().Init()
+ apply(node)
+ node.SetIPFSConfig("Provide.Enabled", true)
+ // Configure a custom HTTP router for providing.
+ // Using our mock server that will accept the provide requests.
+ routingConf := map[string]any{
+ "Type": "custom", // https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example
+ "Methods": map[string]any{
+ "provide": map[string]any{"RouterName": "MyCustomRouter"},
+ "get-ipns": map[string]any{"RouterName": "MyCustomRouter"},
+ "put-ipns": map[string]any{"RouterName": "MyCustomRouter"},
+ "find-peers": map[string]any{"RouterName": "MyCustomRouter"},
+ "find-providers": map[string]any{"RouterName": "MyCustomRouter"},
+ },
+ "Routers": map[string]any{
+ "MyCustomRouter": map[string]any{
+ "Type": "http",
+ "Parameters": map[string]any{
+ // Use the mock server URL
+ "Endpoint": mockServer.URL,
+ },
+ },
+ },
+ }
+ node.SetIPFSConfig("Routing", routingConf)
+ node.StartDaemon()
+ defer node.StopDaemon()
+
+ cid := node.IPFSAddStr(time.Now().String())
+ // The command should successfully provide via HTTP even without libp2p peers
+ res := node.RunIPFS("routing", "provide", cid)
+ assert.Empty(t, res.Stderr.String(), "Should have no errors when providing via HTTP router")
+ assert.Equal(t, 0, res.ExitCode(), "Should succeed with exit code 0")
+ })
+
// Right now Provide and Reprovide are tied together
t.Run("Reprovide.Interval=0 disables announcement of new CID too", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Interval", "0")
+ n.SetIPFSConfig("Provide.DHT.Interval", "0")
})
defer nodes.StopDaemons()
@@ -136,11 +227,11 @@ func TestProvider(t *testing.T) {
})
// It is a lesser evil - forces users to fix their config and have some sort of interval
- t.Run("Manual Reprovider trigger does not work when periodic Reprovider is disabled", func(t *testing.T) {
+ t.Run("Manual Reprovide trigger does not work when periodic reprovide is disabled", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Interval", "0")
+ n.SetIPFSConfig("Provide.DHT.Interval", "0")
})
defer nodes.StopDaemons()
@@ -149,18 +240,18 @@ func TestProvider(t *testing.T) {
expectNoProviders(t, cid, nodes[1:]...)
res := nodes[0].RunIPFS("routing", "reprovide")
- assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Reprovider.Interval is set to '0'")
+ assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.DHT.Interval is set to '0'")
assert.Equal(t, 1, res.ExitCode())
expectNoProviders(t, cid, nodes[1:]...)
})
// It is a lesser evil - forces users to fix their config and have some sort of interval
- t.Run("Manual Reprovider trigger does not work when Provider system is disabled", func(t *testing.T) {
+ t.Run("Manual Reprovide trigger does not work when Provide system is disabled", func(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", false)
+ n.SetIPFSConfig("Provide.Enabled", false)
})
defer nodes.StopDaemons()
@@ -169,7 +260,7 @@ func TestProvider(t *testing.T) {
expectNoProviders(t, cid, nodes[1:]...)
res := nodes[0].RunIPFS("routing", "reprovide")
- assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provider.Enabled is set to 'false'")
+ assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'")
assert.Equal(t, 1, res.ExitCode())
expectNoProviders(t, cid, nodes[1:]...)
@@ -179,7 +270,7 @@ func TestProvider(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ n.SetIPFSConfig("Provide.Strategy", "all")
})
defer nodes.StopDaemons()
@@ -191,7 +282,7 @@ func TestProvider(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "pinned")
+ n.SetIPFSConfig("Provide.Strategy", "pinned")
})
defer nodes.StopDaemons()
@@ -208,7 +299,7 @@ func TestProvider(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs")
+ n.SetIPFSConfig("Provide.Strategy", "pinned+mfs")
})
defer nodes.StopDaemons()
@@ -228,7 +319,7 @@ func TestProvider(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "roots")
+ n.SetIPFSConfig("Provide.Strategy", "roots")
})
defer nodes.StopDaemons()
@@ -245,7 +336,7 @@ func TestProvider(t *testing.T) {
t.Parallel()
nodes := initNodes(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "mfs")
+ n.SetIPFSConfig("Provide.Strategy", "mfs")
})
defer nodes.StopDaemons()
@@ -260,173 +351,176 @@ func TestProvider(t *testing.T) {
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) {
- t.Parallel()
+ if reprovide {
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "")
- })
+ t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) {
+ t.Parallel()
- cid := nodes[0].IPFSAddStr(time.Now().String())
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "")
+ })
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
- expectNoProviders(t, cid, nodes[1:]...)
-
- nodes[0].IPFS("routing", "reprovide")
+ cid := nodes[0].IPFSAddStr(time.Now().String())
- expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
- })
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
+ expectNoProviders(t, cid, nodes[1:]...)
- t.Run("Reprovides with 'all' strategy", func(t *testing.T) {
- t.Parallel()
+ nodes[0].IPFS("routing", "reprovide")
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- cid := nodes[0].IPFSAddStr(time.Now().String())
+ t.Run("Reprovides with 'all' strategy", func(t *testing.T) {
+ t.Parallel()
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
- expectNoProviders(t, cid, nodes[1:]...)
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "all")
+ })
- nodes[0].IPFS("routing", "reprovide")
+ cid := nodes[0].IPFSAddStr(time.Now().String())
- expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
- })
-
- t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) {
- t.Parallel()
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
+ expectNoProviders(t, cid, nodes[1:]...)
- foo := random.Bytes(1000)
- bar := random.Bytes(1000)
+ nodes[0].IPFS("routing", "reprovide")
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "pinned")
+ expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
})
- // Add a pin while offline so it cannot be provided
- cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w")
+ t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) {
+ t.Parallel()
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
+ foo := random.Bytes(1000)
+ bar := random.Bytes(1000)
- // Add content without pinning while daemon line
- cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--pin=false")
- cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false")
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "pinned")
+ })
- // Nothing should have been provided. The pin was offline, and
- // the others should not be provided per the strategy.
- expectNoProviders(t, cidFoo, nodes[1:]...)
- expectNoProviders(t, cidBar, nodes[1:]...)
- expectNoProviders(t, cidBarDir, nodes[1:]...)
+ // Add a pin while offline so it cannot be provided
+ cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w")
- nodes[0].IPFS("routing", "reprovide")
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
- // cidFoo is not pinned so should not be provided.
- expectNoProviders(t, cidFoo, nodes[1:]...)
- // cidBar gets provided by being a child from cidBarDir even though we added with pin=false.
- expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...)
- expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...)
- })
+ // Add content without pinning while daemon line
+ cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--pin=false")
+ cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false")
- t.Run("Reprovides with 'roots' strategy", func(t *testing.T) {
- t.Parallel()
+ // Nothing should have been provided. The pin was offline, and
+ // the others should not be provided per the strategy.
+ expectNoProviders(t, cidFoo, nodes[1:]...)
+ expectNoProviders(t, cidBar, nodes[1:]...)
+ expectNoProviders(t, cidBarDir, nodes[1:]...)
- foo := random.Bytes(1000)
- bar := random.Bytes(1000)
+ nodes[0].IPFS("routing", "reprovide")
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "roots")
+ // cidFoo is not pinned so should not be provided.
+ expectNoProviders(t, cidFoo, nodes[1:]...)
+ // cidBar gets provided by being a child from cidBarDir even though we added with pin=false.
+ expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...)
+ expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...)
})
- n0pid := nodes[0].PeerID().String()
- // Add a pin. Only root should get pinned but not provided
- // because node not started
- cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w")
+ t.Run("Reprovides with 'roots' strategy", func(t *testing.T) {
+ t.Parallel()
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
+ foo := random.Bytes(1000)
+ bar := random.Bytes(1000)
- cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo))
- cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false")
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "roots")
+ })
+ n0pid := nodes[0].PeerID().String()
- // cidFoo will get provided per the strategy but cidBar will not.
- expectProviders(t, cidFoo, n0pid, nodes[1:]...)
- expectNoProviders(t, cidBar, nodes[1:]...)
+ // Add a pin. Only root should get pinned but not provided
+ // because node not started
+ cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w")
- nodes[0].IPFS("routing", "reprovide")
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
- expectProviders(t, cidFoo, n0pid, nodes[1:]...)
- expectNoProviders(t, cidBar, nodes[1:]...)
- expectProviders(t, cidBarDir, n0pid, nodes[1:]...)
- })
+ cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo))
+ cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false")
- t.Run("Reprovides with 'mfs' strategy", func(t *testing.T) {
- t.Parallel()
+ // cidFoo will get provided per the strategy but cidBar will not.
+ expectProviders(t, cidFoo, n0pid, nodes[1:]...)
+ expectNoProviders(t, cidBar, nodes[1:]...)
- bar := random.Bytes(1000)
+ nodes[0].IPFS("routing", "reprovide")
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "mfs")
+ expectProviders(t, cidFoo, n0pid, nodes[1:]...)
+ expectNoProviders(t, cidBar, nodes[1:]...)
+ expectProviders(t, cidBarDir, n0pid, nodes[1:]...)
})
- n0pid := nodes[0].PeerID().String()
- // add something and lets put it in MFS
- cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q")
- nodes[0].IPFS("files", "cp", "/ipfs/"+cidBar, "/myfile")
+ t.Run("Reprovides with 'mfs' strategy", func(t *testing.T) {
+ t.Parallel()
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
+ bar := random.Bytes(1000)
- // cidBar is in MFS but not provided
- expectNoProviders(t, cidBar, nodes[1:]...)
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "mfs")
+ })
+ n0pid := nodes[0].PeerID().String()
- nodes[0].IPFS("routing", "reprovide")
-
- // And now is provided
- expectProviders(t, cidBar, n0pid, nodes[1:]...)
- })
+ // add something and lets put it in MFS
+ cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q")
+ nodes[0].IPFS("files", "cp", "/ipfs/"+cidBar, "/myfile")
- t.Run("Reprovides with 'pinned+mfs' strategy", func(t *testing.T) {
- t.Parallel()
-
- nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
- n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs")
- })
- n0pid := nodes[0].PeerID().String()
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
- // Add a pinned CID (should be provided)
- cidPinned := nodes[0].IPFSAddStr("pinned content", "--pin=true")
- // Add a CID to MFS (should be provided)
- cidMFS := nodes[0].IPFSAddStr("mfs content")
- nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile")
- // Add a CID that is neither pinned nor in MFS (should not be provided)
- cidNeither := nodes[0].IPFSAddStr("neither content", "--pin=false")
+ // cidBar is in MFS but not provided
+ expectNoProviders(t, cidBar, nodes[1:]...)
- nodes = nodes.StartDaemons().Connect()
- defer nodes.StopDaemons()
+ nodes[0].IPFS("routing", "reprovide")
- // Trigger reprovide
- nodes[0].IPFS("routing", "reprovide")
+ // And now is provided
+ expectProviders(t, cidBar, n0pid, nodes[1:]...)
+ })
- // Check that pinned CID is provided
- expectProviders(t, cidPinned, n0pid, nodes[1:]...)
- // Check that MFS CID is provided
- expectProviders(t, cidMFS, n0pid, nodes[1:]...)
- // Check that neither CID is not provided
- expectNoProviders(t, cidNeither, nodes[1:]...)
- })
+ t.Run("Reprovides with 'pinned+mfs' strategy", func(t *testing.T) {
+ t.Parallel()
+
+ nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.Strategy", "pinned+mfs")
+ })
+ n0pid := nodes[0].PeerID().String()
+
+ // Add a pinned CID (should be provided)
+ cidPinned := nodes[0].IPFSAddStr("pinned content", "--pin=true")
+ // Add a CID to MFS (should be provided)
+ cidMFS := nodes[0].IPFSAddStr("mfs content")
+ nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile")
+ // Add a CID that is neither pinned nor in MFS (should not be provided)
+ cidNeither := nodes[0].IPFSAddStr("neither content", "--pin=false")
+
+ nodes = nodes.StartDaemons().Connect()
+ defer nodes.StopDaemons()
+
+ // Trigger reprovide
+ nodes[0].IPFS("routing", "reprovide")
+
+ // Check that pinned CID is provided
+ expectProviders(t, cidPinned, n0pid, nodes[1:]...)
+ // Check that MFS CID is provided
+ expectProviders(t, cidMFS, n0pid, nodes[1:]...)
+ // Check that neither CID is not provided
+ expectNoProviders(t, cidNeither, nodes[1:]...)
+ })
+ }
t.Run("provide clear command removes items from provide queue", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
- n.SetIPFSConfig("Reprovider.Interval", "22h")
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ n.SetIPFSConfig("Provide.Enabled", true)
+ n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+ n.SetIPFSConfig("Provide.Strategy", "all")
})
nodes.StartDaemons()
defer nodes.StopDaemons()
@@ -452,9 +546,9 @@ func TestProvider(t *testing.T) {
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
- n.SetIPFSConfig("Reprovider.Interval", "22h")
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ n.SetIPFSConfig("Provide.Enabled", true)
+ n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+ n.SetIPFSConfig("Provide.Strategy", "all")
})
nodes.StartDaemons()
defer nodes.StopDaemons()
@@ -472,9 +566,9 @@ func TestProvider(t *testing.T) {
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", false)
- n.SetIPFSConfig("Reprovider.Interval", "22h")
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ n.SetIPFSConfig("Provide.Enabled", false)
+ n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+ n.SetIPFSConfig("Provide.Strategy", "all")
})
nodes.StartDaemons()
defer nodes.StopDaemons()
@@ -489,9 +583,9 @@ func TestProvider(t *testing.T) {
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(n *harness.Node) {
- n.SetIPFSConfig("Provider.Enabled", true)
- n.SetIPFSConfig("Reprovider.Interval", "22h")
- n.SetIPFSConfig("Reprovider.Strategy", "all")
+ n.SetIPFSConfig("Provide.Enabled", true)
+ n.SetIPFSConfig("Provide.DHT.Interval", "22h")
+ n.SetIPFSConfig("Provide.Strategy", "all")
})
nodes.StartDaemons()
defer nodes.StopDaemons()
@@ -512,5 +606,37 @@ func TestProvider(t *testing.T) {
// Should be a non-negative integer (0 or positive)
assert.GreaterOrEqual(t, result, 0)
})
+}
+
+func TestProvider(t *testing.T) {
+ t.Parallel()
+ variants := []struct {
+ name string
+ reprovide bool
+ apply cfgApplier
+ }{
+ {
+ name: "LegacyProvider",
+ reprovide: true,
+ apply: func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.DHT.SweepEnabled", false)
+ },
+ },
+ {
+ name: "SweepingProvider",
+ reprovide: false,
+ apply: func(n *harness.Node) {
+ n.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
+ },
+ },
+ }
+
+ for _, v := range variants {
+ v := v // capture
+ t.Run(v.name, func(t *testing.T) {
+ // t.Parallel()
+ runProviderSuite(t, v.reprovide, v.apply)
+ })
+ }
}
diff --git a/test/cli/webui_test.go b/test/cli/webui_test.go
new file mode 100644
index 00000000000..93b8fe4ccf6
--- /dev/null
+++ b/test/cli/webui_test.go
@@ -0,0 +1,88 @@
+package cli
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/ipfs/kubo/config"
+ "github.com/ipfs/kubo/test/cli/harness"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWebUI(t *testing.T) {
+ t.Parallel()
+
+ t.Run("NoFetch=true shows not available error", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ node.UpdateConfig(func(cfg *config.Config) {
+ cfg.Gateway.NoFetch = true
+ })
+
+ node.StartDaemon("--offline")
+
+ apiClient := node.APIClient()
+ resp := apiClient.Get("/webui/")
+
+ // Should return 503 Service Unavailable when WebUI is not in local store
+ assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+ // Check response contains helpful information
+ body := resp.Body
+ assert.Contains(t, body, "IPFS WebUI Not Available")
+ assert.Contains(t, body, "Gateway.NoFetch=true")
+ assert.Contains(t, body, "ipfs pin add")
+ assert.Contains(t, body, "ipfs dag import")
+ assert.Contains(t, body, "https://github.com/ipfs/ipfs-webui/releases")
+ })
+
+ t.Run("DeserializedResponses=false shows incompatible error", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ node.UpdateConfig(func(cfg *config.Config) {
+ cfg.Gateway.DeserializedResponses = config.False
+ })
+
+ node.StartDaemon()
+
+ apiClient := node.APIClient()
+ resp := apiClient.Get("/webui/")
+
+ // Should return 503 Service Unavailable
+ assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+ // Check response contains incompatibility message
+ body := resp.Body
+ assert.Contains(t, body, "IPFS WebUI Incompatible")
+ assert.Contains(t, body, "Gateway.DeserializedResponses=false")
+ assert.Contains(t, body, "WebUI requires deserializing IPFS responses")
+ assert.Contains(t, body, "Gateway.DeserializedResponses=true")
+ })
+
+ t.Run("Both NoFetch=true and DeserializedResponses=false shows incompatible error", func(t *testing.T) {
+ t.Parallel()
+ node := harness.NewT(t).NewNode().Init()
+
+ node.UpdateConfig(func(cfg *config.Config) {
+ cfg.Gateway.NoFetch = true
+ cfg.Gateway.DeserializedResponses = config.False
+ })
+
+ node.StartDaemon("--offline")
+
+ apiClient := node.APIClient()
+ resp := apiClient.Get("/webui/")
+
+ // Should return 503 Service Unavailable
+ assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+ // DeserializedResponses=false takes priority
+ body := resp.Body
+ assert.Contains(t, body, "IPFS WebUI Incompatible")
+ assert.Contains(t, body, "Gateway.DeserializedResponses=false")
+ // Should NOT mention NoFetch since DeserializedResponses check comes first
+ assert.NotContains(t, body, "NoFetch")
+ })
+}
diff --git a/test/dependencies/dependencies.go b/test/dependencies/dependencies.go
index 88c8ed7fc29..848ffba2fab 100644
--- a/test/dependencies/dependencies.go
+++ b/test/dependencies/dependencies.go
@@ -1,5 +1,4 @@
//go:build tools
-// +build tools
package tools
diff --git a/test/dependencies/go.mod b/test/dependencies/go.mod
index 962e3093a2a..65d3151aac7 100644
--- a/test/dependencies/go.mod
+++ b/test/dependencies/go.mod
@@ -89,7 +89,8 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
- github.com/gabriel-vasile/mimetype v1.4.9 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.10 // indirect
+ github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/ghostiam/protogetter v0.3.9 // indirect
@@ -103,7 +104,7 @@ require (
github.com/go-toolsmith/astp v1.1.0 // indirect
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/flock v0.12.1 // indirect
@@ -134,19 +135,21 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
- github.com/ipfs/boxo v0.34.0 // indirect
+ github.com/ipfs/boxo v0.35.0 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
- github.com/ipfs/go-block-format v0.2.2 // indirect
+ github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
- github.com/ipfs/go-datastore v0.8.3 // indirect
+ github.com/ipfs/go-datastore v0.9.0 // indirect
+ github.com/ipfs/go-dsqueue v0.0.5 // indirect
+ github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
- github.com/ipfs/go-ipld-format v0.6.2 // indirect
+ github.com/ipfs/go-ipld-format v0.6.3 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
- github.com/ipfs/go-unixfsnode v1.10.1 // indirect
+ github.com/ipfs/go-unixfsnode v1.10.2 // indirect
github.com/ipfs/kubo v0.31.0 // indirect
- github.com/ipld/go-car/v2 v2.14.3 // indirect
+ github.com/ipld/go-car/v2 v2.15.0 // indirect
github.com/ipld/go-codec-dagpb v1.7.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.6.1 // indirect
@@ -180,8 +183,8 @@ require (
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p v0.43.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
- github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect
- github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect
+ github.com/libp2p/go-libp2p-kad-dht v0.35.0 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
@@ -211,7 +214,7 @@ require (
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.2 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
- github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
github.com/nishanths/exhaustive v0.12.0 // indirect
@@ -244,9 +247,9 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/polyfloyd/go-errorlint v1.7.1 // indirect
- github.com/prometheus/client_golang v1.23.0 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect
github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
@@ -284,7 +287,7 @@ require (
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
- github.com/stretchr/testify v1.10.0 // indirect
+ github.com/stretchr/testify v1.11.1 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tdakkota/asciicheck v0.4.1 // indirect
github.com/tetafro/godot v1.5.0 // indirect
@@ -312,11 +315,11 @@ require (
gitlab.com/bosi/decorder v0.4.2 // indirect
go-simpler.org/musttag v0.13.0 // indirect
go-simpler.org/sloglint v0.9.0 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
- go.opentelemetry.io/otel v1.37.0 // indirect
- go.opentelemetry.io/otel/metric v1.37.0 // indirect
- go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
@@ -324,20 +327,21 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
- golang.org/x/crypto v0.41.0 // indirect
- golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ golang.org/x/crypto v0.42.0 // indirect
+ golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect
- golang.org/x/mod v0.27.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sync v0.16.0 // indirect
- golang.org/x/sys v0.35.0 // indirect
- golang.org/x/term v0.34.0 // indirect
- golang.org/x/text v0.28.0 // indirect
+ golang.org/x/mod v0.28.0 // indirect
+ golang.org/x/net v0.44.0 // indirect
+ golang.org/x/sync v0.17.0 // indirect
+ golang.org/x/sys v0.36.0 // indirect
+ golang.org/x/term v0.35.0 // indirect
+ golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
- golang.org/x/tools v0.36.0 // indirect
+ golang.org/x/tools v0.37.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
- google.golang.org/protobuf v1.36.7 // indirect
+ google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/test/dependencies/go.sum b/test/dependencies/go.sum
index a8018cc80c3..aec72c23d20 100644
--- a/test/dependencies/go.sum
+++ b/test/dependencies/go.sum
@@ -183,8 +183,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
-github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
+github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
+github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
@@ -230,8 +230,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
-github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
-github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY=
github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
@@ -332,22 +332,26 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g=
-github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA=
+github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY=
+github.com/ipfs/boxo v0.35.0/go.mod h1:uhaF0DGnbgEiXDTmD249jCGbxVkMm6+Ew85q6Uub7lo=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
-github.com/ipfs/go-block-format v0.2.2 h1:uecCTgRwDIXyZPgYspaLXoMiMmxQpSx2aq34eNc4YvQ=
-github.com/ipfs/go-block-format v0.2.2/go.mod h1:vmuefuWU6b+9kIU0vZJgpiJt1yicQz9baHXE8qR+KB8=
+github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
+github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
-github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4=
-github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M=
+github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
+github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-dsqueue v0.0.5 h1:TUOk15TlCJ/NKV8Yk2W5wgkEjDa44Nem7a7FGIjsMNU=
+github.com/ipfs/go-dsqueue v0.0.5/go.mod h1:i/jAlpZjBbQJLioN+XKbFgnd+u9eAhGZs9IrqIzTd9g=
github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ=
github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
+github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
+github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw=
@@ -358,8 +362,8 @@ github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68Q
github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw=
github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E=
github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A=
-github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU=
-github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk=
+github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8=
+github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk=
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
@@ -372,16 +376,16 @@ github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
-github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM=
-github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY=
+github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
+github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg=
github.com/ipfs/hang-fds v0.1.0 h1:deBiFlWHsVGzJ0ZMaqscEqRM1r2O1rFZ59UiQXb1Xko=
github.com/ipfs/hang-fds v0.1.0/go.mod h1:29VLWOn3ftAgNNgXg/al7b11UzuQ+w7AwtCGcTaWkbM=
github.com/ipfs/iptb v1.4.1 h1:faXd3TKGPswbHyZecqqg6UfbES7RDjTKQb+6VFPKDUo=
github.com/ipfs/iptb v1.4.1/go.mod h1:nTsBMtVYFEu0FjC5DgrErnABm3OG9ruXkFXGJoTV5OA=
github.com/ipfs/iptb-plugins v0.5.1 h1:11PNTNEt2+SFxjUcO5qpyCTXqDj6T8Tx9pU/G4ytCIQ=
github.com/ipfs/iptb-plugins v0.5.1/go.mod h1:mscJAjRnu4g16QK6oUBn9RGpcp8ueJmLfmPxIG/At78=
-github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8=
-github.com/ipld/go-car/v2 v2.14.3/go.mod h1:/vpSvPngOX8UnvmdFJ3o/mDgXa9LuyXsn7wxOzHDYQE=
+github.com/ipld/go-car/v2 v2.15.0 h1:RxtZcGXFx72zFESl+UUsCNQV2YMcy3gEMYx9M3uio24=
+github.com/ipld/go-car/v2 v2.15.0/go.mod h1:ovlq/n3xlVJDmoiN3Kd/Z7kIzQbdTIFSwltfOP+qIgk=
github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0=
github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM=
github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
@@ -462,10 +466,10 @@ github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvq
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0=
-github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o=
-github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs=
-github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0 h1:pWRC4FKR9ptQjA9DuMSrAn2D3vABE8r58iAeoLoK1Ig=
+github.com/libp2p/go-libp2p-kad-dht v0.35.0/go.mod h1:s70f017NjhsBx+SVl0/w+x//uyglrFpKLfvuQJj4QAU=
+github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
+github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI=
@@ -552,8 +556,8 @@ github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7B
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
-github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
-github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI=
+github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
@@ -647,14 +651,14 @@ github.com/polyfloyd/go-errorlint v1.7.1/go.mod h1:aXjNb1x2TNhoLsk26iv1yl7a+zTnX
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
-github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
-github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
@@ -778,8 +782,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@@ -861,20 +865,20 @@ go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9
go-simpler.org/sloglint v0.9.0 h1:/40NQtjRx9txvsB/RN022KsUJU+zaaSb/9q9BSefSrE=
go-simpler.org/sloglint v0.9.0/go.mod h1:G/OrAF6uxj48sHahCzrbarVMptL2kjWTaUeC8+fOGww=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
@@ -893,6 +897,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -909,11 +915,11 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
+golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE=
-golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4=
@@ -934,8 +940,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -963,8 +969,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
+golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -983,8 +989,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1014,8 +1020,10 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8=
+golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -1027,8 +1035,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
+golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -1042,8 +1050,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
@@ -1074,8 +1082,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
@@ -1104,8 +1112,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
-google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/test/sharness/t0119-prometheus-data/prometheus_metrics b/test/sharness/t0119-prometheus-data/prometheus_metrics
index 12be12cb271..ed1cdaba4f5 100644
--- a/test/sharness/t0119-prometheus-data/prometheus_metrics
+++ b/test/sharness/t0119-prometheus-data/prometheus_metrics
@@ -54,6 +54,15 @@ go_memstats_stack_sys_bytes
go_memstats_sys_bytes
go_sched_gomaxprocs_threads
go_threads
+http_server_request_body_size_bytes_bucket
+http_server_request_body_size_bytes_count
+http_server_request_body_size_bytes_sum
+http_server_request_duration_seconds_bucket
+http_server_request_duration_seconds_count
+http_server_request_duration_seconds_sum
+http_server_response_body_size_bytes_bucket
+http_server_response_body_size_bytes_count
+http_server_response_body_size_bytes_sum
ipfs_bitswap_active_block_tasks
ipfs_bitswap_active_tasks
ipfs_bitswap_bcast_skips_total
@@ -231,6 +240,7 @@ libp2p_relaysvc_status
libp2p_swarm_dial_ranking_delay_seconds_bucket
libp2p_swarm_dial_ranking_delay_seconds_count
libp2p_swarm_dial_ranking_delay_seconds_sum
+otel_scope_info
process_cpu_seconds_total
process_max_fds
process_network_receive_bytes_total
@@ -242,3 +252,4 @@ process_virtual_memory_bytes
process_virtual_memory_max_bytes
provider_reprovider_provide_count
provider_reprovider_reprovide_count
+target_info
diff --git a/test/sharness/t0275-cid-security.sh b/test/sharness/t0275-cid-security.sh
index e8d26555052..7f8764d3f61 100755
--- a/test/sharness/t0275-cid-security.sh
+++ b/test/sharness/t0275-cid-security.sh
@@ -15,7 +15,7 @@ test_expect_success "adding using unsafe function fails with error" '
'
test_expect_success "error reason is pointed out" '
- grep "insecure hash functions not allowed" add_out || test_fsh cat add_out
+ grep "potentially insecure hash functions not allowed" add_out || test_fsh cat add_out
'
test_expect_success "adding using too short of a hash function gives out an error" '
@@ -23,7 +23,7 @@ test_expect_success "adding using too short of a hash function gives out an erro
'
test_expect_success "error reason is pointed out" '
- grep "hashes must be at least 20 bytes long" block_out
+ grep "digest too small" block_out
'
@@ -35,7 +35,7 @@ test_cat_get() {
test_expect_success "error reason is pointed out" '
- grep "insecure hash functions not allowed" ipfs_cat
+ grep "potentially insecure hash functions not allowed" ipfs_cat
'
@@ -45,7 +45,7 @@ test_cat_get() {
'
test_expect_success "error reason is pointed out" '
- grep "hashes must be at least 20 bytes long" ipfs_get
+ grep "digest too small" ipfs_get
'
}
diff --git a/version.go b/version.go
index 244fe6726e1..eb1dd3850b3 100644
--- a/version.go
+++ b/version.go
@@ -3,18 +3,20 @@ package ipfs
import (
"fmt"
"runtime"
+
+ "github.com/ipfs/kubo/core/commands/cmdutils"
)
// CurrentCommit is the current git commit, this is set as a ldflag in the Makefile.
var CurrentCommit string
// CurrentVersionNumber is the current application's version literal.
-const CurrentVersionNumber = "0.37.0"
+const CurrentVersionNumber = "0.38.0"
const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint
// RepoVersion is the version number that we are currently expecting to see.
-const RepoVersion = 17
+const RepoVersion = 18
// GetUserAgentVersion is the libp2p user agent used by go-ipfs.
//
@@ -27,13 +29,13 @@ func GetUserAgentVersion() string {
}
userAgent += userAgentSuffix
}
- return userAgent
+ return cmdutils.CleanAndTrim(userAgent)
}
var userAgentSuffix string
func SetUserAgentSuffix(suffix string) {
- userAgentSuffix = suffix
+ userAgentSuffix = cmdutils.CleanAndTrim(suffix)
}
type VersionInfo struct {