5 changes: 1 addition & 4 deletions config/reprovider.go
@@ -14,7 +14,6 @@ type ReproviderStrategy int
 
 const (
 	ReproviderStrategyAll    ReproviderStrategy = 1 << iota // 1 (0b00001)
-	ReproviderStrategyFlat                                  // 2 (0b00010)
 	ReproviderStrategyPinned                                // 4 (0b00100)
 	ReproviderStrategyRoots                                 // 8 (0b01000)
 	ReproviderStrategyMFS                                   // 16 (0b10000)
@@ -31,10 +30,8 @@ func ParseReproviderStrategy(s string) ReproviderStrategy {
 	var strategy ReproviderStrategy
 	for _, part := range strings.Split(s, "+") {
 		switch part {
-		case "all", "": // special case, does not mix with others
+		case "all", "flat", "": // special case, does not mix with others
			return ReproviderStrategyAll
-		case "flat":
-			strategy |= ReproviderStrategyFlat
 		case "pinned":
 			strategy |= ReproviderStrategyPinned
 		case "roots":
26 changes: 26 additions & 0 deletions config/reprovider_test.go
@@ -0,0 +1,26 @@
+package config
+
+import "testing"
+
+func TestParseReproviderStrategy(t *testing.T) {
+	tests := []struct {
+		input  string
+		expect ReproviderStrategy
+	}{
+		{"all", ReproviderStrategyAll},
+		{"pinned", ReproviderStrategyPinned},
+		{"mfs", ReproviderStrategyMFS},
+		{"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS},
+		{"invalid", 0},
+		{"all+invalid", ReproviderStrategyAll},
+		{"", ReproviderStrategyAll},
+		{"flat+all", ReproviderStrategyAll},
+	}
+
+	for _, tt := range tests {
+		result := ParseReproviderStrategy(tt.input)
+		if result != tt.expect {
+			t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
+		}
+	}
+}
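To run just this new test locally, the standard Go tooling invocation applies (the package path is assumed from the file location in the repo):

```console
$ go test ./config -run TestParseReproviderStrategy
```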
2 changes: 1 addition & 1 deletion core/coreapi/unixfs.go
@@ -108,7 +108,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
 
 	// wrap the DAGService in a providingDAG service which provides every block written.
 	// note about strategies:
-	// - "all"/"flat" gets handled directly at the blockstore so no need to provide
+	// - "all" gets handled directly at the blockstore so no need to provide
 	// - "roots" gets handled in the pinner
 	// - "mfs" gets handled in mfs
 	// We need to provide the "pinned" cases only. Added blocks are not
22 changes: 2 additions & 20 deletions core/node/provider.go
@@ -184,7 +184,7 @@
 	}
 }
 
 func mfsRootProvider(mfsRoot *mfs.Root) provider.KeyChanFunc {
[GitHub Actions / go-lint check failure on line 187: func `mfsRootProvider` is unused (unused)]
 	return func(ctx context.Context) (<-chan cid.Cid, error) {
 		rootNode, err := mfsRoot.GetDirectory().GetNode()
 		if err != nil {
@@ -219,8 +219,7 @@
 // - "roots": Only root CIDs of pinned content
 // - "pinned": All pinned content (roots + children)
 // - "mfs": Only MFS content
-// - "flat": All blocks, no prioritization
-// - "all": Prioritized: pins first, then MFS roots, then all blocks
+// - "all": all blocks
 func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc {
 	switch strategyFlag {
 	case config.ReproviderStrategyRoots:
Expand All @@ -234,28 +233,11 @@
)
case config.ReproviderStrategyMFS:
return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
case config.ReproviderStrategyFlat:
default: // "all", "", "flat" (compat)
return in.Blockstore.AllKeysChan
default: // "all", ""
return createAllStrategyProvider(in)
}
}

// createAllStrategyProvider creates the complex "all" strategy provider.
// This implements a three-tier priority system:
// 1. Root blocks of direct and recursive pins (highest priority)
// 2. MFS root (medium priority)
// 3. All other blocks in blockstore (lowest priority)
func createAllStrategyProvider(in provStrategyIn) provider.KeyChanFunc {
return provider.NewPrioritizedProvider(
provider.NewPrioritizedProvider(
provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)),
mfsRootProvider(in.MFSRoot),
),
in.Blockstore.AllKeysChan,
)
}

// detectStrategyChange checks if the reproviding strategy has changed from what's persisted.
// Returns: (previousStrategy, hasChanged, error)
func detectStrategyChange(ctx context.Context, strategy string, ds datastore.Datastore) (string, bool, error) {
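For context on the `default` branch above: a `provider.KeyChanFunc` is simply a function yielding a channel of CIDs to announce, and `Blockstore.AllKeysChan` already has that shape, which is why it can be returned unwrapped. A minimal sketch of the signature involved (mirroring boxo's provider package; shown here only for illustration):

```go
package sketch

import (
	"context"

	"github.com/ipfs/go-cid"
)

// KeyChanFunc streams the CIDs that should be (re)provided.
// Blockstore.AllKeysChan(ctx) (<-chan cid.Cid, error) matches this type,
// so the "all" strategy needs no prioritizing wrapper at all.
type KeyChanFunc func(ctx context.Context) (<-chan cid.Cid, error)
```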
5 changes: 2 additions & 3 deletions core/node/storage.go
@@ -39,13 +39,12 @@ func BaseBlockstoreCtor(
 	opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}
 
 	// Blockstore providing integration:
-	// When strategy includes "all" or "flat", the blockstore directly provides blocks as they're Put.
+	// When strategy includes "all", the blockstore directly provides blocks as they're Put.
 	// Important: Provide calls from blockstore are intentionally BLOCKING.
 	// The Provider implementation (not the blockstore) should handle concurrency/queuing.
 	// This avoids spawning unbounded goroutines for concurrent block additions.
 	strategyFlag := config.ParseReproviderStrategy(providingStrategy)
-	shouldProvide := config.ReproviderStrategyAll | config.ReproviderStrategyFlat
-	if strategyFlag&shouldProvide != 0 {
+	if strategyFlag&config.ReproviderStrategyAll != 0 {
 		opts = append(opts, blockstore.Provider(prov))
 	}
 
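For readers skimming the bitwise test above, a minimal self-contained sketch of the same flag-membership pattern; the type and constant values here are illustrative stand-ins, not the ones defined in config/reprovider.go:

```go
package main

import "fmt"

type Strategy int

const (
	StrategyAll Strategy = 1 << iota // 1
	StrategyPinned                   // 2
	StrategyMFS                      // 4
)

func main() {
	flags := StrategyPinned | StrategyMFS
	// A non-zero AND means the bit is set; storage.go uses the same test to
	// decide whether the blockstore should provide blocks as they are Put.
	fmt.Println(flags&StrategyAll != 0)    // false
	fmt.Println(flags&StrategyPinned != 0) // true
}
```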
8 changes: 7 additions & 1 deletion docs/changelogs/v0.37.md
@@ -151,7 +151,7 @@ $ ipfs pin ls --names
 bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi recursive testname
 ```
 
-#### ⚙️ `Reprovider.Strategy` is now consistently respected
+#### ⚙️ `Reprovider.Strategy` is now consistently respected.
 
 Prior to this version, files added, blocks received etc. were "provided" to the network (announced on the DHT) regardless of the ["reproviding strategy" setting](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy). For example:
@@ -165,6 +165,12 @@ This was inefficient as content that should not be provided was getting provided
 > [!NOTE]
 > **Behavior change:** The `--offline` flag no longer affects providing behavior. Both `ipfs add` and `ipfs --offline add` now provide blocks according to the reproviding strategy when run against an online daemon (previously `--offline add` did not provide). Since `ipfs add` has been nearly as fast as offline mode [since v0.35](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.35.md#fast-ipfs-add-in-online-mode), `--offline` is rarely needed. To run truly offline operations, use `ipfs --offline daemon`.
 
+#### `Reprovider.Strategy=flat` renamed to `all`
+
+The `flat` reprovider strategy has been renamed to `all`, which is the default. `all` reprovides all blocks in the datastore without prioritization. The old behavior, which prioritized adding pinned and MFS data to the provide queue first, has been dropped.
+
+As detailed in https://github.com/ipfs/kubo/issues/10864, the previous behavior of `all` caused duplicate effort, used more memory, and was slower, while its advantages applied only when the system could not reprovide all the content in time.
+
 #### Removed unnecessary dependencies
 
 Kubo has been cleaned up by removing unnecessary dependencies and packages:
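For operators updating an existing node after this rename, a hedged example using the standard `ipfs config` command (any `flat` value can simply be replaced with `all`, which is also the default):

```console
$ ipfs config Reprovider.Strategy all
$ ipfs config Reprovider.Strategy
all
```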
3 changes: 0 additions & 3 deletions docs/config.md
@@ -2063,7 +2063,6 @@ Type: `optionalDuration` (unset for the default)
 Tells reprovider what should be announced. Valid strategies are:
 
 - `"all"` - announce all CIDs of stored blocks
-  - Order: root blocks of direct and recursive pins and MFS root are announced first, then the rest of blockstore
 - `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks)
   - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins
 - `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`)
@@ -2079,15 +2078,13 @@ Tells reprovider what should be announced. Valid strategies are:
 - `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies.
   - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache.
   - Order: first `pinned` and then the locally available part of `mfs`.
-- `"flat"` - same as `all`, announce all CIDs of stored blocks, but without prioritizing anything.
 
 **Strategy changes automatically clear the provide queue.** When you change `Reprovider.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`.
 
 **Memory requirements:**
 
 - Reproviding larger pinsets using the `all`, `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT.
 - This is due to the use of a buffered provider, which avoids holding a lock on the entire pinset during the reprovide cycle.
-- The `flat` strategy can be used to lower memory requirements, but only recommended if memory utilization is too high, prioritization of pins is not necessary, and it is acceptable to announce every block cached in the local repository.
 
 Default: `"all"`
 
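As a usage illustration for the combined strategies documented above, a sketch of the relevant fragment of Kubo's JSON config file (only the `Reprovider` section is shown; the rest of the config is elided):

```json
{
  "Reprovider": {
    "Strategy": "pinned+mfs"
  }
}
```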
30 changes: 0 additions & 30 deletions test/cli/provider_test.go
@@ -187,18 +187,6 @@ func TestProvider(t *testing.T) {
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})
 
-	t.Run("Provide with 'flat' strategy", func(t *testing.T) {
-		t.Parallel()
-
-		nodes := initNodes(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "flat")
-		})
-		defer nodes.StopDaemons()
-
-		cid := nodes[0].IPFSAddStr("flat strategy")
-		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
-	})
-
 	t.Run("Provide with 'pinned' strategy", func(t *testing.T) {
 		t.Parallel()
 
@@ -308,24 +296,6 @@
 		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
 	})
 
-	t.Run("Reprovides with 'flat' strategy", func(t *testing.T) {
-		t.Parallel()
-
-		nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) {
-			n.SetIPFSConfig("Reprovider.Strategy", "flat")
-		})
-
-		cid := nodes[0].IPFSAddStr(time.Now().String())
-
-		nodes = nodes.StartDaemons().Connect()
-		defer nodes.StopDaemons()
-		expectNoProviders(t, cid, nodes[1:]...)
-
-		nodes[0].IPFS("routing", "reprovide")
-
-		expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
-	})
-
 	t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) {
 		t.Parallel()
 