A very experimental PLC implementation which uses BFT consensus for decentralization


Clean up logging situation

gbl08ma f1d776e6 0c201f68

+107 -54
+21 -7
abciapp/app.go
···
 
 import (
 	"context"
-	"fmt"
 	"os"
 	"sync"
 	"time"
···
 	dbm "github.com/cometbft/cometbft-db"
 	abcitypes "github.com/cometbft/cometbft/abci/types"
 	"github.com/cometbft/cometbft/crypto"
+	cmtlog "github.com/cometbft/cometbft/libs/log"
 	"github.com/cometbft/cometbft/privval"
 	bftstore "github.com/cometbft/cometbft/store"
 	"github.com/cosmos/iavl"
···
 
 type DIDPLCApplication struct {
 	runnerContext context.Context
+	logger cmtlog.Logger
 	plc plc.PLC
 	txFactory *transaction.Factory
 	indexDB dbm.DB
···
 }
 
 // store and plc must be able to share transaction objects
-func NewDIDPLCApplication(appContext context.Context, pv *privval.FilePV, treeDB dbm.DB, indexDB transaction.ExtendedDB, clearData func(), snapshotDirectory, didBloomFilterPath string, mempoolSubmitter types.MempoolSubmitter) (*DIDPLCApplication, *transaction.Factory, plc.PLC, func(), error) {
+func NewDIDPLCApplication(appContext context.Context, logger cmtlog.Logger, pv *privval.FilePV, treeDB dbm.DB, indexDB transaction.ExtendedDB, clearData func(), snapshotDirectory, didBloomFilterPath string, mempoolSubmitter types.MempoolSubmitter) (*DIDPLCApplication, *transaction.Factory, plc.PLC, func(), error) {
 	mkTree := func() *iavl.MutableTree {
 		// Using SpeedDefault appears to cause the processing time for ExecuteOperation to double on average
 		// Using SpeedBetterCompression appears to cause the processing time to double again
···
 
 	d := &DIDPLCApplication{
 		runnerContext: runnerContext,
+		logger: logger.With("module", "plcapp"),
 		tree: tree,
 		indexDB: indexDB,
 		mempoolSubmitter: mempoolSubmitter,
···
 		d.validatorPrivKey = pv.Key.PrivKey
 	}
 
-	d.txFactory, err = transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewDIDBloomFilterStore(didBloomFilterPath))
+	d.txFactory, err = transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewDIDBloomFilterStore(d.logger, didBloomFilterPath))
 	if err != nil {
 		return nil, nil, nil, cancelRunnerContext, stacktrace.Propagate(err, "")
 	}
···
 
 		*d.tree = *mkTree()
 
-		d.txFactory, err = transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewDIDBloomFilterStore(didBloomFilterPath))
+		d.txFactory, err = transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewDIDBloomFilterStore(d.logger, didBloomFilterPath))
 		if err != nil {
 			return stacktrace.Propagate(err, "")
 		}
···
 			st := time.Now()
 			err := d.txFactory.SaveDIDBloomFilter()
 			if err != nil {
-				fmt.Println("FAILED TO SAVE BLOOM FILTER:", stacktrace.Propagate(err, ""))
+				d.logger.Error("failed to save bloom filter", "error", stacktrace.Propagate(err, ""))
 			}
-			fmt.Println("SAVED BLOOM FILTER IN", time.Since(st))
+			d.logger.Debug("saved bloom filter", "took", time.Since(st))
 		}
 	})
 
···
 	}, nil
 }
 
+func (d *DIDPLCApplication) logMethod(method string, keyvals ...any) func(...any) {
+	st := time.Now()
+	d.logger.Debug(method+" start", keyvals...)
+	return func(extra ...any) {
+		args := make([]any, 0, len(keyvals)+len(extra)+2)
+		args = append(args, keyvals...)
+		args = append(args, extra...)
+		args = append(args, "took", time.Since(st))
+		d.logger.Debug(method+" done", args...)
+	}
+}
+
 func (d *DIDPLCApplication) FinishInitializing(blockStore *bftstore.BlockStore) error {
 	d.blockStore = blockStore
 
 	var err error
-	d.blockChallengeCoordinator, err = newBlockChallengeCoordinator(d.runnerContext, d.txFactory, blockStore, d.validatorPubKey)
+	d.blockChallengeCoordinator, err = newBlockChallengeCoordinator(d.runnerContext, d.logger, d.txFactory, blockStore, d.validatorPubKey)
 	if err != nil {
 		return stacktrace.Propagate(err, "")
 	}
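Note: the logMethod helper added to abciapp/app.go above is the timing shim the rest of this commit leans on. It logs "<method> start" at debug level right away and returns a closure that logs "<method> done" plus a "took" duration when invoked, which is why the ABCI handlers below wrap it in a single defer. A minimal sketch of the two calling styles, assuming the code added above (the method names and bodies here are illustrative, not part of the commit):

// Illustrative only: the one-liner form used throughout this commit.
// Logs "ExampleMethod start" immediately; the deferred call of the
// returned closure logs "ExampleMethod done" with a "took" duration.
func (d *DIDPLCApplication) ExampleMethod(height int64) error {
	defer (d.logMethod("ExampleMethod", "height", height))()
	// ... actual work ...
	return nil
}

// Illustrative only: values that are only known at the end can be passed
// to the returned closure; they are appended to the "done" line before "took".
func (d *DIDPLCApplication) ExampleMethodWithExtras(txs [][]byte) {
	done := d.logMethod("ExampleMethodWithExtras", "txs", len(txs))
	accepted := 0
	for range txs {
		accepted++ // stand-in for real per-transaction work
	}
	done("accepted", accepted)
}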
+3 -1
abciapp/app_test.go
···
 
 	dbm "github.com/cometbft/cometbft-db"
 	"github.com/cometbft/cometbft/abci/types"
+	cmtlog "github.com/cometbft/cometbft/libs/log"
 	"github.com/dgraph-io/badger/v4"
 	cbornode "github.com/ipfs/go-ipld-cbor"
 	"github.com/stretchr/testify/require"
···
 }
 
 func TestCheckTx(t *testing.T) {
-	app, _, _, cleanup, err := abciapp.NewDIDPLCApplication(t.Context(), nil, dbm.NewMemDB(), memDBWrapper{dbm.NewMemDB()}, nil, "", "", nil)
+	logger := cmtlog.NewNopLogger()
+	app, _, _, cleanup, err := abciapp.NewDIDPLCApplication(t.Context(), logger, nil, dbm.NewMemDB(), memDBWrapper{dbm.NewMemDB()}, nil, "", "", nil)
 	require.NoError(t, err)
 	t.Cleanup(cleanup)
 
+12 -9
abciapp/block_challenge.go
··· 4 4 "bytes" 5 5 "context" 6 6 "embed" 7 - "fmt" 8 7 "math/big" 9 8 "time" 10 9 11 10 "github.com/Yiling-J/theine-go" 12 11 "github.com/cometbft/cometbft/crypto" 12 + cmtlog "github.com/cometbft/cometbft/libs/log" 13 13 bftstore "github.com/cometbft/cometbft/store" 14 14 "github.com/consensys/gnark-crypto/ecc" 15 15 "github.com/consensys/gnark-crypto/ecc/bn254" 16 16 "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" 17 - "github.com/consensys/gnark/backend" 18 17 "github.com/consensys/gnark/backend/groth16" 19 18 "github.com/consensys/gnark/backend/witness" 20 19 "github.com/consensys/gnark/constraint" 21 - "github.com/consensys/gnark/constraint/solver" 22 20 "github.com/consensys/gnark/frontend" 21 + gnarklogger "github.com/consensys/gnark/logger" 23 22 "github.com/palantir/stacktrace" 24 23 "github.com/rs/zerolog" 25 24 "github.com/samber/lo" ··· 51 50 vkFile := lo.Must(blockChallengeCircuitFS.Open("proofcircuit/BlockChallenge_VerifyingKey")) 52 51 defer vkFile.Close() 53 52 lo.Must(blockChallengeVerifyingKey.ReadFrom(vkFile)) 53 + 54 + gnarklogger.Set(zerolog.Nop()) 54 55 } 55 56 56 57 type blockChallengeCoordinator struct { 57 58 g singleflight.Group[int64, []byte] 58 59 59 60 runnerContext context.Context 61 + logger cmtlog.Logger 60 62 61 63 isConfiguredToBeValidator bool 62 64 validatorAddress []byte ··· 66 68 sharedWitnessDataCache *theine.LoadingCache[int64, proof.BlockChallengeCircuit] 67 69 } 68 70 69 - func newBlockChallengeCoordinator(runnerContext context.Context, txFactory *transaction.Factory, blockStore *bftstore.BlockStore, pubKey crypto.PubKey) (*blockChallengeCoordinator, error) { 71 + func newBlockChallengeCoordinator(runnerContext context.Context, logger cmtlog.Logger, txFactory *transaction.Factory, blockStore *bftstore.BlockStore, pubKey crypto.PubKey) (*blockChallengeCoordinator, error) { 70 72 c := &blockChallengeCoordinator{ 71 73 runnerContext: runnerContext, 74 + logger: logger, 72 75 txFactory: txFactory, 73 76 nodeBlockStore: blockStore, 74 77 isConfiguredToBeValidator: pubKey != nil, ··· 126 129 go func() { 127 130 _, err := c.loadOrComputeBlockChallengeProof(c.runnerContext, height) 128 131 if err != nil { 129 - fmt.Printf("FAILED TO COMPUTE CHALLENGE FOR BLOCK %d: %v\n", height, stacktrace.Propagate(err, "")) 132 + c.logger.Error("failed to compute block challenge", "height", height, "error", stacktrace.Propagate(err, "")) 130 133 } 131 134 }() 132 135 } ··· 148 151 return nil, stacktrace.Propagate(err, "") 149 152 } 150 153 if proof == nil { 154 + st := time.Now() 151 155 // compute and store 152 156 proof, err = c.computeBlockChallengeProof(tx, height) 153 157 if err != nil { ··· 169 173 if err != nil { 170 174 return nil, stacktrace.Propagate(err, "") 171 175 } 176 + 177 + c.logger.Debug("computed and stored block challenge", "height", height, "took", time.Since(st)) 172 178 } 173 179 return proof, nil 174 180 }) ··· 180 186 if err != nil { 181 187 return nil, stacktrace.Propagate(err, "") 182 188 } 183 - 184 - // TODO consider using a different logger once we clean up our logging act 185 - // TODO open an issue in the gnark repo because backend.WithSolverOptions(solver.WithLogger(zerolog.Nop())) has no effect... 
186 - proof, err := groth16.Prove(blockChallengeConstraintSystem, blockChallengeProvingKey, witness, backend.WithSolverOptions(solver.WithLogger(zerolog.Nop()))) 189 + proof, err := groth16.Prove(blockChallengeConstraintSystem, blockChallengeProvingKey, witness) 187 190 if err != nil { 188 191 return nil, stacktrace.Propagate(err, "") 189 192 }
+12 -3
abciapp/execution.go
···
 import (
 	"bytes"
 	"context"
-	"fmt"
+	"encoding/hex"
 	"slices"
 	"time"
 
···
 
 // PrepareProposal implements [types.Application].
 func (d *DIDPLCApplication) PrepareProposal(ctx context.Context, req *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
+	defer (d.logMethod("PrepareProposal", "height", req.Height, "txs", len(req.Txs)))()
 	defer d.DiscardChanges()
 
 	if req.Height == 2 {
···
 
 // ProcessProposal implements [types.Application].
 func (d *DIDPLCApplication) ProcessProposal(ctx context.Context, req *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) {
+	defer (d.logMethod("ProcessProposal", "height", req.Height, "hash", req.Hash, "txs", len(req.Txs)))()
+
 	// always reset state before processing a new proposal
 	d.DiscardChanges()
 	// do not unconditionally defer DiscardChanges because we want to re-use the results in FinalizeBlock when we vote accept
···
 			return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_REJECT}, nil
 		}
 
-		st := time.Now()
 		result, err = finishProcessTx(ctx, d.transactionProcessorDependenciesForOngoingProcessing(true, req.Time), processor, tx)
 		if err != nil {
 			return nil, stacktrace.Propagate(err, "")
 		}
-		fmt.Println("FINISHPROCESSTX TOOK", time.Since(st))
 	}
 
 	// when preparing a proposal, invalid transactions should have been discarded
···
 
 // ExtendVote implements [types.Application].
 func (d *DIDPLCApplication) ExtendVote(ctx context.Context, req *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) {
+	defer (d.logMethod("ExtendVote", "height", req.Height, "hash", req.Hash))()
+
 	proof, err := d.blockChallengeCoordinator.loadOrComputeBlockChallengeProof(ctx, req.Height)
 	if err != nil {
 		return nil, stacktrace.Propagate(err, "")
···
 
 // VerifyVoteExtension implements [types.Application].
 func (d *DIDPLCApplication) VerifyVoteExtension(_ context.Context, req *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) {
+	defer (d.logMethod("VerifyVoteExtension", "height", req.Height, "hash", req.Hash, "validator", hex.EncodeToString(req.ValidatorAddress)))()
+
 	if len(req.VoteExtension) > 200 {
 		// that definitely ain't right
 		return &abcitypes.ResponseVerifyVoteExtension{
···
 
 // FinalizeBlock implements [types.Application].
 func (d *DIDPLCApplication) FinalizeBlock(ctx context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
+	defer (d.logMethod("FinalizeBlock", "height", req.Height, "hash", req.Hash))()
+
 	if bytes.Equal(req.Hash, d.lastProcessedProposalHash) && d.lastProcessedProposalExecTxResults != nil {
 		// the block that was decided was the one we processed in ProcessProposal, and ProcessProposal processed successfully
 		// reuse the uncommitted results
···
 
 // Commit implements [types.Application].
 func (d *DIDPLCApplication) Commit(context.Context, *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
+	defer (d.logMethod("Commit"))()
+
 	// ensure we always advance tree version by creating ongoingWrite if it hasn't been created already
 	d.createOngoingTxIfNeeded(time.Now())
 
+15 -13
abciapp/range_challenge.go
···
 import (
 	"context"
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
-	"fmt"
 	"math/big"
 	"slices"
 	"sync"
···
 
 	"github.com/Yiling-J/theine-go"
 	"github.com/cometbft/cometbft/crypto"
-	"github.com/cometbft/cometbft/mempool"
+	cmtlog "github.com/cometbft/cometbft/libs/log"
 	"github.com/cometbft/cometbft/privval"
 	"github.com/cometbft/cometbft/rpc/core"
 	bftstore "github.com/cometbft/cometbft/store"
···
 
 type RangeChallengeCoordinator struct {
 	runnerContext context.Context
+	logger cmtlog.Logger
 
 	isConfiguredToBeValidator bool
 	validatorPubKey crypto.PubKey
···
 
 func NewRangeChallengeCoordinator(
 	runnerContext context.Context,
+	logger cmtlog.Logger,
+	pv *privval.FilePV,
 	txFactory *transaction.Factory,
 	blockStore *bftstore.BlockStore,
 	nodeEventBus *cmttypes.EventBus,
 	mempoolSubmitter types.MempoolSubmitter,
-	consensusReactor consensusReactor,
-	pv *privval.FilePV) (*RangeChallengeCoordinator, error) {
+	consensusReactor consensusReactor) (*RangeChallengeCoordinator, error) {
 	c := &RangeChallengeCoordinator{
 		txFactory: txFactory,
 		runnerContext: runnerContext,
+		logger: logger,
 		nodeBlockStore: blockStore,
 		nodeEventBus: nodeEventBus,
 		mempoolSubmitter: mempoolSubmitter,
···
 	c.wg.Go(func() {
 		err := c.newBlocksSubscriber()
 		if err != nil {
-			fmt.Println("newBlocksSubscriber FAILED:", err)
+			c.logger.Error("blocks subscriber failed", "error", stacktrace.Propagate(err, ""))
 		}
 	})
 	c.wg.Go(func() {
···
 		if err != nil {
 			// note: this is expected in certain circumstances, such as the proof for the toHeight block not being ready yet as the block was just finalized
 			// (and the block may have been finalized without our votes)
-			fmt.Println("onNewBlock FAILED:", err)
+			c.logger.Error("range challenge block handler error", "error", stacktrace.Propagate(err, ""))
 		}
 	}()
 }
···
 		}
 	}
 
-	fmt.Println("RANGE CHALLENGE EVAL", shouldCommitToChallenge, shouldCompleteChallenge)
-
-	var transactionBytes []byte
+	var transactionBytes cmttypes.Tx
 	if shouldCompleteChallenge {
+		c.logger.Info("Creating challenge completion transaction", "fromHeight", fromHeight, "toHeight", toHeight, "provenHeight", provenHeight, "includedOnHeight", includedOnHeight)
 		transactionBytes, err = c.createCompleteChallengeTx(ctx, tx, int64(fromHeight), int64(toHeight), int64(provenHeight), int64(includedOnHeight))
 		if err != nil {
 			return stacktrace.Propagate(err, "")
 		}
 	} else if shouldCommitToChallenge {
+		c.logger.Info("Creating challenge commitment transaction", "toHeight", toHeight)
 		transactionBytes, err = c.createCommitToChallengeTx(ctx, tx, newBlockHeight)
 		if err != nil {
 			if errors.Is(err, errMissingProofs) {
···
 		return nil
 	}
 
+	txHashHex := hex.EncodeToString(transactionBytes.Hash())
+	c.logger.Debug("broadcasting range challenge transaction", "hash", txHashHex)
 	result, err := c.mempoolSubmitter.BroadcastTx(ctx, transactionBytes, true)
 	if err != nil {
-		if errors.Is(err, mempool.ErrTxInCache) {
-			// expected, as we don't wait for broadcast and therefore will try to repeatedly commit/complete
-			return nil
-		}
 		return stacktrace.Propagate(err, "")
 	}
 	if result.CheckTx.Code == 0 && shouldCompleteChallenge {
 		c.hasSubmittedChallengeCompletion = true
 	}
+	c.logger.Debug("range challenge transaction included", "hash", txHashHex, "txResult", result.TxResult.Code)
 	c.cachedNextProofFromHeight = mo.None[int64]()
 	return nil
 }
+2 -5
abciapp/snapshots.go
··· 15 15 "strconv" 16 16 "strings" 17 17 "sync" 18 - "time" 19 18 20 19 dbm "github.com/cometbft/cometbft-db" 21 20 abcitypes "github.com/cometbft/cometbft/abci/types" ··· 230 229 } 231 230 232 231 func (d *DIDPLCApplication) createSnapshot(treeVersion int64, tempFilename string) error { 232 + defer (d.logMethod("createSnapshot", "treeVersion", treeVersion, "tempFilename", tempFilename))() 233 + 233 234 it, err := d.tree.GetImmutable(treeVersion) 234 235 if err != nil { 235 236 return stacktrace.Propagate(err, "") ··· 243 244 return stacktrace.Propagate(err, "") 244 245 } 245 246 defer f.Close() 246 - 247 - st := time.Now() 248 247 249 248 err = writeSnapshot(f, d.indexDB, it) 250 249 if err != nil { ··· 278 277 } 279 278 280 279 os.Rename(tempFilename, filepath.Join(d.snapshotDirectory, fmt.Sprintf("%020d.snapshot", treeVersion))) 281 - 282 - fmt.Println("Took", time.Since(st), "to export") 283 280 284 281 return nil 285 282 }
+25 -9
main.go
···
 	appContext, cancelAppContext := context.WithCancel(context.Background())
 	defer cancelAppContext()
 
-	app, txFactory, plc, cleanup, err := abciapp.NewDIDPLCApplication(appContext, pv, treeDB, indexDB, recreateDatabases, filepath.Join(homeDir, "snapshots"), didBloomFilterPath, mempoolSubmitter)
+	logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout))
+	logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, bftconfig.DefaultLogLevel)
+	if err != nil {
+		log.Fatalf("failed to parse log level: %v", err)
+	}
+
+	app, txFactory, plc, cleanup, err := abciapp.NewDIDPLCApplication(
+		appContext,
+		logger,
+		pv,
+		treeDB,
+		indexDB,
+		recreateDatabases,
+		filepath.Join(homeDir, "snapshots"),
+		didBloomFilterPath,
+		mempoolSubmitter)
 	if err != nil {
 		log.Fatalf("failed to create DIDPLC application: %v", err)
 	}
···
 		log.Fatalf("failed to load node's key: %v", err)
 	}
 
-	logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout))
-	logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, bftconfig.DefaultLogLevel)
-
-	if err != nil {
-		log.Fatalf("failed to parse log level: %v", err)
-	}
-
 	node, err := nm.NewNode(
 		config.Config,
 		pv,
···
 		log.Fatalf("Finishing ABCI app initialization: %v", err)
 	}
 
-	rangeChallengeCoordinator, err := abciapp.NewRangeChallengeCoordinator(appContext, txFactory, node.BlockStore(), node.EventBus(), mempoolSubmitter, node.ConsensusReactor(), pv)
+	rangeChallengeCoordinator, err := abciapp.NewRangeChallengeCoordinator(
+		appContext,
+		logger.With("module", "plcapp"),
+		pv,
+		txFactory,
+		node.BlockStore(),
+		node.EventBus(),
+		mempoolSubmitter,
+		node.ConsensusReactor())
 	if err != nil {
 		log.Fatalf("Creating RangeChallengeCoordinator: %v", err)
 	}
+1
startfresh.sh
···
 go build -trimpath
 go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.19 init --home didplcbft-data
 sed -i 's/^create_empty_blocks = true$/create_empty_blocks = false/g' didplcbft-data/config/config.toml
+sed -i 's/^log_level = "info"$/log_level = "plcapp:debug,*:info"/g' didplcbft-data/config/config.toml
 ./didplcbft
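The extra sed line in startfresh.sh enables debug logging only for the new "plcapp" module (the key attached by logger.With("module", "plcapp") in abciapp/app.go and main.go) while leaving every other CometBFT module at info: after init, config.toml reads log_level = "plcapp:debug,*:info", the comma-separated module:level filter format that cmtflags.ParseLogLevel in main.go turns into the filtered logger.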
+14 -6
store/did_bloom.go
···
 import (
 	"encoding/binary"
 	"errors"
-	"fmt"
 	"io"
 	"math"
 	"os"
 	"slices"
 
 	"github.com/bits-and-blooms/bloom/v3"
+	cmtlog "github.com/cometbft/cometbft/libs/log"
 	"github.com/palantir/stacktrace"
 	"tangled.org/gbl08ma.com/didplcbft/transaction"
 )
 
 type DIDBloomFilterStore struct {
+	logger cmtlog.Logger
 	filePath string
 }
 
-func NewInMemoryDIDBloomFilterStore() *DIDBloomFilterStore {
-	return &DIDBloomFilterStore{}
+func NewInMemoryDIDBloomFilterStore(logger cmtlog.Logger) *DIDBloomFilterStore {
+	return &DIDBloomFilterStore{
+		logger: logger,
+	}
 }
 
-func NewDIDBloomFilterStore(filePath string) *DIDBloomFilterStore {
+func NewDIDBloomFilterStore(logger cmtlog.Logger, filePath string) *DIDBloomFilterStore {
 	return &DIDBloomFilterStore{
+		logger: logger,
 		filePath: filePath,
 	}
 }
···
 		return filter, nil
 	}
 
-	fmt.Println("(RE)BUILDING DID BLOOM FILTER")
-
 	filterEstimatedItems := uint(100000000) // we know there are like 80M DIDs at the time of writing
 	if estimatedDIDCount != 0 {
 		filterEstimatedItems = max(filterEstimatedItems, uint(estimatedDIDCount*3))
 	}
+
+	s.logger.Info("Rebuilding DID bloom filter", "itemCapacity", filterEstimatedItems)
 
 	filter = bloom.NewWithEstimates(filterEstimatedItems, 0.01)
 
···
 
 	defer iterator.Close()
 
+	itemCount := 0
 	for iterator.Valid() {
 		filter.Add(iterator.Key()[1:16])
 
 		iterator.Next()
+		itemCount++
 	}
 	err = iterator.Error()
 	if err != nil {
 		return nil, stacktrace.Propagate(err, "")
 	}
+
+	s.logger.Debug("rebuilt DID bloom filter", "itemCapacity", filterEstimatedItems, "itemCount", itemCount)
 
 	return filter, nil
 }
+2 -1
testutil/testutil.go
··· 7 7 "github.com/klauspost/compress/zstd" 8 8 "github.com/stretchr/testify/require" 9 9 10 + cmtlog "github.com/cometbft/cometbft/libs/log" 10 11 "tangled.org/gbl08ma.com/didplcbft/badgertodbm" 11 12 "tangled.org/gbl08ma.com/didplcbft/dbmtoiavldb" 12 13 "tangled.org/gbl08ma.com/didplcbft/dbmtoiavldb/zstddict" ··· 23 24 _, indexDB, err := badgertodbm.NewBadgerInMemoryDB() 24 25 require.NoError(t, err) 25 26 26 - factory, err := transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewInMemoryDIDBloomFilterStore()) 27 + factory, err := transaction.NewFactory(tree, indexDB, store.Consensus.CountOperations, store.NewInMemoryDIDBloomFilterStore(cmtlog.NewNopLogger())) 27 28 require.NoError(t, err) 28 29 29 30 return factory, tree, indexDB