From 03711f8b601826f28e683361a76987cc2c42c6fe Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 31 Oct 2025 10:08:21 +0100 Subject: [PATCH 01/75] Uses codexConfig to mange codex-related files --- protocol/communities/manager_archive.go | 40 +++++++++----- protocol/communities/manager_archive_file.go | 57 +++++++++++++++----- 2 files changed, 71 insertions(+), 26 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index fd1ef1bfc38..9ddfe5845b3 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -63,7 +63,6 @@ type ArchiveManager struct { codexClient *CodexClient isCodexClientStarted bool torrentTasks map[string]metainfo.Hash - indexCidTasks map[string]string historyArchiveDownloadTasks map[string]*HistoryArchiveDownloadTask historyArchiveTasksWaitGroup sync.WaitGroup historyArchiveTasks sync.Map // stores `chan struct{}` @@ -86,7 +85,6 @@ func NewArchiveManager(amc *ArchiveManagerConfig) *ArchiveManager { torrentConfig: amc.TorrentConfig, codexConfig: amc.CodexConfig, torrentTasks: make(map[string]metainfo.Hash), - indexCidTasks: make(map[string]string), historyArchiveDownloadTasks: make(map[string]*HistoryArchiveDownloadTask), logger: amc.Logger, @@ -463,6 +461,7 @@ func (m *ArchiveManager) StartHistoryArchiveTasksInterval(community *Community, } case <-cancel: m.UnseedHistoryArchiveTorrent(community.ID()) + m.UnseedHistoryArchiveIndexCid(community.ID()) m.historyArchiveTasks.Delete(id) m.historyArchiveTasksWaitGroup.Done() return @@ -543,17 +542,25 @@ func (m *ArchiveManager) UnseedHistoryArchiveTorrent(communityID types.HexBytes) } func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) { - id := communityID.String() + // Remove local index file + err := m.removeCodexIndexFile(communityID) + if err != nil { + m.logger.Error("failed to remove local index file", zap.Error(err)) + } - if cid, exists := m.indexCidTasks[id]; exists { - m.logger.Debug("Unseeding index CID for community", zap.String("id", id), zap.String("cid", cid)) - // ToDo: consider "unpinning" the index Cid, so that it is no longer advertised on DHT - // For now, we remove it from tracking and could delete the local index file - delete(m.indexCidTasks, id) + // get currently advertised index Cid + cid, err := m.GetHistoryArchiveIndexCid(communityID) - // Optional: Remove local index file if we want to clean up storage - // indexFilePath := m.ArchiveFileManager.codexArchiveIndexFile(id) - // os.Remove(indexFilePath) + if err != nil { + m.logger.Debug("failed to get history archive index CID", zap.Error(err)) + return + } + + m.logger.Debug("Unseeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) + + err = m.codexClient.RemoveCid(cid) + if err != nil { + m.logger.Error("failed to remove CID from Codex", zap.Error(err)) } } @@ -755,14 +762,13 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex Cancelled: false, } - m.indexCidTasks[id] = indexCid timeout := time.After(20 * time.Second) // Create separate cancel channel for the index downloader to avoid channel competition indexDownloaderCancel := make(chan struct{}) // Create index downloader with path to index file using helper function - indexFilePath := m.ArchiveFileManager.codexArchiveIndexFile(id) + indexFilePath := m.codexArchiveIndexFilePath(communityID) indexDownloader := NewCodexIndexDownloader(m.codexClient, indexCid, indexFilePath, indexDownloaderCancel, 
m.logger) m.logger.Debug("fetching history index from Codex", zap.String("indexCid", indexCid)) @@ -810,7 +816,13 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex if indexDownloader.IsDownloadComplete() { - index, err := m.ArchiveFileManager.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) + err := m.writeCodexIndexCidToFile(communityID, indexCid) + if err != nil { + m.logger.Error("failed to write Codex index CID to file", zap.Error(err)) + return nil, err + } + + index, err := m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) if err != nil { return nil, err } diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index 5858e311dbb..f8fb517bbe4 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -16,6 +16,8 @@ import ( "path" "time" + "github.com/codex-storage/codex-go-bindings/codex" + "github.com/status-im/status-go/crypto" "github.com/status-im/status-go/crypto/types" "github.com/status-im/status-go/messaging" @@ -32,6 +34,7 @@ import ( type ArchiveFileManager struct { torrentConfig *params.TorrentConfig + codexConfig *codex.Config codexClient *CodexClient logger *zap.Logger persistence *Persistence @@ -44,6 +47,7 @@ type ArchiveFileManager struct { func NewArchiveFileManager(amc *ArchiveManagerConfig) *ArchiveFileManager { return &ArchiveFileManager{ torrentConfig: amc.TorrentConfig, + codexConfig: amc.CodexConfig, logger: amc.Logger, persistence: amc.Persistence, identity: amc.Identity, @@ -341,10 +345,8 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte to = endDate } - codexArchiveDir := m.torrentConfig.DataDir + "/codex/" + communityID.String() - codexIndexPath := codexArchiveDir + "/index" - codexIndexCidPath := codexArchiveDir + "/index-cid" - // codexDataPath := codexArchiveDir + "/data" + codexArchiveDir := m.codexArchiveDirPath(communityID) + codexIndexPath := m.codexArchiveIndexFilePath(communityID) m.logger.Debug("codexArchiveDir", zap.String("codexArchiveDir", codexArchiveDir)) @@ -508,12 +510,12 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte return codexArchiveIDs, err } - err = os.WriteFile(codexIndexPath, codexIndexBytes, 0644) // nolint: gosec + err = m.writeCodexIndexToFile(communityID, codexIndexBytes) if err != nil { return codexArchiveIDs, err } - err = os.WriteFile(codexIndexCidPath, []byte(cid), 0644) // nolint: gosec + err = m.writeCodexIndexCidToFile(communityID, cid) if err != nil { return codexArchiveIDs, err } @@ -558,8 +560,41 @@ func (m *ArchiveFileManager) archiveIndexFile(communityID string) string { return path.Join(m.torrentConfig.DataDir, communityID, "index") } -func (m *ArchiveFileManager) codexArchiveIndexFile(communityID string) string { - return path.Join(m.torrentConfig.DataDir, "codex", communityID, "index") +func (m *ArchiveFileManager) codexArchiveDirPath(communityID types.HexBytes) string { + return path.Join(m.codexConfig.DataDir, communityID.String()) +} + +func (m *ArchiveFileManager) codexArchiveIndexFilePath(communityID types.HexBytes) string { + return path.Join(m.codexConfig.DataDir, communityID.String(), "index") +} + +func (m *ArchiveFileManager) codexArchiveIndexCidFilePath(communityID types.HexBytes) string { + return path.Join(m.codexConfig.DataDir, communityID.String(), "index-cid") +} + +func (m *ArchiveFileManager) writeCodexIndexToFile(communityID types.HexBytes, bytes []byte) error { + 
indexFilePath := m.codexArchiveIndexFilePath(communityID) + return os.WriteFile(indexFilePath, bytes, 0644) // nolint: gosec +} + +func (m *ArchiveFileManager) readCodexIndexFromFile(communityID types.HexBytes) ([]byte, error) { + indexFilePath := m.codexArchiveIndexFilePath(communityID) + return os.ReadFile(indexFilePath) +} + +func (m *ArchiveFileManager) removeCodexIndexFile(communityID types.HexBytes) error { + indexFilePath := m.codexArchiveIndexFilePath(communityID) + return os.Remove(indexFilePath) +} + +func (m *ArchiveFileManager) writeCodexIndexCidToFile(communityID types.HexBytes, cid string) error { + cidFilePath := m.codexArchiveIndexCidFilePath(communityID) + return os.WriteFile(cidFilePath, []byte(cid), 0644) // nolint: gosec +} + +func (m *ArchiveFileManager) readCodexIndexCidFromFile(communityID types.HexBytes) ([]byte, error) { + cidFilePath := m.codexArchiveIndexCidFilePath(communityID) + return os.ReadFile(cidFilePath) } func (m *ArchiveFileManager) createWakuMessageArchive(from time.Time, to time.Time, messages []messagingtypes.ReceivedMessage, topics [][]byte) *protobuf.WakuMessageArchive { @@ -637,8 +672,7 @@ func (m *ArchiveFileManager) GetHistoryArchiveMagnetlink(communityID types.HexBy } func (m *ArchiveFileManager) GetHistoryArchiveIndexCid(communityID types.HexBytes) (string, error) { - codexArchiveDir := m.torrentConfig.DataDir + "/codex/" + communityID.String() - codexIndexCidPath := codexArchiveDir + "/index-cid" + codexIndexCidPath := m.codexArchiveIndexCidFilePath(communityID) cidData, err := os.ReadFile(codexIndexCidPath) if err != nil { @@ -797,8 +831,7 @@ func (m *ArchiveFileManager) LoadHistoryArchiveIndexFromFile(myKey *ecdsa.Privat func (m *ArchiveFileManager) CodexLoadHistoryArchiveIndexFromFile(myKey *ecdsa.PrivateKey, communityID types.HexBytes) (*protobuf.CodexWakuMessageArchiveIndex, error) { codexWakuMessageArchiveIndexProto := &protobuf.CodexWakuMessageArchiveIndex{} - indexPath := m.codexArchiveIndexFile(communityID.String()) - indexData, err := os.ReadFile(indexPath) + indexData, err := m.readCodexIndexFromFile(communityID) if err != nil { return nil, err } From 0d3cb5218d5ddfa1ba9a30e31eff5e8272035a44 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 31 Oct 2025 10:13:23 +0100 Subject: [PATCH 02/75] use param.CodexConfig instead of codex.Config --- protocol/communities/manager_archive_file.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index f8fb517bbe4..6f06b9323fa 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -16,8 +16,6 @@ import ( "path" "time" - "github.com/codex-storage/codex-go-bindings/codex" - "github.com/status-im/status-go/crypto" "github.com/status-im/status-go/crypto/types" "github.com/status-im/status-go/messaging" @@ -34,7 +32,7 @@ import ( type ArchiveFileManager struct { torrentConfig *params.TorrentConfig - codexConfig *codex.Config + codexConfig *params.CodexConfig codexClient *CodexClient logger *zap.Logger persistence *Persistence From bcbaca7ca787ae6a7bdd7c3ff500b781251b956d Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 31 Oct 2025 13:09:05 +0100 Subject: [PATCH 03/75] Rough version of the integration test --- protocol/communities/codex_client.go | 50 +++- protocol/communities/manager.go | 2 + protocol/communities/manager_archive.go | 14 + protocol/communities/manager_archive_nop.go | 6 + protocol/communities/persistence.go 
| 17 +- ...nities_messenger_token_permissions_test.go | 263 ++++++++++++++++++ 6 files changed, 335 insertions(+), 17 deletions(-) diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 40b7ababf4c..637702b2907 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "io" + "log" "github.com/codex-storage/codex-go-bindings/codex" @@ -12,9 +13,12 @@ import ( // CodexClient handles basic upload/download operations with Codex storage type CodexClient struct { - config codex.Config - node *codex.CodexNode - enabled bool + config codex.Config + node *codex.CodexNode + enabled bool + started bool + stopped bool + destroyed bool } // NewCodexClient creates a new Codex client @@ -31,16 +35,44 @@ func NewCodexClient(config params.CodexConfig) (CodexClient, error) { }, nil } -func (c CodexClient) Start() error { - return c.node.Start() +func (c *CodexClient) Start() error { + if c.started { + return nil + } + err := c.node.Start() + if err != nil { + return err + } + c.started = true + return nil } -func (c CodexClient) Stop() error { - return c.node.Stop() +func (c *CodexClient) Stop() error { + log.Println("AAAAAAAAAAAAAAA!!!!!!!!!!!!!!!!!!!!!!!!!!!!") + if c.stopped { + return nil + } + log.Println("Stopping Codex client...!!!!!!!!!!!!!!!!!!!!!!!!!!!!") + err := c.node.Stop() + if err != nil { + return err + } + c.stopped = true + return nil } -func (c CodexClient) Destroy() error { - return c.node.Destroy() +func (c *CodexClient) Destroy() error { + log.Println("BBBBBBBBBBBBBBBB????????????????????????????????") + if c.destroyed { + return nil + } + log.Println("Destroy Destroy Destroy ???????????????????????") + err := c.node.Destroy() + if err != nil { + return err + } + c.destroyed = true + return nil } func (c *CodexClient) UpdateLogLevel(logLevel string) error { diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 50f98c262af..c514df3029f 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -214,6 +214,8 @@ type ArchiveService interface { SetCodexConfig(*params.CodexConfig) StartTorrentClient() error StartCodexClient() error + SetCodexClient(client *CodexClient) + GetCodexClient() *CodexClient Stop() error IsReady() bool GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 9ddfe5845b3..3b3fe6c7201 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -122,6 +122,7 @@ func (m *ArchiveManager) SetTorrentConfig(config *params.TorrentConfig) { func (m *ArchiveManager) SetCodexConfig(config *params.CodexConfig) { m.codexConfig = config + m.ArchiveFileManager.codexConfig = config } // getTCPandUDPport will return the same port number given if != 0, @@ -213,6 +214,9 @@ func (m *ArchiveManager) StartTorrentClient() error { } func (m *ArchiveManager) StartCodexClient() error { + + m.logger.Info("======================Starting codex client=============================") + if m.codexConfig == nil { return fmt.Errorf("can't start codex client: missing codexConfig") } @@ -269,6 +273,16 @@ func (m *ArchiveManager) Stop() error { return nil } +func (m *ArchiveManager) GetCodexClient() *CodexClient { + return m.codexClient +} + +func (m *ArchiveManager) SetCodexClient(client *CodexClient) { + m.codexClient = client + 
m.ArchiveFileManager.codexClient = client + m.isCodexClientStarted = true +} + func (m *ArchiveManager) torrentClientStarted() bool { return m.torrentClient != nil } diff --git a/protocol/communities/manager_archive_nop.go b/protocol/communities/manager_archive_nop.go index af057be7ea8..4c96575c987 100644 --- a/protocol/communities/manager_archive_nop.go +++ b/protocol/communities/manager_archive_nop.go @@ -39,6 +39,12 @@ func (tmm *ArchiveManagerNop) StartCodexClient() error { return nil } +func (tmm *ArchiveManagerNop) GetCodexClient() *CodexClient { + return nil +} + +func (tmm *ArchiveManagerNop) SetCodexClient(client *CodexClient) {} + func (tmm *ArchiveManagerNop) Stop() error { return nil } diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go index b5fd2392c07..93ba71cadbd 100644 --- a/protocol/communities/persistence.go +++ b/protocol/communities/persistence.go @@ -2230,14 +2230,15 @@ func (p *Persistence) UpdateAndPruneEncryptionKeyRequests(communityID types.HexB } func (p *Persistence) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - var preference string - err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) - if err == sql.ErrNoRows { - return "auto", nil // Default preference - } else if err != nil { - return "", err - } - return preference, nil + return "codex", nil + // var preference string + // err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) + // if err == sql.ErrNoRows { + // return "auto", nil // Default preference + // } else if err != nil { + // return "", err + // } + // return preference, nil } func (p *Persistence) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 59c338ce53e..e2827f128f4 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -6,6 +6,7 @@ import ( "crypto/ecdsa" "errors" "fmt" + "log" "os" "strconv" "strings" @@ -13,6 +14,7 @@ import ( "testing" "time" + "github.com/codex-storage/codex-go-bindings/codex" "github.com/google/uuid" "github.com/stretchr/testify/suite" "go.uber.org/zap" @@ -2325,6 +2327,267 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedArchiveMe s.Require().Equal(messageText1, receivedMessage1.Text) } +func PrintArchiveIndex(index *protobuf.CodexWakuMessageArchiveIndex) { + fmt.Println("********************* Archive Index **********************") + for hash, meta := range index.Archives { + fmt.Printf(" Hash: %s\n", hash) + if meta != nil && meta.Metadata != nil { + fmt.Printf(" CID: %s\n", meta.Cid) + fmt.Printf(" From: %d\n", meta.Metadata.From) + fmt.Printf(" To: %d\n", meta.Metadata.To) + // Print other fields as needed + } + } +} + +func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArchiveMessages() { + + dataDir := os.TempDir() + "/archivedata" + + log.Println("Data directory:", dataDir) + + codexConfig := params.CodexConfig{ + Enabled: false, + Config: codex.Config{ + DataDir: dataDir, + BlockRetries: 10, + LogLevel: "ERROR", + LogFormat: codex.LogFormatNoColors, + }, + } + + // Share archive directory between all users + 
s.owner.archiveManager.SetCodexConfig(&codexConfig) + s.bob.archiveManager.SetCodexConfig(&codexConfig) + + err := s.owner.archiveManager.StartCodexClient() + s.Require().NoError(err) + codexClient := s.owner.archiveManager.GetCodexClient() + s.Require().NotNil(codexClient) + // defer codexClient.Stop() //nolint: errcheck + // defer codexClient.Stop() //nolint: errcheck + + s.bob.archiveManager.SetCodexClient(codexClient) + + // 1.1. Create community + community, chat := s.createCommunity() + + // 1.2. Setup permissions + communityPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_BECOME_MEMBER, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "100000000000000000000", + Decimals: uint64(18), + }, + }, + } + + channelPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_CAN_VIEW_AND_POST_CHANNEL, + ChatIds: []string{chat.ID}, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "200000000000000000000", + Decimals: uint64(18), + }, + }, + } + + waitOnChannelKeyAdded := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeyAdd { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.owner.identity.PublicKey)] + return ok + }) + + waitOnCommunityPermissionCreated := waitOnCommunitiesEvent(s.owner, func(sub *communities.Subscription) bool { + return len(sub.Community.TokenPermissions()) == 2 + }) + + response, err := s.owner.CreateCommunityTokenPermission(communityPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + response, err = s.owner.CreateCommunityTokenPermission(channelPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + community = response.Communities()[0] + s.Require().True(community.HasTokenPermissions()) + s.Require().Len(community.TokenPermissions(), 2) + + err = <-waitOnCommunityPermissionCreated + s.Require().NoError(err) + s.Require().True(community.Encrypted()) + + err = <-waitOnChannelKeyAdded + s.Require().NoError(err) + + // 2. Owner: Send a message A + messageText1 := RandomLettersString(10) + message1 := s.sendChatMessage(s.owner, chat.ID, messageText1) + + // 2.2. Retrieve own message (to make it stored in the archive later) + _, err = s.owner.RetrieveAll() + s.Require().NoError(err) + + log.Println("Message sent with ID:", message1.ID) + + // 3. 
Owner: Create community archive + const partition = 2 * time.Minute + messageDate := time.UnixMilli(int64(message1.Timestamp)) + startDate := messageDate.Add(-time.Minute) + endDate := messageDate.Add(time.Minute) + topic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(chat.ID)) + communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) + topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} + + // dataDir := os.TempDir() + "/archivedata" + + // log.Println("Data directory:", dataDir) + + // codexConfig := params.CodexConfig{ + // Enabled: false, + // Config: codex.Config{ + // DataDir: dataDir, + // BlockRetries: 10, + // LogLevel: "ERROR", + // LogFormat: codex.LogFormatNoColors, + // }, + // } + + // // Share archive directory between all users + // s.owner.archiveManager.SetCodexConfig(&codexConfig) + // s.bob.archiveManager.SetCodexConfig(&codexConfig) + + // err = s.owner.archiveManager.StartCodexClient() + // s.Require().NoError(err) + // codexClient := s.owner.archiveManager.GetCodexClient() + // s.Require().NotNil(codexClient) + // defer codexClient.Stop() //nolint: errcheck + + // s.bob.archiveManager.SetCodexClient(codexClient) + + s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + + archiveIDs, err := s.owner.archiveManager.CreateHistoryArchiveCodexFromDB(community.ID(), topics, startDate, endDate, partition, community.Encrypted()) + s.Require().NoError(err) + s.Require().Len(archiveIDs, 1) + + community, err = s.owner.GetCommunityByID(community.ID()) + s.Require().NoError(err) + + // 4. Bob: join community (satisfying membership, but not channel permissions) + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, communityPermission.TokenCriteria[0]) + s.advertiseCommunityTo(community, s.bob) + + waitForKeysDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action := sub.keyActions.CommunityKeyAction + if action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok := action.Members[s.bob.IdentityPublicKeyString()] + return ok + }) + + s.joinCommunity(community, s.bob) + + err = <-waitForKeysDistributedToBob + s.Require().NoError(err) + + // 5. Bob: Import community archive + // The archive is successfully decrypted, but the message inside is not. 
+ // https://github.com/status-im/status-desktop/issues/13105 can be reproduced at this stage + // by forcing `encryption.ErrHashRatchetGroupIDNotFound` in `ExtractMessagesFromHistoryArchive` after decryption here: + // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 + + // Ensure owner has archive + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + // Ensure bob has archive (because they share same local directory) + archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.bob.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + archiveHash := maps.Keys(archiveIndex.Archives)[0] + + // Save message archive ID as in + // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4325-L4336 + err = s.bob.archiveManager.SaveMessageArchiveID(community.ID(), archiveHash) + s.Require().NoError(err) + + // Import archive + s.bob.importDelayer.once.Do(func() { + close(s.bob.importDelayer.wait) + }) + cancel := make(chan struct{}) + err = s.bob.importHistoryArchives(community.ID(), cancel) + s.Require().NoError(err) + + // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel + receivedMessage1, err := s.bob.MessageByID(message1.ID) + s.Require().Nil(receivedMessage1) + s.Require().Error(err) + + chatID := []byte(chat.ID) + hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + s.Require().NoError(err) + s.Require().Equal(1, hashRatchetMessagesCount) + + // Make bob satisfy channel criteria + waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.bob.identity.PublicKey)] + return ok + }) + + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, channelPermission.TokenCriteria[0]) + + // force owner to reevaluate channel members + // in production it will happen automatically, by periodic check + err = s.owner.communitiesManager.ForceMembersReevaluation(community.ID()) + s.Require().NoError(err) + + err = <-waitOnChannelKeyToBeDistributedToBob + s.Require().NoError(err) + + // Finally ensure that the message from archive was retrieved and decrypted + + // NOTE: In theory a single RetrieveAll call should be enough, + // because we immediately process all hash ratchet messages + response, err = s.bob.RetrieveAll() + s.Require().NoError(err) + s.Require().Len(response.Messages(), 1) + + receivedMessage1, ok := response.messages[message1.ID] + log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) + s.Require().True(ok) + s.Require().Equal(messageText1, receivedMessage1.Text) +} + func (s *MessengerCommunitiesTokenPermissionsSuite) TestDeleteChannelWithTokenPermission() { // Setup community with two permitted channels community, firstChat := s.createCommunity() From 3bbe1c7f6fe4b29ab39886ead820aef751c60657 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 31 Oct 2025 14:28:55 +0100 Subject: [PATCH 04/75] draft of codex_config sql table 
with update node_config code --- .../sql/1761913234_add_codex_config.up.sql | 28 ++++++ nodecfg/node_config.go | 94 +++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 appdatabase/migrations/sql/1761913234_add_codex_config.up.sql diff --git a/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql new file mode 100644 index 00000000000..b3e6145f2eb --- /dev/null +++ b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql @@ -0,0 +1,28 @@ +CREATE TABLE codex_config ( + enabled BOOLEAN DEFAULT false, + log_level TEXT DEFAULT 'info', + log_format TEXT DEFAULT 'auto', + metrics_enabled BOOLEAN DEFAULT false, + metrics_address TEXT DEFAULT '127.0.0.1', + metrics_port INTEGER DEFAULT 9090, + data_dir VARCHAR NOT NULL, + listen_addrs TEXT DEFAULT '["/ip4/0.0.0.0/tcp/0"]', + nat TEXT DEFAULT 'any', + disc_port INTEGER DEFAULT 8090, + net_privkey TEXT DEFAULT 'key', + bootstrap_nodes TEXT, + max_peers INTEGER DEFAULT 160, + num_threads INTEGER DEFAULT 0, + agent_string TEXT DEFAULT 'Codex', + repo_kind TEXT DEFAULT 'fs', + storage_quota INTEGER DEFAULT 21474836480, -- 20 GiB + block_ttl INTEGER DEFAULT 2592000, -- 30 days + block_maintenance_interval INTEGER DEFAULT 600, -- 10 min + block_maintenance_number_of_blocks INTEGER DEFAULT 1000, + block_retries INTEGER DEFAULT 3000, + cache_size INTEGER DEFAULT 0, + log_file TEXT DEFAULT '', + + synthetic_id VARCHAR DEFAULT 'id' PRIMARY KEY +) WITHOUT ROWID; + diff --git a/nodecfg/node_config.go b/nodecfg/node_config.go index 06c1d808bb1..317ec450257 100644 --- a/nodecfg/node_config.go +++ b/nodecfg/node_config.go @@ -3,6 +3,7 @@ package nodecfg import ( "context" "database/sql" + "encoding/json" "strings" "github.com/status-im/status-go/params" @@ -115,6 +116,50 @@ func insertTorrentConfig(tx *sql.Tx, c *params.NodeConfig) error { return err } +// Insert or update codex_config table +func insertCodexConfig(tx *sql.Tx, c *params.NodeConfig) error { + listenAddrsJSON, err := json.Marshal(c.CodexConfig.ListenAddrs) + if err != nil { + return err + } + bootstrapNodesJSON, err := json.Marshal(c.CodexConfig.BootstrapNodes) + if err != nil { + return err + } + _, err = tx.Exec(` + INSERT OR REPLACE INTO codex_config ( + enabled, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, + listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, + repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, + block_retries, cache_size, log_file, synthetic_id + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`, + c.CodexConfig.Enabled, + c.CodexConfig.LogLevel, + c.CodexConfig.LogFormat, + c.CodexConfig.MetricsEnabled, + c.CodexConfig.MetricsAddress, + c.CodexConfig.MetricsPort, + c.CodexConfig.DataDir, + string(listenAddrsJSON), + c.CodexConfig.Nat, + c.CodexConfig.DiscoveryPort, + c.CodexConfig.NetPrivKeyFile, + string(bootstrapNodesJSON), + c.CodexConfig.MaxPeers, + c.CodexConfig.NumThreads, + c.CodexConfig.AgentString, + c.CodexConfig.RepoKind, + c.CodexConfig.StorageQuota, + c.CodexConfig.BlockTtl, + c.CodexConfig.BlockMaintenanceInterval, + c.CodexConfig.BlockMaintenanceNumberOfBlocks, + c.CodexConfig.BlockRetries, + c.CodexConfig.CacheSize, + c.CodexConfig.LogFile, + ) + return err +} + func insertWakuV2ConfigPreMigration(tx *sql.Tx, c *params.NodeConfig) error { _, err := tx.Exec(` INSERT OR REPLACE INTO wakuv2_config ( 
@@ -178,6 +223,7 @@ func nodeConfigNormalInserts() []insertFn { insertShhExtConfig, insertWakuV2ConfigPreMigration, insertTorrentConfig, + insertCodexConfig, insertWakuV2ConfigPostMigration, } } @@ -259,6 +305,54 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) { return nil, err } + // Load codex_config + var listenAddrsStr, bootstrapNodesStr string + err = tx.QueryRow(` + SELECT enabled, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, + listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, + repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, + block_retries, cache_size, log_file + FROM codex_config WHERE synthetic_id = 'id' + `).Scan( + &nodecfg.CodexConfig.Enabled, + &nodecfg.CodexConfig.LogLevel, + &nodecfg.CodexConfig.LogFormat, + &nodecfg.CodexConfig.MetricsEnabled, + &nodecfg.CodexConfig.MetricsAddress, + &nodecfg.CodexConfig.MetricsPort, + &nodecfg.CodexConfig.DataDir, + &listenAddrsStr, + &nodecfg.CodexConfig.Nat, + &nodecfg.CodexConfig.DiscoveryPort, + &nodecfg.CodexConfig.NetPrivKeyFile, + &bootstrapNodesStr, + &nodecfg.CodexConfig.MaxPeers, + &nodecfg.CodexConfig.NumThreads, + &nodecfg.CodexConfig.AgentString, + &nodecfg.CodexConfig.RepoKind, + &nodecfg.CodexConfig.StorageQuota, + &nodecfg.CodexConfig.BlockTtl, + &nodecfg.CodexConfig.BlockMaintenanceInterval, + &nodecfg.CodexConfig.BlockMaintenanceNumberOfBlocks, + &nodecfg.CodexConfig.BlockRetries, + &nodecfg.CodexConfig.CacheSize, + &nodecfg.CodexConfig.LogFile, + ) + if err != nil && err != sql.ErrNoRows { + return nil, err + } + // Unmarshal JSON fields + if listenAddrsStr != "" { + if err := json.Unmarshal([]byte(listenAddrsStr), &nodecfg.CodexConfig.ListenAddrs); err != nil { + return nil, err + } + } + if bootstrapNodesStr != "" { + if err := json.Unmarshal([]byte(bootstrapNodesStr), &nodecfg.CodexConfig.BootstrapNodes); err != nil { + return nil, err + } + } + err = tx.QueryRow("SELECT enabled, log_dir, log_level, log_namespaces, file, max_backups, max_size, compress_rotated, log_to_stderr FROM log_config WHERE synthetic_id = 'id'").Scan( &nodecfg.LogEnabled, &nodecfg.LogDir, &nodecfg.LogLevel, &nodecfg.LogNamespaces, &nodecfg.LogFile, &nodecfg.LogMaxBackups, &nodecfg.LogMaxSize, &nodecfg.LogCompressRotated, &nodecfg.LogToStderr) if err != nil && err != sql.ErrNoRows { From 2d83cb5326d7b2b653d1a0342ca82e7aae52748a Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Sat, 1 Nov 2025 00:11:46 +0100 Subject: [PATCH 05/75] updates the way the distribution preference is handled --- ...add_archive_distribution_preference.up.sql | 2 +- protocol/communities/manager.go | 8 +++--- protocol/communities/persistence.go | 24 +++++++++-------- ...nities_messenger_token_permissions_test.go | 21 +++++++++++++++ protocol/messenger_communities.go | 27 +++++++++++++++++-- 5 files changed, 64 insertions(+), 18 deletions(-) diff --git a/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql b/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql index 067f4f4c47f..56fcc31075a 100644 --- a/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql +++ b/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql @@ -1 +1 @@ -ALTER TABLE communities_archive_info ADD COLUMN preferred_distribution_method TEXT DEFAULT 'torrent'; \ No newline at end of file +ALTER TABLE communities_archive_info ADD COLUMN 
preferred_distribution_method TEXT DEFAULT 'codex'; \ No newline at end of file diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index c514df3029f..28911ee9aa3 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -72,9 +72,9 @@ var validateInterval = 2 * time.Minute // Archive distribution preferences const ( - ArchiveDistributionMethodAuto = "auto" // Default: try both methods, prefer fastest - ArchiveDistributionMethodTorrent = "torrent" // Prefer BitTorrent/magnetlink only - ArchiveDistributionMethodCodex = "codex" // Prefer Codex/IndexCid only + ArchiveDistributionMethodUnknown = "unknown" + ArchiveDistributionMethodTorrent = "torrent" + ArchiveDistributionMethodCodex = "codex" ) // Used for testing only @@ -3757,7 +3757,7 @@ func (m *Manager) GetArchiveDistributionPreference(communityID types.HexBytes) ( func (m *Manager) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { // Validate preference value switch preference { - case ArchiveDistributionMethodAuto, ArchiveDistributionMethodTorrent, ArchiveDistributionMethodCodex: + case ArchiveDistributionMethodTorrent, ArchiveDistributionMethodCodex: // Valid preference default: return errors.New("invalid archive distribution preference") diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go index 93ba71cadbd..1c78f35a0e9 100644 --- a/protocol/communities/persistence.go +++ b/protocol/communities/persistence.go @@ -1021,11 +1021,13 @@ func (p *Persistence) GetMagnetlinkMessageClock(communityID types.HexBytes) (uin } func (p *Persistence) SaveCommunityArchiveInfo(communityID types.HexBytes, magnetLinkClock uint64, lastArchiveEndDate uint64, indexCidClock uint64) error { - _, err := p.db.Exec(`INSERT INTO communities_archive_info (magnetlink_clock, last_message_archive_end_date, community_id, index_cid_clock) VALUES (?, ?, ?, ?)`, + _, err := p.db.Exec(`INSERT INTO communities_archive_info (magnetlink_clock, last_message_archive_end_date, community_id, index_cid_clock, preferred_distribution_method) VALUES (?, ?, ?, ?, ?)`, magnetLinkClock, lastArchiveEndDate, communityID.String(), - indexCidClock) + indexCidClock, + ArchiveDistributionMethodUnknown, + ) return err } @@ -2230,15 +2232,15 @@ func (p *Persistence) UpdateAndPruneEncryptionKeyRequests(communityID types.HexB } func (p *Persistence) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - return "codex", nil - // var preference string - // err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) - // if err == sql.ErrNoRows { - // return "auto", nil // Default preference - // } else if err != nil { - // return "", err - // } - // return preference, nil + // return "codex", nil + var preference string + err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) + if err == sql.ErrNoRows { + return ArchiveDistributionMethodUnknown, nil + } else if err != nil { + return "", err + } + return preference, nil } func (p *Persistence) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index e2827f128f4..78c95304dc9 100644 --- a/protocol/communities_messenger_token_permissions_test.go 
+++ b/protocol/communities_messenger_token_permissions_test.go @@ -2134,6 +2134,13 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedArchiveMe // 1.1. Create community community, chat := s.createCommunity() + // createCommunity sets history archive distribution method to "codex" - we need torrent for this test + err := s.owner.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodTorrent) + s.Require().NoError(err) + + err = s.bob.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodTorrent) + s.Require().NoError(err) + // 1.2. Setup permissions communityPermission := &requests.CreateCommunityTokenPermission{ CommunityID: community.ID(), @@ -2372,6 +2379,12 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch // 1.1. Create community community, chat := s.createCommunity() + // for community owner, the history archive distribution preference is set to Codex when the community is created + archiveDistributionPreferenceOwner, err := s.owner.communitiesManager.GetArchiveDistributionPreference(community.ID()) + s.Require().NoError(err) + log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) + // 1.2. Setup permissions communityPermission := &requests.CreateCommunityTokenPermission{ CommunityID: community.ID(), @@ -2506,6 +2519,14 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch s.joinCommunity(community, s.bob) + // when Bob requested to join, the history archive distribution preference should be set to Codex + archiveDistributionPreferenceBob, err := s.bob.communitiesManager.GetArchiveDistributionPreference(community.ID()) + s.Require().NoError(err) + + log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) + + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) + err = <-waitForKeysDistributedToBob s.Require().NoError(err) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 289773d4a94..2897c12fa8b 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -1478,6 +1478,23 @@ func (m *Messenger) RequestToJoinCommunity(request *requests.RequestToJoinCommun Priority: &messagingtypes.HighPriority, } + // we want to use codex for archive distribution + // but if it is already set to something else (handy in testing), respect that + archiveDistributionPreference, err := m.communitiesManager.GetArchiveDistributionPreference(community.ID()) + if err != nil { + return nil, err + } + + m.logger.Debug("Archive distribution preference (RequestToJoin):", zap.String("preference", archiveDistributionPreference)) + + if archiveDistributionPreference == communities.ArchiveDistributionMethodUnknown { + // If the preference is unknown, we can set it to codex + err = m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) + if err != nil { + return nil, err + } + } + _, err = m.SendMessageToControlNode(community, rawMessage) if err != nil { return nil, err @@ -2478,6 +2495,12 @@ func (m *Messenger) CreateCommunity(request *requests.CreateCommunity, createDef return nil, err } + // we want to use codex for archive distribution + err = 
m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) + if err != nil { + return nil, err + } + communitySettings := communities.CommunitySettings{ CommunityID: community.IDString(), HistoryArchiveSupportEnabled: request.HistoryArchiveSupportEnabled, @@ -4048,8 +4071,8 @@ importMessageArchivesLoop: var archiveMessages []*protobuf.WakuMessage preference, err := m.communitiesManager.GetArchiveDistributionPreference(communityID) if err != nil { - m.logger.Warn("failed to get archive distribution preference, using torrent", zap.Error(err)) - preference = "torrent" + m.logger.Warn("failed to get archive distribution preference, using codex", zap.Error(err)) + preference = "codex" } if preference == "codex" { archiveMessages, err = m.archiveManager.ExtractMessagesFromCodexHistoryArchive(communityID, downloadedArchiveID) From 956fc0ce84a5deccc92c25b4c76bee2d09953f8e Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Sat, 1 Nov 2025 00:39:47 +0100 Subject: [PATCH 06/75] adds triggering downloading and importing archives from Codex after joining the community --- protocol/communities/manager.go | 1 + protocol/communities/manager_archive.go | 5 ++++ protocol/communities/manager_archive_nop.go | 4 +++ protocol/messenger_communities.go | 9 +++++++ protocol/messenger_handler.go | 29 +++++++++++++++++++++ protocol/protobuf/communities.proto | 1 + 6 files changed, 49 insertions(+) diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 28911ee9aa3..b62d8d1bb24 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -233,6 +233,7 @@ type ArchiveService interface { DownloadHistoryArchivesByMagnetlink(communityID types.HexBytes, magnetlink string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) TorrentFileExists(communityID string) bool + CodexIndexCidFileExists(communityID types.HexBytes) bool } type ArchiveManagerConfig struct { diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 3b3fe6c7201..db5a4587900 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -953,6 +953,11 @@ func (m *ArchiveManager) TorrentFileExists(communityID string) bool { return err == nil } +func (m *ArchiveManager) CodexIndexCidFileExists(communityID types.HexBytes) bool { + _, err := os.Stat(m.codexArchiveIndexCidFilePath(communityID)) + return err == nil +} + func topicsAsByteArrays(topics []messagingtypes.ContentTopic) [][]byte { var topicsAsByteArrays [][]byte for _, t := range topics { diff --git a/protocol/communities/manager_archive_nop.go b/protocol/communities/manager_archive_nop.go index 4c96575c987..13775e70146 100644 --- a/protocol/communities/manager_archive_nop.go +++ b/protocol/communities/manager_archive_nop.go @@ -104,3 +104,7 @@ func (tmm *ArchiveManagerNop) DownloadHistoryArchivesByIndexCid(communityID type func (tmm *ArchiveManagerNop) TorrentFileExists(communityID string) bool { return false } + +func (tmm *ArchiveManagerNop) CodexIndexCidFileExists(communityID types.HexBytes) bool { + return false +} diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 2897c12fa8b..bd2ea1191c7 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -2016,6 +2016,15 @@ func (m 
*Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ requestToJoinResponseProto.MagnetUri = magnetlink } + if m.archiveManager.IsReady() && m.archiveManager.CodexIndexCidFileExists(community.ID()) { + cid, err := m.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + if err != nil { + m.logger.Warn("couldn't get codex index cid for community", zap.Error(err)) + return nil, err + } + requestToJoinResponseProto.IndexCid = cid + } + payload, err := proto.Marshal(requestToJoinResponseProto) if err != nil { return nil, err diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 1eb7d073f4d..75791265554 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1855,6 +1855,35 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS m.downloadAndImportHistoryArchives(community.ID(), magnetlink, task.CancelChan) }(currentTask) } + + cid := requestToJoinResponseProto.IndexCid + if m.archiveManager.IsReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && cid != "" { + + currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) + go func(currentTask *communities.HistoryArchiveDownloadTask) { + defer gocommon.LogOnPanic() + // Cancel ongoing download/import task + if currentTask != nil && !currentTask.IsCancelled() { + currentTask.Cancel() + currentTask.Waiter.Wait() + } + + task := &communities.HistoryArchiveDownloadTask{ + CancelChan: make(chan struct{}), + Waiter: *new(sync.WaitGroup), + Cancelled: false, + } + m.archiveManager.AddHistoryArchiveDownloadTask(community.IDString(), task) + + task.Waiter.Add(1) + defer task.Waiter.Done() + + m.shutdownWaitGroup.Add(1) + defer m.shutdownWaitGroup.Done() + + m.downloadAndImportCodexHistoryArchives(community.ID(), cid, task.CancelChan) + }(currentTask) + } } return nil diff --git a/protocol/protobuf/communities.proto b/protocol/protobuf/communities.proto index 23b14250d4c..ad891c4e868 100644 --- a/protocol/protobuf/communities.proto +++ b/protocol/protobuf/communities.proto @@ -210,6 +210,7 @@ message CommunityRequestToJoinResponse { Shard shard = 8; // CommunityDescription protocol message with owner signature bytes community_description_protocol_message = 9; + string index_cid = 10; } message CommunityRequestToLeave { From 2a034ef7d2a227057ac229da59ec2221fc675c79 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Sat, 1 Nov 2025 01:41:15 +0100 Subject: [PATCH 07/75] linting --- protocol/communities/codex_client.go | 4 +++- protocol/communities/manager_test.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 637702b2907..04f47d9eb89 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -103,7 +103,9 @@ func (c *CodexClient) HasCid(cid string) (bool, error) { if err := c.node.DownloadInit(cid, codex.DownloadInitOptions{Local: true}); err != nil { return false, nil } - defer c.node.DownloadCancel(cid) + defer func() { + _ = c.node.DownloadCancel(cid) + }() _, err := c.node.DownloadChunk(cid) return err == nil, nil diff --git a/protocol/communities/manager_test.go b/protocol/communities/manager_test.go index 7d17f81eec8..5b51e1a0be8 100644 --- a/protocol/communities/manager_test.go +++ b/protocol/communities/manager_test.go @@ -577,7 +577,7 @@ func (s *ManagerSuite) TestStartTorrentClient_DelayedUntilOnline() { 
s.Require().False(s.archiveManager.torrentClientStarted()) s.T().Cleanup(func() { - s.archiveManager.Stop() + _ = s.archiveManager.Stop() }) s.archiveManager.SetOnline(true) From c59a3daa29b1be947e379a2612bb8ce49ec2b14d Mon Sep 17 00:00:00 2001 From: Arnaud Date: Sun, 2 Nov 2025 06:34:30 +0100 Subject: [PATCH 08/75] Add API to update the message archive interval --- protocol/messenger_communities.go | 9 +++++++++ services/ext/api.go | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index bd2ea1191c7..e2fee8e33b8 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -4301,6 +4301,15 @@ func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { return nil } +func (m *Messenger) UpdateMessageArchiveInterval(duration time.Duration) error { + messageArchiveInterval = duration + return nil +} + +func (m *Messenger) GetMessageArchiveInterval() (time.Duration, error) { + return messageArchiveInterval, nil +} + func (m *Messenger) GetCommunitiesSettings() ([]communities.CommunitySettings, error) { settings, err := m.communitiesManager.GetCommunitiesSettings() if err != nil { diff --git a/services/ext/api.go b/services/ext/api.go index 84a728de605..f39732dcd6a 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -1176,6 +1176,14 @@ func (api *PublicAPI) DisableCommunityHistoryArchiveProtocol() error { return api.service.messenger.DisableCommunityHistoryArchiveProtocol() } +func (api *PublicAPI) GetMessageArchiveInterval() (time.Duration, error) { + return api.service.messenger.GetMessageArchiveInterval() +} + +func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) error { + return api.service.messenger.UpdateMessageArchiveInterval(duration) +} + func (api *PublicAPI) SubscribeToPubsubTopic(topic string, optPublicKey string) error { var publicKey *ecdsa.PublicKey if optPublicKey != "" { From 168f78ba5a902bf04c21b217d45b760af5f91222 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 05:36:32 +0100 Subject: [PATCH 09/75] Extend test to check CodexConfig and ensure it is loaded properly --- appdatabase/node_config_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/appdatabase/node_config_test.go b/appdatabase/node_config_test.go index ac8920b14fc..4157f5795dc 100644 --- a/appdatabase/node_config_test.go +++ b/appdatabase/node_config_test.go @@ -9,6 +9,7 @@ import ( "sort" "testing" + "github.com/codex-storage/codex-go-bindings/codex" "github.com/stretchr/testify/require" "github.com/status-im/status-go/nodecfg" @@ -55,6 +56,15 @@ func randomNodeConfig() *params.NodeConfig { WakuV2Config: params.WakuV2Config{ LightClient: randomBool(), }, + CodexConfig: params.CodexConfig{ + Enabled: randomBool(), + Config: codex.Config{ + DataDir: randomString(), + DiscoveryPort: randomInt(65535), + BlockRetries: randomInt(10), + LogLevel: randomString(), + }, + }, } } @@ -66,6 +76,7 @@ func TestGetNodeConfig(t *testing.T) { dbNodeConfig, err := nodecfg.GetNodeConfigFromDB(db) require.NoError(t, err) + require.Equal(t, nodeConfig, dbNodeConfig) } From ac3c7358592e4fdd5925fc3b4c39dad895f8ee30 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 06:26:00 +0100 Subject: [PATCH 10/75] Add missing local property --- protocol/communities/codex_client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 04f47d9eb89..928db1efd87 100644 --- 
a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -128,7 +128,7 @@ func (c *CodexClient) LocalDownload(cid string, output io.Writer) error { } func (c *CodexClient) LocalDownloadWithContext(ctx context.Context, cid string, output io.Writer) error { - return c.node.DownloadStream(ctx, cid, codex.DownloadStreamOptions{Writer: output}) + return c.node.DownloadStream(ctx, cid, codex.DownloadStreamOptions{Writer: output, Local: true}) } func (c *CodexClient) FetchManifestWithContext(ctx context.Context, cid string) (codex.Manifest, error) { From 02f8f2a0c7b995c0ba092df9373969a9de4740cf Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 03:59:15 +0100 Subject: [PATCH 11/75] separates libcodex datadir from history archive data dir --- api/defaults.go | 7 +- .../sql/1761913234_add_codex_config.up.sql | 1 + nodecfg/node_config.go | 96 ++++++++++--------- params/config.go | 12 ++- params/config_test.go | 13 ++- ...dex_archive_downloader_integration_test.go | 65 ++++++++----- protocol/communities/codex_client.go | 14 ++- protocol/communities/codex_client_test.go | 4 +- ...estutil_test.go => codex_testutil_test.go} | 9 +- protocol/communities/manager_archive.go | 7 +- protocol/communities/manager_archive_file.go | 29 +++--- protocol/communities/manager_test.go | 11 ++- ...nities_messenger_token_permissions_test.go | 55 +++++------ 13 files changed, 172 insertions(+), 151 deletions(-) rename protocol/communities/{communities_testutil_test.go => codex_testutil_test.go} (76%) diff --git a/api/defaults.go b/api/defaults.go index 47bb74f2729..c2c08ee528e 100644 --- a/api/defaults.go +++ b/api/defaults.go @@ -350,9 +350,10 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc } nodeConfig.CodexConfig = params.CodexConfig{ - Enabled: false, - Config: codex.Config{ - DataDir: filepath.Join(nodeConfig.RootDataDir, params.ArchivesRelativePath), + Enabled: false, + HistoryArchiveDataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "archivedata"), + CodexNodeConfig: codex.Config{ + DataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "codexdata"), BlockRetries: params.BlockRetries, MetricsEnabled: false, }, diff --git a/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql index b3e6145f2eb..2f43637ed0f 100644 --- a/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql +++ b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql @@ -1,5 +1,6 @@ CREATE TABLE codex_config ( enabled BOOLEAN DEFAULT false, + history_archive_data_dir VARCHAR NOT NULL, log_level TEXT DEFAULT 'info', log_format TEXT DEFAULT 'auto', metrics_enabled BOOLEAN DEFAULT false, diff --git a/nodecfg/node_config.go b/nodecfg/node_config.go index 317ec450257..b7cff27ad08 100644 --- a/nodecfg/node_config.go +++ b/nodecfg/node_config.go @@ -118,44 +118,45 @@ func insertTorrentConfig(tx *sql.Tx, c *params.NodeConfig) error { // Insert or update codex_config table func insertCodexConfig(tx *sql.Tx, c *params.NodeConfig) error { - listenAddrsJSON, err := json.Marshal(c.CodexConfig.ListenAddrs) + listenAddrsJSON, err := json.Marshal(c.CodexConfig.CodexNodeConfig.ListenAddrs) if err != nil { return err } - bootstrapNodesJSON, err := json.Marshal(c.CodexConfig.BootstrapNodes) + bootstrapNodesJSON, err := json.Marshal(c.CodexConfig.CodexNodeConfig.BootstrapNodes) if err != nil { return err } _, err = tx.Exec(` INSERT OR REPLACE INTO codex_config ( - enabled, log_level, log_format, 
metrics_enabled, metrics_address, metrics_port, data_dir, + enabled, history_archive_data_dir, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, block_retries, cache_size, log_file, synthetic_id - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`, + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`, c.CodexConfig.Enabled, - c.CodexConfig.LogLevel, - c.CodexConfig.LogFormat, - c.CodexConfig.MetricsEnabled, - c.CodexConfig.MetricsAddress, - c.CodexConfig.MetricsPort, - c.CodexConfig.DataDir, + c.CodexConfig.HistoryArchiveDataDir, + c.CodexConfig.CodexNodeConfig.LogLevel, + c.CodexConfig.CodexNodeConfig.LogFormat, + c.CodexConfig.CodexNodeConfig.MetricsEnabled, + c.CodexConfig.CodexNodeConfig.MetricsAddress, + c.CodexConfig.CodexNodeConfig.MetricsPort, + c.CodexConfig.CodexNodeConfig.DataDir, string(listenAddrsJSON), - c.CodexConfig.Nat, - c.CodexConfig.DiscoveryPort, - c.CodexConfig.NetPrivKeyFile, + c.CodexConfig.CodexNodeConfig.Nat, + c.CodexConfig.CodexNodeConfig.DiscoveryPort, + c.CodexConfig.CodexNodeConfig.NetPrivKeyFile, string(bootstrapNodesJSON), - c.CodexConfig.MaxPeers, - c.CodexConfig.NumThreads, - c.CodexConfig.AgentString, - c.CodexConfig.RepoKind, - c.CodexConfig.StorageQuota, - c.CodexConfig.BlockTtl, - c.CodexConfig.BlockMaintenanceInterval, - c.CodexConfig.BlockMaintenanceNumberOfBlocks, - c.CodexConfig.BlockRetries, - c.CodexConfig.CacheSize, - c.CodexConfig.LogFile, + c.CodexConfig.CodexNodeConfig.MaxPeers, + c.CodexConfig.CodexNodeConfig.NumThreads, + c.CodexConfig.CodexNodeConfig.AgentString, + c.CodexConfig.CodexNodeConfig.RepoKind, + c.CodexConfig.CodexNodeConfig.StorageQuota, + c.CodexConfig.CodexNodeConfig.BlockTtl, + c.CodexConfig.CodexNodeConfig.BlockMaintenanceInterval, + c.CodexConfig.CodexNodeConfig.BlockMaintenanceNumberOfBlocks, + c.CodexConfig.CodexNodeConfig.BlockRetries, + c.CodexConfig.CodexNodeConfig.CacheSize, + c.CodexConfig.CodexNodeConfig.LogFile, ) return err } @@ -308,47 +309,48 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) { // Load codex_config var listenAddrsStr, bootstrapNodesStr string err = tx.QueryRow(` - SELECT enabled, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, + SELECT enabled, history_archive_data_dir, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, block_retries, cache_size, log_file FROM codex_config WHERE synthetic_id = 'id' `).Scan( &nodecfg.CodexConfig.Enabled, - &nodecfg.CodexConfig.LogLevel, - &nodecfg.CodexConfig.LogFormat, - &nodecfg.CodexConfig.MetricsEnabled, - &nodecfg.CodexConfig.MetricsAddress, - &nodecfg.CodexConfig.MetricsPort, - &nodecfg.CodexConfig.DataDir, + &nodecfg.CodexConfig.HistoryArchiveDataDir, + &nodecfg.CodexConfig.CodexNodeConfig.LogLevel, + &nodecfg.CodexConfig.CodexNodeConfig.LogFormat, + &nodecfg.CodexConfig.CodexNodeConfig.MetricsEnabled, + &nodecfg.CodexConfig.CodexNodeConfig.MetricsAddress, + &nodecfg.CodexConfig.CodexNodeConfig.MetricsPort, + &nodecfg.CodexConfig.CodexNodeConfig.DataDir, &listenAddrsStr, - 
&nodecfg.CodexConfig.Nat, - &nodecfg.CodexConfig.DiscoveryPort, - &nodecfg.CodexConfig.NetPrivKeyFile, + &nodecfg.CodexConfig.CodexNodeConfig.Nat, + &nodecfg.CodexConfig.CodexNodeConfig.DiscoveryPort, + &nodecfg.CodexConfig.CodexNodeConfig.NetPrivKeyFile, &bootstrapNodesStr, - &nodecfg.CodexConfig.MaxPeers, - &nodecfg.CodexConfig.NumThreads, - &nodecfg.CodexConfig.AgentString, - &nodecfg.CodexConfig.RepoKind, - &nodecfg.CodexConfig.StorageQuota, - &nodecfg.CodexConfig.BlockTtl, - &nodecfg.CodexConfig.BlockMaintenanceInterval, - &nodecfg.CodexConfig.BlockMaintenanceNumberOfBlocks, - &nodecfg.CodexConfig.BlockRetries, - &nodecfg.CodexConfig.CacheSize, - &nodecfg.CodexConfig.LogFile, + &nodecfg.CodexConfig.CodexNodeConfig.MaxPeers, + &nodecfg.CodexConfig.CodexNodeConfig.NumThreads, + &nodecfg.CodexConfig.CodexNodeConfig.AgentString, + &nodecfg.CodexConfig.CodexNodeConfig.RepoKind, + &nodecfg.CodexConfig.CodexNodeConfig.StorageQuota, + &nodecfg.CodexConfig.CodexNodeConfig.BlockTtl, + &nodecfg.CodexConfig.CodexNodeConfig.BlockMaintenanceInterval, + &nodecfg.CodexConfig.CodexNodeConfig.BlockMaintenanceNumberOfBlocks, + &nodecfg.CodexConfig.CodexNodeConfig.BlockRetries, + &nodecfg.CodexConfig.CodexNodeConfig.CacheSize, + &nodecfg.CodexConfig.CodexNodeConfig.LogFile, ) if err != nil && err != sql.ErrNoRows { return nil, err } // Unmarshal JSON fields if listenAddrsStr != "" { - if err := json.Unmarshal([]byte(listenAddrsStr), &nodecfg.CodexConfig.ListenAddrs); err != nil { + if err := json.Unmarshal([]byte(listenAddrsStr), &nodecfg.CodexConfig.CodexNodeConfig.ListenAddrs); err != nil { return nil, err } } if bootstrapNodesStr != "" { - if err := json.Unmarshal([]byte(bootstrapNodesStr), &nodecfg.CodexConfig.BootstrapNodes); err != nil { + if err := json.Unmarshal([]byte(bootstrapNodesStr), &nodecfg.CodexConfig.CodexNodeConfig.BootstrapNodes); err != nil { return nil, err } } diff --git a/params/config.go b/params/config.go index 2e3eb0bd969..126eaa2193d 100644 --- a/params/config.go +++ b/params/config.go @@ -325,8 +325,9 @@ type TorrentConfig struct { } type CodexConfig struct { - Enabled bool - codex.Config + Enabled bool + HistoryArchiveDataDir string + CodexNodeConfig codex.Config } // Validate validates the ShhextConfig struct and returns an error if inconsistent values are found @@ -403,10 +404,11 @@ func NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) { TorrentDir: dataDir + "/torrents", }, CodexConfig: CodexConfig{ - Enabled: false, - Config: codex.Config{ + Enabled: false, + HistoryArchiveDataDir: dataDir + "/codex/history-archive-data", + CodexNodeConfig: codex.Config{ BlockRetries: 50, - DataDir: dataDir + "/codexdata", + DataDir: dataDir + "/codex/codex-data", MetricsEnabled: false, }, }, diff --git a/params/config_test.go b/params/config_test.go index 88525c0368f..06492300552 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "path" + "path/filepath" "testing" "gopkg.in/go-playground/validator.v9" @@ -31,8 +32,11 @@ func TestNewConfigFromJSON(t *testing.T) { }, "CodexConfig": { "Enabled": false, - "data-dir": "` + tmpDir + `/codexdata", - "block-retries": 5 + "HistoryArchiveDataDir": "` + tmpDir + `/codex/archivedata", + "CodexNodeConfig": { + "data-dir": "` + tmpDir + `/codex/codexdata", + "block-retries": 5 + } }, "RuntimeLogLevel": "DEBUG" }` @@ -45,8 +49,9 @@ func TestNewConfigFromJSON(t *testing.T) { require.Equal(t, tmpDir+"/archivedata", c.TorrentConfig.DataDir) require.Equal(t, tmpDir+"/torrents", 
c.TorrentConfig.TorrentDir) require.Equal(t, "DEBUG", c.RuntimeLogLevel) - require.Equal(t, tmpDir+"/codexdata", c.CodexConfig.DataDir) - require.Equal(t, 5, c.CodexConfig.BlockRetries) + require.Equal(t, filepath.Join(tmpDir, "codex", "archivedata"), c.CodexConfig.HistoryArchiveDataDir) + require.Equal(t, filepath.Join(tmpDir, "codex", "codexdata"), c.CodexConfig.CodexNodeConfig.DataDir) + require.Equal(t, 5, c.CodexConfig.CodexNodeConfig.BlockRetries) } // TestNodeConfigValidate checks validation of individual fields. diff --git a/protocol/communities/codex_archive_downloader_integration_test.go b/protocol/communities/codex_archive_downloader_integration_test.go index 2903ca77103..7ab5119405b 100644 --- a/protocol/communities/codex_archive_downloader_integration_test.go +++ b/protocol/communities/codex_archive_downloader_integration_test.go @@ -8,6 +8,7 @@ import ( "context" "crypto/rand" "encoding/hex" + "sync" "testing" "time" @@ -24,21 +25,20 @@ import ( // against a real Codex instance type CodexArchiveDownloaderIntegrationSuite struct { suite.Suite + client communities.CodexClient uploadedCIDs []string // Track uploaded CIDs for cleanup } // SetupSuite runs once before all tests in the suite -func (suite *CodexArchiveDownloaderIntegrationSuite) SetupSuite() { - // Nothing to do +func (suite *CodexArchiveDownloaderIntegrationSuite) SetupTest() { + suite.client = NewCodexClientTest(suite.T()) } -// TearDownSuite runs once after all tests in the suite -func (suite *CodexArchiveDownloaderIntegrationSuite) TearDownSuite() { - client := NewCodexClientTest(suite.T()) - +// TearDownSuite runs once after each test in the suite +func (suite *CodexArchiveDownloaderIntegrationSuite) TearDownTest() { // Clean up all uploaded CIDs for _, cid := range suite.uploadedCIDs { - if err := client.RemoveCid(cid); err != nil { + if err := suite.client.RemoveCid(cid); err != nil { suite.T().Logf("Warning: Failed to remove CID %s: %v", cid, err) } else { suite.T().Logf("Successfully removed CID: %s", cid) @@ -47,7 +47,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TearDownSuite() { } func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWorkflow() { - client := NewCodexClientTest(suite.T()) + // client := NewCodexClientTest(suite.T()) // Step 1: Create test archive data and upload multiple archives to Codex archives := []struct { @@ -73,7 +73,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork // Upload all archives to Codex for _, archive := range archives { - cid, err := client.Upload(bytes.NewReader(archive.data), archive.hash+".bin") + cid, err := suite.client.Upload(bytes.NewReader(archive.data), archive.hash+".bin") require.NoError(suite.T(), err, "Failed to upload %s", archive.hash) archiveCIDs[archive.hash] = cid @@ -81,7 +81,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork suite.T().Logf("Uploaded %s to CID: %s", archive.hash, cid) // Verify upload succeeded - exists, err := client.HasCid(cid) + exists, err := suite.client.HasCid(cid) require.NoError(suite.T(), err, "Failed to check CID existence for %s", archive.hash) require.True(suite.T(), exists, "CID %s should exist after upload", cid) } @@ -109,7 +109,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork logger, _ := zap.NewDevelopment() // Use development logger for integration tests downloader := communities.NewCodexArchiveDownloader( - &client, + &suite.client, index, communityID, existingArchiveIDs, @@ 
-122,16 +122,21 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork downloader.SetPollingTimeout(30 * time.Second) // Generous timeout for real network // Step 4: Set up callbacks to track progress + var trackerMu sync.Mutex startedArchives := make(map[string]bool) completedArchives := make(map[string]bool) downloader.SetOnStartingArchiveDownload(func(hash string, from, to uint64) { + trackerMu.Lock() startedArchives[hash] = true + trackerMu.Unlock() suite.T().Logf("🚀 Started downloading archive: %s (from %d to %d)", hash, from, to) }) downloader.SetOnArchiveDownloaded(func(hash string, from, to uint64) { + trackerMu.Lock() completedArchives[hash] = true + trackerMu.Unlock() suite.T().Logf("✅ Completed downloading archive: %s (from %d to %d)", hash, from, to) }) @@ -162,6 +167,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork assert.Equal(suite.T(), 0, downloader.GetPendingArchivesCount(), "Should have no pending archives") // Verify all archives were processed + trackerMu.Lock() assert.Len(suite.T(), startedArchives, 3, "Should have started 3 archives") assert.Len(suite.T(), completedArchives, 3, "Should have completed 3 archives") @@ -169,6 +175,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork assert.Contains(suite.T(), startedArchives, archive.hash, "Should have started %s", archive.hash) assert.Contains(suite.T(), completedArchives, archive.hash, "Should have completed %s", archive.hash) } + trackerMu.Unlock() // Step 9: Verify we can download the actual archive content using LocalDownloadWithContext suite.T().Log("🔍 Verifying archive content can be downloaded...") @@ -176,6 +183,8 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork for completedHash := range completedArchives { cid := archiveCIDs[completedHash] + suite.T().Logf("Downloading and verifying content for archive: %s (CID: %s)", completedHash, cid) + // Find the original archive data for comparison var originalData []byte for _, archive := range archives { @@ -185,22 +194,32 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork } } - // Create context with timeout for download - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + require.Eventually(suite.T(), func() bool { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() - var downloadBuf bytes.Buffer - err := client.LocalDownloadWithContext(ctx, cid, &downloadBuf) - cancel() + var downloadBuf bytes.Buffer + err := suite.client.LocalDownloadWithContext(ctx, cid, &downloadBuf) + if err != nil { + suite.T().Logf("LocalDownloadWithContext error for %s: %v", completedHash, err) + return false + } - require.NoError(suite.T(), err, "LocalDownload should succeed for %s", completedHash) + downloadedData := downloadBuf.Bytes() + if len(downloadedData) != len(originalData) { + suite.T().Logf("Downloaded %d bytes for %s (expected %d), retrying...", + len(downloadedData), completedHash, len(originalData)) + return false + } - downloadedData := downloadBuf.Bytes() - assert.Equal(suite.T(), len(originalData), len(downloadedData), - "Downloaded data length should match for %s", completedHash) - assert.True(suite.T(), bytes.Equal(originalData, downloadedData), - "Downloaded data should match original for %s", completedHash) + if !bytes.Equal(originalData, downloadedData) { + suite.T().Logf("Downloaded data mismatch for %s, retrying...", completedHash) + return 
false + } - suite.T().Logf("✅ Verified content for %s: %d bytes match", completedHash, len(downloadedData)) + suite.T().Logf("✅ Verified content for %s: %d bytes match", completedHash, len(downloadedData)) + return true + }, 30*time.Second, time.Second, "Local download should eventually match original for %s", completedHash) } suite.T().Log("🎉 Full archive download workflow completed successfully!") diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 928db1efd87..65278e15ba2 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "io" - "log" "github.com/codex-storage/codex-go-bindings/codex" @@ -23,13 +22,13 @@ type CodexClient struct { // NewCodexClient creates a new Codex client func NewCodexClient(config params.CodexConfig) (CodexClient, error) { - node, err := codex.New(config.Config) + node, err := codex.New(config.CodexNodeConfig) if err != nil { return CodexClient{}, err } return CodexClient{ - config: config.Config, + config: config.CodexNodeConfig, node: node, enabled: config.Enabled, }, nil @@ -48,11 +47,9 @@ func (c *CodexClient) Start() error { } func (c *CodexClient) Stop() error { - log.Println("AAAAAAAAAAAAAAA!!!!!!!!!!!!!!!!!!!!!!!!!!!!") if c.stopped { return nil } - log.Println("Stopping Codex client...!!!!!!!!!!!!!!!!!!!!!!!!!!!!") err := c.node.Stop() if err != nil { return err @@ -62,11 +59,9 @@ func (c *CodexClient) Stop() error { } func (c *CodexClient) Destroy() error { - log.Println("BBBBBBBBBBBBBBBB????????????????????????????????") if c.destroyed { return nil } - log.Println("Destroy Destroy Destroy ???????????????????????") err := c.node.Destroy() if err != nil { return err @@ -128,7 +123,10 @@ func (c *CodexClient) LocalDownload(cid string, output io.Writer) error { } func (c *CodexClient) LocalDownloadWithContext(ctx context.Context, cid string, output io.Writer) error { - return c.node.DownloadStream(ctx, cid, codex.DownloadStreamOptions{Writer: output, Local: true}) + return c.node.DownloadStream(ctx, cid, codex.DownloadStreamOptions{ + Writer: output, + Local: true, + }) } func (c *CodexClient) FetchManifestWithContext(ctx context.Context, cid string) (codex.Manifest, error) { diff --git a/protocol/communities/codex_client_test.go b/protocol/communities/codex_client_test.go index 148a6a6f286..88c6f0f0816 100644 --- a/protocol/communities/codex_client_test.go +++ b/protocol/communities/codex_client_test.go @@ -75,7 +75,7 @@ func (suite *CodexClientTestSuite) TestDownload_Success() { func (suite *CodexClientTestSuite) TestDownloadWithContext_Cancel() { // skip test - suite.T().Skip("Wait for cancellation support PR to be merged in codex-go-bindings") + // suite.T().Skip("Wait for cancellation support PR to be merged in codex-go-bindings") client := NewCodexClientTest(suite.T()) @@ -203,8 +203,6 @@ func (suite *CodexClientTestSuite) TestLocalDownloadWithContext_Success() { } func (suite *CodexClientTestSuite) TestLocalDownloadWithContext_Cancellation() { - suite.T().Skip("Wait for cancellation support PR to be merged in codex-go-bindings") - client := NewCodexClientTest(suite.T()) // Create a context with a very short timeout diff --git a/protocol/communities/communities_testutil_test.go b/protocol/communities/codex_testutil_test.go similarity index 76% rename from protocol/communities/communities_testutil_test.go rename to protocol/communities/codex_testutil_test.go index 0131a570ef2..fa2cb2f633e 100644 --- 
a/protocol/communities/communities_testutil_test.go +++ b/protocol/communities/codex_testutil_test.go @@ -1,6 +1,7 @@ package communities_test import ( + "path/filepath" "testing" "github.com/codex-storage/codex-go-bindings/codex" @@ -11,12 +12,14 @@ import ( func NewCodexClientTest(t *testing.T) communities.CodexClient { client, err := communities.NewCodexClient(params.CodexConfig{ - Enabled: true, - Config: codex.Config{ - DataDir: t.TempDir(), + Enabled: true, + HistoryArchiveDataDir: filepath.Join(t.TempDir(), "codex", "archivedata"), + CodexNodeConfig: codex.Config{ + DataDir: filepath.Join(t.TempDir(), "codex", "codexdata"), LogFormat: codex.LogFormatNoColors, MetricsEnabled: false, LogLevel: "ERROR", + Nat: "none", }, }) if err != nil { diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index db5a4587900..f35e7523b68 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -214,9 +214,6 @@ func (m *ArchiveManager) StartTorrentClient() error { } func (m *ArchiveManager) StartCodexClient() error { - - m.logger.Info("======================Starting codex client=============================") - if m.codexConfig == nil { return fmt.Errorf("can't start codex client: missing codexConfig") } @@ -782,7 +779,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex indexDownloaderCancel := make(chan struct{}) // Create index downloader with path to index file using helper function - indexFilePath := m.codexArchiveIndexFilePath(communityID) + indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) indexDownloader := NewCodexIndexDownloader(m.codexClient, indexCid, indexFilePath, indexDownloaderCancel, m.logger) m.logger.Debug("fetching history index from Codex", zap.String("indexCid", indexCid)) @@ -954,7 +951,7 @@ func (m *ArchiveManager) TorrentFileExists(communityID string) bool { } func (m *ArchiveManager) CodexIndexCidFileExists(communityID types.HexBytes) bool { - _, err := os.Stat(m.codexArchiveIndexCidFilePath(communityID)) + _, err := os.Stat(m.codexHistoryArchiveIndexCidFilePath(communityID)) return err == nil } diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index 6f06b9323fa..e95ff06420e 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -14,6 +14,7 @@ import ( "crypto/ecdsa" "os" "path" + "path/filepath" "time" "github.com/status-im/status-go/crypto" @@ -343,8 +344,8 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte to = endDate } - codexArchiveDir := m.codexArchiveDirPath(communityID) - codexIndexPath := m.codexArchiveIndexFilePath(communityID) + codexArchiveDir := m.codexHistoryArchiveDataDirPath(communityID) + codexIndexPath := m.codexHistoryArchiveIndexFilePath(communityID) m.logger.Debug("codexArchiveDir", zap.String("codexArchiveDir", codexArchiveDir)) @@ -558,40 +559,40 @@ func (m *ArchiveFileManager) archiveIndexFile(communityID string) string { return path.Join(m.torrentConfig.DataDir, communityID, "index") } -func (m *ArchiveFileManager) codexArchiveDirPath(communityID types.HexBytes) string { - return path.Join(m.codexConfig.DataDir, communityID.String()) +func (m *ArchiveFileManager) codexHistoryArchiveDataDirPath(communityID types.HexBytes) string { + return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String()) } -func (m *ArchiveFileManager) 
codexArchiveIndexFilePath(communityID types.HexBytes) string { - return path.Join(m.codexConfig.DataDir, communityID.String(), "index") +func (m *ArchiveFileManager) codexHistoryArchiveIndexFilePath(communityID types.HexBytes) string { + return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String(), "index") } -func (m *ArchiveFileManager) codexArchiveIndexCidFilePath(communityID types.HexBytes) string { - return path.Join(m.codexConfig.DataDir, communityID.String(), "index-cid") +func (m *ArchiveFileManager) codexHistoryArchiveIndexCidFilePath(communityID types.HexBytes) string { + return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String(), "index-cid") } func (m *ArchiveFileManager) writeCodexIndexToFile(communityID types.HexBytes, bytes []byte) error { - indexFilePath := m.codexArchiveIndexFilePath(communityID) + indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) return os.WriteFile(indexFilePath, bytes, 0644) // nolint: gosec } func (m *ArchiveFileManager) readCodexIndexFromFile(communityID types.HexBytes) ([]byte, error) { - indexFilePath := m.codexArchiveIndexFilePath(communityID) + indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) return os.ReadFile(indexFilePath) } func (m *ArchiveFileManager) removeCodexIndexFile(communityID types.HexBytes) error { - indexFilePath := m.codexArchiveIndexFilePath(communityID) + indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) return os.Remove(indexFilePath) } func (m *ArchiveFileManager) writeCodexIndexCidToFile(communityID types.HexBytes, cid string) error { - cidFilePath := m.codexArchiveIndexCidFilePath(communityID) + cidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) return os.WriteFile(cidFilePath, []byte(cid), 0644) // nolint: gosec } func (m *ArchiveFileManager) readCodexIndexCidFromFile(communityID types.HexBytes) ([]byte, error) { - cidFilePath := m.codexArchiveIndexCidFilePath(communityID) + cidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) return os.ReadFile(cidFilePath) } @@ -670,7 +671,7 @@ func (m *ArchiveFileManager) GetHistoryArchiveMagnetlink(communityID types.HexBy } func (m *ArchiveFileManager) GetHistoryArchiveIndexCid(communityID types.HexBytes) (string, error) { - codexIndexCidPath := m.codexArchiveIndexCidFilePath(communityID) + codexIndexCidPath := m.codexHistoryArchiveIndexCidFilePath(communityID) cidData, err := os.ReadFile(codexIndexCidPath) if err != nil { diff --git a/protocol/communities/manager_test.go b/protocol/communities/manager_test.go index 5b51e1a0be8..09fb9dd838b 100644 --- a/protocol/communities/manager_test.go +++ b/protocol/communities/manager_test.go @@ -9,6 +9,7 @@ import ( "math" "math/big" "os" + "path/filepath" "testing" "time" @@ -493,7 +494,7 @@ func (s *ManagerSuite) TestStartAndStopCodexClient() { s.Require().NotNil(s.archiveManager.codexClient) defer s.archiveManager.Stop() //nolint: errcheck - _, err = os.Stat(s.archiveManager.codexConfig.DataDir) + _, err = os.Stat(s.archiveManager.codexConfig.CodexNodeConfig.DataDir) s.Require().NoError(err) s.Require().Equal(s.archiveManager.isCodexClientStarted, true) } @@ -1658,11 +1659,13 @@ func buildTorrentConfig() *params.TorrentConfig { func buildCodexConfig(t *testing.T) *params.CodexConfig { return ¶ms.CodexConfig{ - Enabled: true, - Config: codex.Config{ - DataDir: t.TempDir() + "/codexdata", + Enabled: true, + HistoryArchiveDataDir: filepath.Join(t.TempDir(), "codex", "archivedata"), + CodexNodeConfig: codex.Config{ + DataDir: 
filepath.Join(t.TempDir(), "codex", "codexdata"), BlockRetries: 5, LogLevel: "ERROR", + Nat: "none", }, } } diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 78c95304dc9..a032c8dcaa9 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -8,6 +8,7 @@ import ( "fmt" "log" "os" + "path/filepath" "strconv" "strings" "sync" @@ -2349,21 +2350,25 @@ func PrintArchiveIndex(index *protobuf.CodexWakuMessageArchiveIndex) { func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArchiveMessages() { - dataDir := os.TempDir() + "/archivedata" + archiveDataDir := filepath.Join(os.TempDir(), "codex", "archivedata") + codexDataDir := filepath.Join(os.TempDir(), "codex", "codexdata") - log.Println("Data directory:", dataDir) + log.Println("Data directory:", archiveDataDir) codexConfig := params.CodexConfig{ - Enabled: false, - Config: codex.Config{ - DataDir: dataDir, + Enabled: false, + HistoryArchiveDataDir: archiveDataDir, + CodexNodeConfig: codex.Config{ + DataDir: codexDataDir, BlockRetries: 10, LogLevel: "ERROR", LogFormat: codex.LogFormatNoColors, + Nat: "none", }, } // Share archive directory between all users + // so that bob can access owner's created archive s.owner.archiveManager.SetCodexConfig(&codexConfig) s.bob.archiveManager.SetCodexConfig(&codexConfig) @@ -2371,7 +2376,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch s.Require().NoError(err) codexClient := s.owner.archiveManager.GetCodexClient() s.Require().NotNil(codexClient) - // defer codexClient.Stop() //nolint: errcheck + // no need to stop codex client, as it will be stopped during messenger Stop // defer codexClient.Stop() //nolint: errcheck s.bob.archiveManager.SetCodexClient(codexClient) @@ -2468,32 +2473,6 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} - // dataDir := os.TempDir() + "/archivedata" - - // log.Println("Data directory:", dataDir) - - // codexConfig := params.CodexConfig{ - // Enabled: false, - // Config: codex.Config{ - // DataDir: dataDir, - // BlockRetries: 10, - // LogLevel: "ERROR", - // LogFormat: codex.LogFormatNoColors, - // }, - // } - - // // Share archive directory between all users - // s.owner.archiveManager.SetCodexConfig(&codexConfig) - // s.bob.archiveManager.SetCodexConfig(&codexConfig) - - // err = s.owner.archiveManager.StartCodexClient() - // s.Require().NoError(err) - // codexClient := s.owner.archiveManager.GetCodexClient() - // s.Require().NotNil(codexClient) - // defer codexClient.Stop() //nolint: errcheck - - // s.bob.archiveManager.SetCodexClient(codexClient) - s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} @@ -2609,6 +2588,18 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch s.Require().Equal(messageText1, receivedMessage1.Text) } +func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabase() { + // The messengers used in the tests in this suite use the helper newTestMessenger (protocol/messenger_builder_test.go). 
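(Editor's aside.) The comment block above describes reading the persisted node config back and checking the Codex defaults. A minimal sketch of that verification, assuming only the `codex_config` schema introduced by the 1761913234 migration and an app DB handle obtained however the test prefers; the helper name is hypothetical and not part of this patch series:

package protocol

import (
	"database/sql"
	"testing"

	"github.com/stretchr/testify/require"
)

// verifyCodexConfigDefaults is a hypothetical helper: it reads the single
// codex_config row seeded by CreateSettings and checks the zero-value default.
// How the *sql.DB handle is obtained is an assumption for illustration.
func verifyCodexConfigDefaults(t *testing.T, appDB *sql.DB) {
	var enabled bool
	var historyArchiveDataDir string

	err := appDB.QueryRow(`
		SELECT enabled, history_archive_data_dir
		FROM codex_config
		WHERE synthetic_id = 'id'
	`).Scan(&enabled, &historyArchiveDataDir)

	if err == sql.ErrNoRows {
		// No codex_config row was ever persisted for this account.
		return
	}
	require.NoError(t, err)
	// A default NodeConfig has CodexConfig.Enabled == false.
	require.False(t, enabled, "CodexConfig.Enabled should default to false")
	// history_archive_data_dir simply reflects whatever the NodeConfig carried
	// at CreateSettings time, so it is read here but not asserted.
	t.Logf("persisted history_archive_data_dir: %q", historyArchiveDataDir)
}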
In the config setup (config.complete), tmc.nodeConfig defaults to an empty params.NodeConfig{} unless the test overrides it. The default params.NodeConfig zero-value has all nested configs (including CodexConfig.Enabled) set to false. + + // During newTestMessenger, the in-memory appDb is migrated and then sDB.CreateSettings(*config.appSettings, *config.nodeConfig) is called (messenger_builder_test.go (line 120)). If you don’t override config.nodeConfig beforehand, this writes CodexConfig.Enabled = false into the node-config tables—mirroring what a brand-new install would do. + + // So TestImportDecryptedCodexArchiveMessages starts from that baseline: the in-memory DB contains the node config seeded with CodexConfig.Enabled false (unless you explicitly mutate it in the test). + + // Following the above reaoning, we read the config from the database and verify that CodexConfig setting are what we expect them to be. + + // s.owner. +} + func (s *MessengerCommunitiesTokenPermissionsSuite) TestDeleteChannelWithTokenPermission() { // Setup community with two permitted channels community, firstChat := s.createCommunity() From 85ac523c9ad84c3fdc8e7a73a76e1988969ed410 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 04:34:29 +0100 Subject: [PATCH 12/75] adds more logging to the archive downloader integration test --- ...dex_archive_downloader_integration_test.go | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/protocol/communities/codex_archive_downloader_integration_test.go b/protocol/communities/codex_archive_downloader_integration_test.go index 7ab5119405b..e43a1346219 100644 --- a/protocol/communities/codex_archive_downloader_integration_test.go +++ b/protocol/communities/codex_archive_downloader_integration_test.go @@ -84,6 +84,34 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork exists, err := suite.client.HasCid(cid) require.NoError(suite.T(), err, "Failed to check CID existence for %s", archive.hash) require.True(suite.T(), exists, "CID %s should exist after upload", cid) + + // try to download immediately to verify + require.Eventually(suite.T(), func() bool { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + var downloadBuf bytes.Buffer + err := suite.client.LocalDownloadWithContext(ctx, cid, &downloadBuf) + if err != nil { + suite.T().Logf("LocalDownloadWithContext error for %s: %v", cid, err) + return false + } + + downloadedData := downloadBuf.Bytes() + if len(downloadedData) != len(archive.data) { + suite.T().Logf("Downloaded %d bytes for %s (expected %d), retrying...", + len(downloadedData), cid, len(archive.data)) + return false + } + + if !bytes.Equal(archive.data, downloadedData) { + suite.T().Logf("Downloaded data mismatch for %s, retrying...", cid) + return false + } + + suite.T().Logf("✅ Verified content for %s: %d bytes match", cid, len(downloadedData)) + return true + }, 30*time.Second, time.Second, "Local download should eventually match original for %s", cid) } // Step 2: Create archive index for CodexArchiveDownloader From 4182402ad2c7483c88bc05f3239b0a8def1afe0c Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 05:49:18 +0100 Subject: [PATCH 13/75] use filepath.Join to construct codex paths --- params/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/params/config.go b/params/config.go index 126eaa2193d..f6d281c8176 100644 --- a/params/config.go +++ b/params/config.go @@ -405,10 +405,10 @@ func 
NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) { }, CodexConfig: CodexConfig{ Enabled: false, - HistoryArchiveDataDir: dataDir + "/codex/history-archive-data", + HistoryArchiveDataDir: filepath.Join(dataDir, "codex", "archivedata"), CodexNodeConfig: codex.Config{ BlockRetries: 50, - DataDir: dataDir + "/codex/codex-data", + DataDir: filepath.Join(dataDir, "codex", "codexdata"), MetricsEnabled: false, }, }, From ade0d07efb814be311ebd3db15da7898d7c96db9 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 07:45:36 +0100 Subject: [PATCH 14/75] updates node_config tests --- appdatabase/node_config_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/appdatabase/node_config_test.go b/appdatabase/node_config_test.go index 4157f5795dc..67e6c33e566 100644 --- a/appdatabase/node_config_test.go +++ b/appdatabase/node_config_test.go @@ -57,8 +57,9 @@ func randomNodeConfig() *params.NodeConfig { LightClient: randomBool(), }, CodexConfig: params.CodexConfig{ - Enabled: randomBool(), - Config: codex.Config{ + Enabled: randomBool(), + HistoryArchiveDataDir: randomString(), + CodexNodeConfig: codex.Config{ DataDir: randomString(), DiscoveryPort: randomInt(65535), BlockRetries: randomInt(10), From 51b19f2867b30aab12232199b892089b489b3424 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 08:00:54 +0100 Subject: [PATCH 15/75] Fetch the lib codex version based on the Go dependency --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 3268bc40a1b..bbbdf84818b 100644 --- a/Makefile +++ b/Makefile @@ -215,7 +215,7 @@ else CODEX_OS := $(detected_OS) endif -CODEX_VERSION ?= "v0.0.24" +CODEX_VERSION := $(shell go list -m -f '{{.Version}}' github.com/codex-storage/codex-go-bindings 2>/dev/null) CODEX_DOWNLOAD_URL := "https://github.com/codex-storage/codex-go-bindings/releases/download/$(CODEX_VERSION)/codex-${CODEX_OS}-${CODEX_ARCH}.zip" fetch-libcodex: @@ -432,7 +432,7 @@ lint-panics: generate lint: generate lint-panics $(CGO_ENV) golangci-lint --build-tags '$(BUILD_TAGS)' run ./... 
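(Editor's aside on the Makefile change in PATCH 15.) Pinning CODEX_VERSION to the output of `go list -m` keeps the downloaded libcodex binary in lock-step with the Go bindings declared in go.mod. The same module version can also be inspected from inside a built binary; the sketch below uses only the standard library and is an illustration of the idea, not code that exists in status-go:

package main

import (
	"fmt"
	"runtime/debug"
)

// codexBindingsVersion reports the codex-go-bindings version recorded in the
// binary's build info, i.e. the same version `go list -m` prints at build time.
func codexBindingsVersion() (string, bool) {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return "", false
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/codex-storage/codex-go-bindings" {
			return dep.Version, true
		}
	}
	return "", false
}

func main() {
	if v, ok := codexBindingsVersion(); ok {
		fmt.Println("codex-go-bindings version:", v)
	} else {
		fmt.Println("codex-go-bindings not found in build info")
	}
}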
-clean: ##@other Cleanup +clean: clean-libcodex | ##@other Cleanup rm -fr build/bin/* git-clean: From e9ec381b97e2b21c1dbf0618c12d5a36deb9f9e1 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 08:07:17 +0100 Subject: [PATCH 16/75] Update codex lib and use exists function to check cid existence --- go.mod | 2 +- go.sum | 4 ++-- protocol/communities/codex_client.go | 12 +---------- .../codex-go-bindings/codex/storage.go | 20 +++++++++++++++++++ vendor/modules.txt | 2 +- 5 files changed, 25 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f9106f97207..5c5cea8446c 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cockroachdb/errors v1.11.3 - github.com/codex-storage/codex-go-bindings v0.0.24 + github.com/codex-storage/codex-go-bindings v0.0.25 github.com/getsentry/sentry-go v0.29.1 github.com/golang-migrate/migrate/v4 v4.15.2 github.com/gorilla/sessions v1.2.1 diff --git a/go.sum b/go.sum index 44b727c154e..16bb4308912 100644 --- a/go.sum +++ b/go.sum @@ -548,8 +548,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codex-storage/codex-go-bindings v0.0.24 h1:uUlLiUf5zuec34AvtzpAr4BOCE71FuxqTMBholMR86M= -github.com/codex-storage/codex-go-bindings v0.0.24/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= +github.com/codex-storage/codex-go-bindings v0.0.25 h1:MzBQrM5IyYKAvKv8dgI4CnVbcKlrWyyaorzgzM+mlz0= +github.com/codex-storage/codex-go-bindings v0.0.25/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 65278e15ba2..24a04f1bfd4 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -92,18 +92,8 @@ func (c *CodexClient) TriggerDownload(cid string) (codex.Manifest, error) { } // HasCid checks if the given CID exists in Codex storage -// TODO: When the PR is merge https://github.com/codex-storage/nim-codex/pull/1331 -// add the HasCid method to the codex-go-bindings and improve this implementation. 
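(Editor's aside on PATCH 16.) With the probe-based workaround gone, HasCid is a thin wrapper over the new Exists binding from codex-go-bindings v0.0.25. A minimal sketch of how a caller might use it together with TriggerDownload (both shown in this diff); the helper name and the scenario are illustrative only:

package communities

import "fmt"

// ensureIndexAvailable is a hypothetical helper: it checks local Codex storage
// for an index CID and only asks the node to fetch it when it is missing.
func ensureIndexAvailable(client *CodexClient, indexCid string) error {
	exists, err := client.HasCid(indexCid)
	if err != nil {
		return fmt.Errorf("checking index CID %s: %w", indexCid, err)
	}
	if exists {
		// Already present locally; nothing to fetch.
		return nil
	}
	// Not present locally: trigger a network fetch via the Codex node.
	_, err = client.TriggerDownload(indexCid)
	return err
}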
func (c *CodexClient) HasCid(cid string) (bool, error) { - if err := c.node.DownloadInit(cid, codex.DownloadInitOptions{Local: true}); err != nil { - return false, nil - } - defer func() { - _ = c.node.DownloadCancel(cid) - }() - - _, err := c.node.DownloadChunk(cid) - return err == nil, nil + return c.node.Exists(cid) } func (c *CodexClient) RemoveCid(cid string) error { diff --git a/vendor/github.com/codex-storage/codex-go-bindings/codex/storage.go b/vendor/github.com/codex-storage/codex-go-bindings/codex/storage.go index 256598e70ce..8af1f5b49b4 100644 --- a/vendor/github.com/codex-storage/codex-go-bindings/codex/storage.go +++ b/vendor/github.com/codex-storage/codex-go-bindings/codex/storage.go @@ -24,6 +24,10 @@ import ( static int cGoCodexStorageDelete(void* codexCtx, char* cid, void* resp) { return codex_storage_delete(codexCtx, cid, (CodexCallback) callback, resp); } + + static int cGoCodexStorageExists(void* codexCtx, char* cid, void* resp) { + return codex_storage_exists(codexCtx, cid, (CodexCallback) callback, resp); + } */ import "C" @@ -142,3 +146,19 @@ func (node CodexNode) Delete(cid string) error { _, err := bridge.wait() return err } + +// Exists checks if a given cid exists in the local storage. +func (node CodexNode) Exists(cid string) (bool, error) { + bridge := newBridgeCtx() + defer bridge.free() + + var cCid = C.CString(cid) + defer C.free(unsafe.Pointer(cCid)) + + if C.cGoCodexStorageExists(node.ctx, cCid, bridge.resp) != C.RET_OK { + return false, bridge.callError("cGoCodexStorageExists") + } + + result, err := bridge.wait() + return result == "true", err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4a814e7d466..42b28cade74 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -269,7 +269,7 @@ github.com/cockroachdb/redact/internal/rfmt/fmtsort # github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 ## explicit; go 1.19 github.com/cockroachdb/tokenbucket -# github.com/codex-storage/codex-go-bindings v0.0.24 +# github.com/codex-storage/codex-go-bindings v0.0.25 ## explicit; go 1.24.0 github.com/codex-storage/codex-go-bindings/codex # github.com/consensys/gnark-crypto v0.18.0 From 74e99477347f03511b7ebfef390a3326b6bd2fd4 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 08:14:22 +0100 Subject: [PATCH 17/75] Update codex version in nix configuration --- nix/pkgs/status-go/library/default.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nix/pkgs/status-go/library/default.nix b/nix/pkgs/status-go/library/default.nix index 7c3f9ee5a44..ec7aeea7b33 100644 --- a/nix/pkgs/status-go/library/default.nix +++ b/nix/pkgs/status-go/library/default.nix @@ -8,7 +8,7 @@ let optionalString = pkgs.lib.optionalString; - codexVersion = "v0.0.24"; + codexVersion = "v0.0.25"; arch = if stdenv.hostPlatform.isx86_64 then "amd64" else if stdenv.hostPlatform.isAarch64 then "arm64" @@ -16,9 +16,9 @@ let os = if stdenv.isDarwin then "macos" else "Linux"; hash = if stdenv.hostPlatform.isDarwin - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.24/codex-macos-arm64.zip | jq -r .hash + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-macos-arm64.zip | jq -r .hash then "sha256-0AwwTom5i8v+hG81ikKjXWVeq7/v/FNVyb+3clH/V1Y=" - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.24/codex-Linux-amd64.zip | jq -r .hash + # nix 
store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-Linux-amd64.zip | jq -r .hash else "sha256-P1w1XvWsg/ZPg8VZfd52hffI2u4SIIWekIWVP79YnCc="; # Pre-fetch libcodex to avoid network during build From a2d7cdd5a57158ea2981f802b057f5f8b59232d4 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 08:20:17 +0100 Subject: [PATCH 18/75] Update nix hashes for v0.0.25 --- nix/pkgs/status-go/library/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/status-go/library/default.nix b/nix/pkgs/status-go/library/default.nix index ec7aeea7b33..cc7de7d59da 100644 --- a/nix/pkgs/status-go/library/default.nix +++ b/nix/pkgs/status-go/library/default.nix @@ -17,9 +17,9 @@ let hash = if stdenv.hostPlatform.isDarwin # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-macos-arm64.zip | jq -r .hash - then "sha256-0AwwTom5i8v+hG81ikKjXWVeq7/v/FNVyb+3clH/V1Y=" + then "sha256-vlQu7mCGuDL+dKBsD1yZ+PZenZYtmM2TxjU5b/Gi1pQ=" # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-Linux-amd64.zip | jq -r .hash - else "sha256-P1w1XvWsg/ZPg8VZfd52hffI2u4SIIWekIWVP79YnCc="; + else "sha256-SVJsnEZF5Bkh3zBWBCD1klpAb/Q3bePX8HB7NCeSY20="; # Pre-fetch libcodex to avoid network during build codexLib = pkgs.fetchzip { From 126a97d2e324c2a8cf894ac2d5296514d30123fc Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 08:29:41 +0100 Subject: [PATCH 19/75] Updates archive downloader integration test to again catch local downloads failures --- ...dex_archive_downloader_integration_test.go | 62 ++++--------------- 1 file changed, 12 insertions(+), 50 deletions(-) diff --git a/protocol/communities/codex_archive_downloader_integration_test.go b/protocol/communities/codex_archive_downloader_integration_test.go index e43a1346219..60cc2a4098d 100644 --- a/protocol/communities/codex_archive_downloader_integration_test.go +++ b/protocol/communities/codex_archive_downloader_integration_test.go @@ -84,34 +84,6 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork exists, err := suite.client.HasCid(cid) require.NoError(suite.T(), err, "Failed to check CID existence for %s", archive.hash) require.True(suite.T(), exists, "CID %s should exist after upload", cid) - - // try to download immediately to verify - require.Eventually(suite.T(), func() bool { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - var downloadBuf bytes.Buffer - err := suite.client.LocalDownloadWithContext(ctx, cid, &downloadBuf) - if err != nil { - suite.T().Logf("LocalDownloadWithContext error for %s: %v", cid, err) - return false - } - - downloadedData := downloadBuf.Bytes() - if len(downloadedData) != len(archive.data) { - suite.T().Logf("Downloaded %d bytes for %s (expected %d), retrying...", - len(downloadedData), cid, len(archive.data)) - return false - } - - if !bytes.Equal(archive.data, downloadedData) { - suite.T().Logf("Downloaded data mismatch for %s, retrying...", cid) - return false - } - - suite.T().Logf("✅ Verified content for %s: %d bytes match", cid, len(downloadedData)) - return true - }, 30*time.Second, time.Second, "Local download should eventually match original for %s", cid) } // Step 2: Create archive index for CodexArchiveDownloader @@ -222,32 +194,22 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) 
TestFullArchiveDownloadWork } } - require.Eventually(suite.T(), func() bool { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() + // Create context with timeout for download + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - var downloadBuf bytes.Buffer - err := suite.client.LocalDownloadWithContext(ctx, cid, &downloadBuf) - if err != nil { - suite.T().Logf("LocalDownloadWithContext error for %s: %v", completedHash, err) - return false - } + var downloadBuf bytes.Buffer + err := suite.client.LocalDownloadWithContext(ctx, cid, &downloadBuf) + cancel() - downloadedData := downloadBuf.Bytes() - if len(downloadedData) != len(originalData) { - suite.T().Logf("Downloaded %d bytes for %s (expected %d), retrying...", - len(downloadedData), completedHash, len(originalData)) - return false - } + require.NoError(suite.T(), err, "LocalDownload should succeed for %s", completedHash) - if !bytes.Equal(originalData, downloadedData) { - suite.T().Logf("Downloaded data mismatch for %s, retrying...", completedHash) - return false - } + downloadedData := downloadBuf.Bytes() + assert.Equal(suite.T(), len(originalData), len(downloadedData), + "Downloaded data length should match for %s", completedHash) + assert.True(suite.T(), bytes.Equal(originalData, downloadedData), + "Downloaded data should match original for %s", completedHash) - suite.T().Logf("✅ Verified content for %s: %d bytes match", completedHash, len(downloadedData)) - return true - }, 30*time.Second, time.Second, "Local download should eventually match original for %s", completedHash) + suite.T().Logf("✅ Verified content for %s: %d bytes match", completedHash, len(downloadedData)) } suite.T().Log("🎉 Full archive download workflow completed successfully!") From d5c49b808922aaf8285a5aa9ab9624e5a93bf3df Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 11:52:17 +0100 Subject: [PATCH 20/75] Add test for HandleHistoryArchiveIndexCidMessage and fix IsReady --- protocol/communities/manager_archive.go | 12 ++-- protocol/messenger_handler_test.go | 73 +++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 7 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index f35e7523b68..660d643a0e9 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -285,13 +285,11 @@ func (m *ArchiveManager) torrentClientStarted() bool { } func (m *ArchiveManager) IsReady() bool { - // Simply checking for `torrentConfig.Enabled` isn't enough - // as there's a possibility that the torrent client couldn't - // be instantiated (for example in case of port conflicts) - return m.torrentConfig != nil && - m.torrentConfig.Enabled && - m.torrentClientStarted() && - m.isCodexClientStarted + // Simply checking for `torrentConfig.Enabled` or `codexConfig.Enabled` + // isn't enough as there's a possibility that the torrent client or the + // codex client couldn't be instantiated (for example in case of port conflicts) + return (m.torrentConfig != nil && m.torrentConfig.Enabled && m.torrentClientStarted() || + (m.codexConfig != nil && m.codexConfig.Enabled && m.isCodexClientStarted)) } func (m *ArchiveManager) GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) { diff --git a/protocol/messenger_handler_test.go b/protocol/messenger_handler_test.go index 36f22366d23..805d08085b4 100644 --- a/protocol/messenger_handler_test.go +++ b/protocol/messenger_handler_test.go @@ 
-1,15 +1,23 @@ package protocol import ( + "bytes" "context" + "path/filepath" "testing" + "github.com/codex-storage/codex-go-bindings/codex" "github.com/stretchr/testify/suite" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" "github.com/status-im/status-go/crypto" "github.com/status-im/status-go/crypto/types" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/contacts" "github.com/status-im/status-go/protocol/protobuf" + "github.com/status-im/status-go/protocol/requests" v1protocol "github.com/status-im/status-go/protocol/v1" localnotifications "github.com/status-im/status-go/services/local-notifications" ) @@ -173,3 +181,68 @@ func (s *EventToSystemMessageSuite) TestHandleMembershipUpdate() { s.Require().Len(state.Response.Chats(), 1) s.Require().False(state.Response.Chats()[0].Active) } + +func (s *EventToSystemMessageSuite) TestHandleHistoryArchiveIndexCidMessageWithCodex() { + adminPrivateKey, err := crypto.GenerateKey() + s.Require().NoError(err) + + contact, err := contacts.BuildContactFromPublicKey(&adminPrivateKey.PublicKey) + s.Require().NoError(err) + + contact.ContactRequestLocalState = contacts.ContactRequestStateSent + currentMessageState := &CurrentMessageState{ + Contact: contact, + } + + state := &ReceivedMessageState{ + Response: &MessengerResponse{}, + Timesource: s.m.getTimesource(), + CurrentMessageState: currentMessageState, + ExistingMessagesMap: map[string]bool{}, + AllChats: s.m.allChats, + } + + description := &requests.CreateCommunity{ + Membership: protobuf.CommunityPermissions_AUTO_ACCEPT, + Name: "status", + Color: "#ffffff", + Description: "status community description", + } + + response, err := s.m.CreateCommunity(description, false) + s.Require().NoError(err) + + s.m.archiveManager.SetCodexConfig(¶ms.CodexConfig{ + Enabled: true, + HistoryArchiveDataDir: filepath.Join(s.T().TempDir(), "codex", "archivedata"), + CodexNodeConfig: codex.Config{ + DataDir: filepath.Join(s.T().TempDir(), "codex", "codexdata"), + LogFormat: codex.LogFormatNoColors, + MetricsEnabled: false, + LogLevel: "ERROR", + Nat: "none", + }, + }) + + err = s.m.archiveManager.StartCodexClient() + s.Require().NoError(err) + defer s.m.archiveManager.Stop() + + s.Require().True(s.m.archiveManager.IsReady()) + + community := response.Communities()[0] + s.m.communitiesManager.SaveCommunitySettings(communities.CommunitySettings{ + CommunityID: community.IDString(), + HistoryArchiveSupportEnabled: true, + }) + s.m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) + + var buf bytes.Buffer + core := zapcore.NewCore( + zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), zapcore.AddSync(&buf), zap.DebugLevel) + s.m.logger = zap.New(core) + + err = s.m.HandleHistoryArchiveMagnetlinkMessage(state, &community.PrivateKey().PublicKey, "", 100) + s.Require().NoError(err) + s.Require().Contains(buf.String(), "skipping magnetlink processing due to codex-only preference") +} From 0ba4e1181d453e7ac5f8d9a935ac947e5de09184 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 14:26:06 +0100 Subject: [PATCH 21/75] global node-wide archive distribution preference --- api/default_test.go | 19 ++-- api/defaults.go | 3 + api/geth_backend.go | 5 +- ...add_archive_distribution_preference.up.sql | 1 - ...tribution_preference_to_node_config.up.sql | 2 + appdatabase/node_config_test.go | 31 +++--- multiaccounts/accounts/database_test.go | 2 +- 
multiaccounts/settings/database_test.go | 3 +- .../settings_wallet/database_test.go | 5 +- node/geth_status_node_test.go | 3 +- nodecfg/node_config.go | 83 +++++++++++++--- params/config.go | 65 ++++++++++--- protocol/communities/manager.go | 25 ++++- protocol/communities/persistence.go | 66 +++++++++---- .../communities_messenger_helpers_test.go | 3 +- ...nities_messenger_token_permissions_test.go | 87 +++++++++++++---- protocol/messenger_builder_test.go | 2 +- protocol/messenger_communities.go | 96 +++++++------------ protocol/messenger_handler.go | 72 ++++---------- ...mmunity_archive_distribution_preference.go | 17 +--- services/connector/test_helpers_test.go | 3 +- services/ext/api.go | 12 +-- services/gif/gif_test.go | 3 +- services/wallet/market/market_test.go | 3 +- .../token/token-lists/token_lists_test.go | 3 +- 25 files changed, 369 insertions(+), 245 deletions(-) delete mode 100644 appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql create mode 100644 appdatabase/migrations/sql/1762000000_add_history_archive_distribution_preference_to_node_config.up.sql diff --git a/api/default_test.go b/api/default_test.go index bb613d01bb7..23c57f93d2b 100644 --- a/api/default_test.go +++ b/api/default_test.go @@ -11,15 +11,16 @@ import ( func setupConfigs() (*params.NodeConfig, *requests.APIConfig) { newNodeConfig := ¶ms.NodeConfig{ - APIModules: "test, eth, wakuv2", - ConnectorConfig: params.ConnectorConfig{Enabled: true}, - HTTPEnabled: true, - HTTPHost: "0.0.0.0", - HTTPPort: 8545, - HTTPVirtualHosts: []string{"status-go"}, - WSEnabled: false, - WSHost: "127.0.0.1", - WSPort: 8586, + APIModules: "test, eth, wakuv2", + ConnectorConfig: params.ConnectorConfig{Enabled: true}, + HTTPEnabled: true, + HTTPHost: "0.0.0.0", + HTTPPort: 8545, + HTTPVirtualHosts: []string{"status-go"}, + WSEnabled: false, + WSHost: "127.0.0.1", + WSPort: 8586, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } apiConfig := &requests.APIConfig{ diff --git a/api/defaults.go b/api/defaults.go index c2c08ee528e..e82b475905a 100644 --- a/api/defaults.go +++ b/api/defaults.go @@ -277,6 +277,8 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc nodeConfig.LogEnabled = false } + nodeConfig.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference + if request.TestOverrideNetworks != nil { nodeConfig.Networks = request.TestOverrideNetworks } else { @@ -356,6 +358,7 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc DataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "codexdata"), BlockRetries: params.BlockRetries, MetricsEnabled: false, + LogFormat: codex.LogFormatNoColors, }, } diff --git a/api/geth_backend.go b/api/geth_backend.go index 1e9dba316d4..c2237a92234 100644 --- a/api/geth_backend.go +++ b/api/geth_backend.go @@ -583,7 +583,7 @@ func (b *GethStatusBackend) workaroundToFixBadMigration(request *requests.Login) if currentConf.NetworkID == 0 && currentConf.NodeKey == "" { // check if exist old node config - oldNodeConf := ¶ms.NodeConfig{} + oldNodeConf := ¶ms.NodeConfig{HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference} err = b.appDB.QueryRow("SELECT node_config FROM settings WHERE synthetic_id = 'id'").Scan(&sqlite.JSONBlob{Data: oldNodeConf}) if err != nil && err != sql.ErrNoRows { return err @@ -692,7 +692,8 @@ func (b *GethStatusBackend) loginAccount(request *requests.Login) error { defaultCfg := 
¶ms.NodeConfig{ // why we need this? relate PR: https://github.com/status-im/status-go/pull/4014 - KeycardPairingDataFile: filepath.Join(b.rootDataDir, DefaultKeycardPairingDataFileRelativePath), + KeycardPairingDataFile: filepath.Join(b.rootDataDir, DefaultKeycardPairingDataFileRelativePath), + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } defaultCfg.WalletConfig = buildWalletConfig(&request.WalletConfig, &request.WalletSecretsConfig) diff --git a/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql b/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql deleted file mode 100644 index 56fcc31075a..00000000000 --- a/appdatabase/migrations/sql/1761795811_add_archive_distribution_preference.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE communities_archive_info ADD COLUMN preferred_distribution_method TEXT DEFAULT 'codex'; \ No newline at end of file diff --git a/appdatabase/migrations/sql/1762000000_add_history_archive_distribution_preference_to_node_config.up.sql b/appdatabase/migrations/sql/1762000000_add_history_archive_distribution_preference_to_node_config.up.sql new file mode 100644 index 00000000000..a1c6c617183 --- /dev/null +++ b/appdatabase/migrations/sql/1762000000_add_history_archive_distribution_preference_to_node_config.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE node_config + ADD COLUMN history_archive_distribution_preference TEXT DEFAULT 'codex'; diff --git a/appdatabase/node_config_test.go b/appdatabase/node_config_test.go index 67e6c33e566..dc0803a162c 100644 --- a/appdatabase/node_config_test.go +++ b/appdatabase/node_config_test.go @@ -27,21 +27,22 @@ func setupTestDB(t *testing.T) *sql.DB { func randomNodeConfig() *params.NodeConfig { return ¶ms.NodeConfig{ - NetworkID: uint64(int64(randomInt(math.MaxInt64))), - NodeKey: randomString(), - APIModules: randomString(), - WalletConfig: params.WalletConfig{Enabled: randomBool()}, - BrowsersConfig: params.BrowsersConfig{Enabled: randomBool()}, - PermissionsConfig: params.PermissionsConfig{Enabled: randomBool()}, - ConnectorConfig: params.ConnectorConfig{Enabled: randomBool()}, - LogEnabled: randomBool(), - LogDir: randomString(), - LogFile: randomString(), - LogLevel: randomString(), - LogMaxBackups: randomInt(math.MaxInt64), - LogMaxSize: randomInt(math.MaxInt64), - LogCompressRotated: randomBool(), - LogToStderr: randomBool(), + NetworkID: uint64(int64(randomInt(math.MaxInt64))), + NodeKey: randomString(), + APIModules: randomString(), + WalletConfig: params.WalletConfig{Enabled: randomBool()}, + BrowsersConfig: params.BrowsersConfig{Enabled: randomBool()}, + PermissionsConfig: params.PermissionsConfig{Enabled: randomBool()}, + ConnectorConfig: params.ConnectorConfig{Enabled: randomBool()}, + HistoryArchiveDistributionPreference: randomString(), + LogEnabled: randomBool(), + LogDir: randomString(), + LogFile: randomString(), + LogLevel: randomString(), + LogMaxBackups: randomInt(math.MaxInt64), + LogMaxSize: randomInt(math.MaxInt64), + LogCompressRotated: randomBool(), + LogToStderr: randomBool(), ClusterConfig: params.ClusterConfig{ Enabled: randomBool(), Fleet: randomString(), diff --git a/multiaccounts/accounts/database_test.go b/multiaccounts/accounts/database_test.go index 9d6aad03ae2..a0952bf50c7 100644 --- a/multiaccounts/accounts/database_test.go +++ b/multiaccounts/accounts/database_test.go @@ -46,7 +46,7 @@ func TestMoveWalletAccount(t *testing.T) { setting := settings.Settings{ Networks: &networks, } - config := 
params.NodeConfig{} + config := params.NodeConfig{HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference} err := db.CreateSettings(setting, config) require.NoError(t, err) diff --git a/multiaccounts/settings/database_test.go b/multiaccounts/settings/database_test.go index 5db9baa162e..567f625a682 100644 --- a/multiaccounts/settings/database_test.go +++ b/multiaccounts/settings/database_test.go @@ -20,7 +20,8 @@ import ( var ( config = params.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks = json.RawMessage("{}") diff --git a/multiaccounts/settings_wallet/database_test.go b/multiaccounts/settings_wallet/database_test.go index 7ae8c687d98..97a7b632754 100644 --- a/multiaccounts/settings_wallet/database_test.go +++ b/multiaccounts/settings_wallet/database_test.go @@ -14,8 +14,9 @@ import ( var ( config = params.NodeConfig{ - NetworkID: 10, - RootDataDir: "test", + NetworkID: 10, + RootDataDir: "test", + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks = json.RawMessage("{}") settingsObj = settings.Settings{ diff --git a/node/geth_status_node_test.go b/node/geth_status_node_test.go index 1101c19301e..91fc4677919 100644 --- a/node/geth_status_node_test.go +++ b/node/geth_status_node_test.go @@ -63,7 +63,8 @@ func TestStatusNodeWithDataDir(t *testing.T) { require.NoError(t, err) config := params.NodeConfig{ - RootDataDir: dir, + RootDataDir: dir, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } n, stop1, stop2, err := createStatusNode() diff --git a/nodecfg/node_config.go b/nodecfg/node_config.go index b7cff27ad08..ca5bde910b7 100644 --- a/nodecfg/node_config.go +++ b/nodecfg/node_config.go @@ -22,12 +22,43 @@ func nodeConfigWasMigrated(tx *sql.Tx) (migrated bool, err error) { type insertFn func(tx *sql.Tx, c *params.NodeConfig) error +const historyArchiveDistributionPreferenceColumn = "history_archive_distribution_preference" + +func nodeConfigHasArchivePreferenceColumn(tx *sql.Tx) bool { + rows, err := tx.Query(`PRAGMA table_info(node_config)`) + if err != nil { + return false + } + defer rows.Close() + + for rows.Next() { + var ( + cid int + name string + dataType string + notNull int + dfltValue sql.NullString + pk int + ) + if err := rows.Scan(&cid, &name, &dataType, ¬Null, &dfltValue, &pk); err != nil { + return false + } + if name == historyArchiveDistributionPreferenceColumn { + return true + } + } + + return false +} + func insertNodeConfigBase(tx *sql.Tx, c *params.NodeConfig, includeConnector bool) error { + hasPreferenceColumn := nodeConfigHasArchivePreferenceColumn(tx) + query := ` - INSERT OR REPLACE INTO node_config ( - network_id, data_dir, keystore_dir, node_key, - api_modules, enable_ntp_sync, wallet_enabled, - browser_enabled, permissions_enabled` + INSERT OR REPLACE INTO node_config ( + network_id, data_dir, keystore_dir, node_key, + api_modules, enable_ntp_sync, wallet_enabled, + browser_enabled, permissions_enabled` args := []any{ c.NetworkID, "", "", c.NodeKey, c.APIModules, true, @@ -35,6 +66,15 @@ func insertNodeConfigBase(tx *sql.Tx, c *params.NodeConfig, includeConnector boo c.PermissionsConfig.Enabled, } + preference := c.HistoryArchiveDistributionPreference + if preference == "" { + preference = params.DefaultHistoryArchiveDistributionPreference + } + if hasPreferenceColumn { + query += `, history_archive_distribution_preference` + args = 
append(args, preference) + } + if includeConnector { query += `, connector_enabled` args = append(args, c.ConnectorConfig.Enabled) @@ -291,20 +331,37 @@ func migrateNodeConfig(tx *sql.Tx) error { func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) { nodecfg := ¶ms.NodeConfig{} - err := tx.QueryRow(` - SELECT - network_id, node_key, api_modules, - wallet_enabled, browser_enabled, permissions_enabled, - connector_enabled FROM node_config - WHERE synthetic_id = 'id' - `).Scan( + hasPreferenceColumn := nodeConfigHasArchivePreferenceColumn(tx) + + query := ` + SELECT + network_id, node_key, api_modules, + wallet_enabled, browser_enabled, permissions_enabled` + + scanArgs := []any{ &nodecfg.NetworkID, &nodecfg.NodeKey, &nodecfg.APIModules, &nodecfg.WalletConfig.Enabled, &nodecfg.BrowsersConfig.Enabled, &nodecfg.PermissionsConfig.Enabled, - &nodecfg.ConnectorConfig.Enabled, - ) + } + + if hasPreferenceColumn { + query += `, history_archive_distribution_preference` + scanArgs = append(scanArgs, &nodecfg.HistoryArchiveDistributionPreference) + } + + query += `, connector_enabled FROM node_config + WHERE synthetic_id = 'id'` + scanArgs = append(scanArgs, &nodecfg.ConnectorConfig.Enabled) + + err := tx.QueryRow(query).Scan(scanArgs...) if err != nil && err != sql.ErrNoRows { return nil, err } + if nodecfg.HistoryArchiveDistributionPreference == "" { + nodecfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference + } + if nodecfg.HistoryArchiveDistributionPreference == "" { + nodecfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference + } // Load codex_config var listenAddrsStr, bootstrapNodesStr string diff --git a/params/config.go b/params/config.go index f6d281c8176..ba44b57930b 100644 --- a/params/config.go +++ b/params/config.go @@ -207,8 +207,13 @@ type NodeConfig struct { // ConnectorConfig extra configuration for connector.Service ConnectorConfig ConnectorConfig + // node-wide selection for history archive distribution preference + HistoryArchiveDistributionPreference string + + // TorrentConfig provides configuration for the BitTorrent client used for message history archives. TorrentConfig TorrentConfig + // CodexConfig provides configuration for the Codex history archive. 
CodexConfig CodexConfig OutputMessageCSVEnabled bool @@ -330,6 +335,12 @@ type CodexConfig struct { CodexNodeConfig codex.Config } +const ( + ArchiveDistributionMethodTorrent = "torrent" + ArchiveDistributionMethodCodex = "codex" + DefaultHistoryArchiveDistributionPreference = ArchiveDistributionMethodCodex +) + // Validate validates the ShhextConfig struct and returns an error if inconsistent values are found func (c *ShhextConfig) Validate(validate *validator.Validate) error { if err := validate.Struct(c); err != nil { @@ -360,13 +371,35 @@ func (c *NodeConfig) UpdateWithDefaults() error { c.APIModules = "net,web3,eth" } - // Ensure TorrentConfig is valid + if c.HistoryArchiveDistributionPreference == "" { + c.HistoryArchiveDistributionPreference = DefaultHistoryArchiveDistributionPreference + } + if c.TorrentConfig.Enabled { - if c.TorrentConfig.DataDir == "" { - c.TorrentConfig.DataDir = filepath.Join(c.RootDataDir, ArchivesRelativePath) + c.HistoryArchiveDistributionPreference = ArchiveDistributionMethodTorrent + } else if c.CodexConfig.Enabled { + c.HistoryArchiveDistributionPreference = ArchiveDistributionMethodCodex + } + + if c.HistoryArchiveDistributionPreference == ArchiveDistributionMethodTorrent { + if c.TorrentConfig.Enabled { + if c.TorrentConfig.DataDir == "" { + c.TorrentConfig.DataDir = filepath.Join(c.RootDataDir, ArchivesRelativePath) + } + if c.TorrentConfig.TorrentDir == "" { + c.TorrentConfig.TorrentDir = filepath.Join(c.RootDataDir, TorrentTorrentsRelativePath) + } } - if c.TorrentConfig.TorrentDir == "" { - c.TorrentConfig.TorrentDir = filepath.Join(c.RootDataDir, TorrentTorrentsRelativePath) + } + + if c.HistoryArchiveDistributionPreference == ArchiveDistributionMethodCodex { + if c.CodexConfig.Enabled { + if c.CodexConfig.HistoryArchiveDataDir == "" { + c.CodexConfig.HistoryArchiveDataDir = filepath.Join(c.RootDataDir, "codex", "archivedata") + } + if c.CodexConfig.CodexNodeConfig.DataDir == "" { + c.CodexConfig.CodexNodeConfig.DataDir = filepath.Join(c.RootDataDir, "codex", "codexdata") + } } } @@ -383,15 +416,16 @@ func NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) { } config := &NodeConfig{ - NetworkID: networkID, - RootDataDir: dataDir, - KeycardPairingDataFile: keycardPairingDataFile, - HTTPHost: "localhost", - HTTPPort: 8545, - HTTPVirtualHosts: []string{"localhost"}, - APIModules: "eth,net,web3,peer,wallet", - LogFile: "", - LogLevel: "ERROR", + NetworkID: networkID, + RootDataDir: dataDir, + KeycardPairingDataFile: keycardPairingDataFile, + HTTPHost: "localhost", + HTTPPort: 8545, + HTTPVirtualHosts: []string{"localhost"}, + APIModules: "eth,net,web3,peer,wallet", + LogFile: "", + LogLevel: "ERROR", + HistoryArchiveDistributionPreference: DefaultHistoryArchiveDistributionPreference, WakuV2Config: WakuV2Config{ Host: "0.0.0.0", Port: 0, @@ -407,9 +441,10 @@ func NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) { Enabled: false, HistoryArchiveDataDir: filepath.Join(dataDir, "codex", "archivedata"), CodexNodeConfig: codex.Config{ - BlockRetries: 50, + BlockRetries: BlockRetries, DataDir: filepath.Join(dataDir, "codex", "codexdata"), MetricsEnabled: false, + LogFormat: codex.LogFormatNoColors, }, }, } diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index b62d8d1bb24..45cc2935434 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -72,7 +72,6 @@ var validateInterval = 2 * time.Minute // Archive distribution preferences const ( - 
ArchiveDistributionMethodUnknown = "unknown" ArchiveDistributionMethodTorrent = "torrent" ArchiveDistributionMethodCodex = "codex" ) @@ -3751,11 +3750,15 @@ func (m *Manager) GetLastSeenIndexCid(communityID types.HexBytes) (string, error return m.persistence.GetLastSeenIndexCid(communityID) } -func (m *Manager) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - return m.persistence.GetArchiveDistributionPreference(communityID) +func (m *Manager) GetArchiveDistributionPreference() (string, error) { + return m.persistence.GetArchiveDistributionPreference() } -func (m *Manager) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { +// func (m *Manager) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { +// return m.persistence.GetArchiveDistributionPreference(communityID) +// } + +func (m *Manager) SetArchiveDistributionPreference(preference string) error { // Validate preference value switch preference { case ArchiveDistributionMethodTorrent, ArchiveDistributionMethodCodex: @@ -3764,9 +3767,21 @@ func (m *Manager) SetArchiveDistributionPreference(communityID types.HexBytes, p return errors.New("invalid archive distribution preference") } - return m.persistence.SetArchiveDistributionPreference(communityID, preference) + return m.persistence.SetArchiveDistributionPreference(preference) } +// func (m *Manager) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { +// // Validate preference value +// switch preference { +// case ArchiveDistributionMethodTorrent, ArchiveDistributionMethodCodex: +// // Valid preference +// default: +// return errors.New("invalid archive distribution preference") +// } + +// return m.persistence.SetArchiveDistributionPreference(communityID, preference) +// } + func (m *Manager) LeaveCommunity(id types.HexBytes) (*Community, error) { m.communityLock.Lock(id) defer m.communityLock.Unlock(id) diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go index 1c78f35a0e9..4c499fe2bd4 100644 --- a/protocol/communities/persistence.go +++ b/protocol/communities/persistence.go @@ -1021,12 +1021,11 @@ func (p *Persistence) GetMagnetlinkMessageClock(communityID types.HexBytes) (uin } func (p *Persistence) SaveCommunityArchiveInfo(communityID types.HexBytes, magnetLinkClock uint64, lastArchiveEndDate uint64, indexCidClock uint64) error { - _, err := p.db.Exec(`INSERT INTO communities_archive_info (magnetlink_clock, last_message_archive_end_date, community_id, index_cid_clock, preferred_distribution_method) VALUES (?, ?, ?, ?, ?)`, + _, err := p.db.Exec(`INSERT INTO communities_archive_info (magnetlink_clock, last_message_archive_end_date, community_id, index_cid_clock) VALUES (?, ?, ?, ?)`, magnetLinkClock, lastArchiveEndDate, communityID.String(), indexCidClock, - ArchiveDistributionMethodUnknown, ) return err } @@ -2231,33 +2230,58 @@ func (p *Persistence) UpdateAndPruneEncryptionKeyRequests(communityID types.HexB return nil } -func (p *Persistence) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - // return "codex", nil +// func (p *Persistence) GetArchiveDistributionPreference(communityID types.HexBytes) (string, error) { +// // return "codex", nil +// var preference string +// err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) +// if err == sql.ErrNoRows { +// return 
ArchiveDistributionMethodUnknown, nil +// } else if err != nil { +// return "", err +// } +// return preference, nil +// } + +func (p *Persistence) GetArchiveDistributionPreference() (string, error) { var preference string - err := p.db.QueryRow(`SELECT preferred_distribution_method FROM communities_archive_info WHERE community_id = ?`, communityID.String()).Scan(&preference) - if err == sql.ErrNoRows { - return ArchiveDistributionMethodUnknown, nil - } else if err != nil { + err := p.db.QueryRow(`SELECT history_archive_distribution_preference FROM node_config`).Scan(&preference) + if err != nil { return "", err } return preference, nil } -func (p *Persistence) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { - // First check if record exists - exists, err := p.HasCommunityArchiveInfo(communityID) +func (p *Persistence) SetArchiveDistributionPreference(preference string) error { + res, err := p.db.Exec( + `UPDATE node_config + SET history_archive_distribution_preference = ? + WHERE synthetic_id = 'id'`, + preference, + ) if err != nil { return err } - - if exists { - // Update existing record - _, err = p.db.Exec(`UPDATE communities_archive_info SET preferred_distribution_method = ? WHERE community_id = ?`, - preference, communityID.String()) - } else { - // Insert new record with preference - _, err = p.db.Exec(`INSERT INTO communities_archive_info (community_id, preferred_distribution_method, magnetlink_clock, last_message_archive_end_date, index_cid_clock) VALUES (?, ?, 0, 0, 0)`, - communityID.String(), preference) + if rows, _ := res.RowsAffected(); rows == 0 { + return errors.New("node_config row not found") } - return err + return nil } + +// func (p *Persistence) SetArchiveDistributionPreference(communityID types.HexBytes, preference string) error { +// // First check if record exists +// exists, err := p.HasCommunityArchiveInfo(communityID) +// if err != nil { +// return err +// } + +// if exists { +// // Update existing record +// _, err = p.db.Exec(`UPDATE communities_archive_info SET preferred_distribution_method = ? 
WHERE community_id = ?`, +// preference, communityID.String()) +// } else { +// // Insert new record with preference +// _, err = p.db.Exec(`INSERT INTO communities_archive_info (community_id, preferred_distribution_method, magnetlink_clock, last_message_archive_end_date, index_cid_clock) VALUES (?, ?, 0, 0, 0)`, +// communityID.String(), preference) +// } +// return err +// } diff --git a/protocol/communities_messenger_helpers_test.go b/protocol/communities_messenger_helpers_test.go index b18fac97ab7..7fc7703a350 100644 --- a/protocol/communities_messenger_helpers_test.go +++ b/protocol/communities_messenger_helpers_test.go @@ -254,7 +254,8 @@ func (tcmc *testCommunitiesMessengerConfig) complete() error { func defaultTestCommunitiesMessengerNodeConfig() *params.NodeConfig { return ¶ms.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } } func defaultTestCommunitiesMessengerSettings() *settings.Settings { diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index a032c8dcaa9..4f2f22f8a51 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -149,6 +149,23 @@ type MessengerCommunitiesTokenPermissionsSuite struct { collectiblesManagerMock *CollectiblesManagerMock accountsTestData map[string][]string accountsPasswords map[string]string + nodeConfigs map[string]*params.NodeConfig +} + +func (s *MessengerCommunitiesTokenPermissionsSuite) defaultNodeCfg(tempDir string) *params.NodeConfig { + s.Require().NoError(os.MkdirAll(tempDir, 0o755)) + + const defaultNetworkID = 1 + + nodeCfg, err := params.NewNodeConfig(tempDir, defaultNetworkID) + s.Require().NoError(err) + + // false is default, but being explicit here for clarity + nodeCfg.CodexConfig.Enabled = false + nodeCfg.TorrentConfig.Enabled = false + nodeCfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference + + return nodeCfg } func (s *MessengerCommunitiesTokenPermissionsSuite) SetupTest() { @@ -163,11 +180,19 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) SetupTest() { } s.resetMockedBalances() - s.owner = s.newMessenger(ownerPassword, []string{ownerAddress}, "owner", []Option{}) + s.nodeConfigs = make(map[string]*params.NodeConfig) + + ownerNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "owner_"+uuid.NewString())) + s.owner = s.newMessenger(ownerPassword, []string{ownerAddress}, "owner", []Option{}, ownerNodeConfig) + s.nodeConfigs[s.owner.IdentityPublicKeyString()] = ownerNodeConfig - s.bob = s.newMessenger(bobPassword, []string{bobAddress}, "bob", []Option{}) + bobNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "bob_"+uuid.NewString())) + s.bob = s.newMessenger(bobPassword, []string{bobAddress}, "bob", []Option{}, bobNodeConfig) + s.nodeConfigs[s.bob.IdentityPublicKeyString()] = bobNodeConfig - s.alice = s.newMessenger(alicePassword, []string{aliceAddress1, aliceAddress2}, "alice", []Option{}) + aliceNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "alice_"+uuid.NewString())) + s.alice = s.newMessenger(alicePassword, []string{aliceAddress1, aliceAddress2}, "alice", []Option{}, aliceNodeConfig) + s.nodeConfigs[s.alice.IdentityPublicKeyString()] = aliceNodeConfig _, err := s.owner.Start() s.Require().NoError(err) @@ -184,16 +209,22 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TearDownTest() { 
s.MessengerBaseTestSuite.TearDownTest() } -func (s *MessengerCommunitiesTokenPermissionsSuite) newMessenger(password string, walletAddresses []string, name string, extraOptions []Option) *Messenger { +func (s *MessengerCommunitiesTokenPermissionsSuite) newMessenger(password string, walletAddresses []string, name string, extraOptions []Option, nodeConfig *params.NodeConfig) *Messenger { communityManagerOptions := []communities.ManagerOption{ communities.WithAllowForcingCommunityMembersReevaluation(true), } extraOptions = append(extraOptions, WithCommunityManagerOptions(communityManagerOptions)) + testMessengerCfg := testMessengerConfig{ + extraOptions: extraOptions, + } + + if nodeConfig != nil { + testMessengerCfg.nodeConfig = nodeConfig + } + messenger := newTestCommunitiesMessenger(&s.Suite, s.messagingEnv, testCommunitiesMessengerConfig{ - testMessengerConfig: testMessengerConfig{ - extraOptions: extraOptions, - }, + testMessengerConfig: testMessengerCfg, password: password, walletAddresses: walletAddresses, mockedBalances: &s.mockedBalances, @@ -2136,10 +2167,10 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedArchiveMe community, chat := s.createCommunity() // createCommunity sets history archive distribution method to "codex" - we need torrent for this test - err := s.owner.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodTorrent) + err := s.owner.communitiesManager.SetArchiveDistributionPreference(communities.ArchiveDistributionMethodTorrent) s.Require().NoError(err) - err = s.bob.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodTorrent) + err = s.bob.communitiesManager.SetArchiveDistributionPreference(communities.ArchiveDistributionMethodTorrent) s.Require().NoError(err) // 1.2. Setup permissions @@ -2384,12 +2415,16 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch // 1.1. Create community community, chat := s.createCommunity() - // for community owner, the history archive distribution preference is set to Codex when the community is created - archiveDistributionPreferenceOwner, err := s.owner.communitiesManager.GetArchiveDistributionPreference(community.ID()) + archiveDistributionPreferenceOwner, err := s.owner.GetArchiveDistributionPreference() s.Require().NoError(err) log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) + archiveDistributionPreferenceBob, err := s.bob.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) + // 1.2. 
Setup permissions communityPermission := &requests.CreateCommunityTokenPermission{ CommunityID: community.ID(), @@ -2498,14 +2533,6 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch s.joinCommunity(community, s.bob) - // when Bob requested to join, the history archive distribution preference should be set to Codex - archiveDistributionPreferenceBob, err := s.bob.communitiesManager.GetArchiveDistributionPreference(community.ID()) - s.Require().NoError(err) - - log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) - - s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) - err = <-waitForKeysDistributedToBob s.Require().NoError(err) @@ -2595,9 +2622,27 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas // So TestImportDecryptedCodexArchiveMessages starts from that baseline: the in-memory DB contains the node config seeded with CodexConfig.Enabled false (unless you explicitly mutate it in the test). - // Following the above reaoning, we read the config from the database and verify that CodexConfig setting are what we expect them to be. + // Following the above reasoning, we read the codex config from the database and verify that CodexConfig setting are what we expect them to be. + + ownerNodeCfgFromDB, err := s.owner.settings.GetNodeConfig() + s.Require().NoError(err) + s.Assert().Equal( + s.nodeConfigs[s.owner.IdentityPublicKeyString()].CodexConfig, + ownerNodeCfgFromDB.CodexConfig, + ) + + bobNodeCfgFromDB, err := s.bob.settings.GetNodeConfig() + s.Require().NoError(err) + s.Assert().Equal( + s.nodeConfigs[s.bob.IdentityPublicKeyString()].CodexConfig, + bobNodeCfgFromDB.CodexConfig, + ) + + // now to be specific + s.Assert().False(ownerNodeCfgFromDB.CodexConfig.Enabled) + s.Assert().False(bobNodeCfgFromDB.CodexConfig.Enabled) - // s.owner. 
+ // s.owner.settings.SaveSetting("node-config", ownerNodeCfgFromDB) } func (s *MessengerCommunitiesTokenPermissionsSuite) TestDeleteChannelWithTokenPermission() { diff --git a/protocol/messenger_builder_test.go b/protocol/messenger_builder_test.go index 4505f29b0f4..d62d2fbb28d 100644 --- a/protocol/messenger_builder_test.go +++ b/protocol/messenger_builder_test.go @@ -66,7 +66,7 @@ func (tmc *testMessengerConfig) complete() error { } if tmc.nodeConfig == nil { - tmc.nodeConfig = ¶ms.NodeConfig{} + tmc.nodeConfig = ¶ms.NodeConfig{HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference} } return nil diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index e2fee8e33b8..554e5944033 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -1478,23 +1478,6 @@ func (m *Messenger) RequestToJoinCommunity(request *requests.RequestToJoinCommun Priority: &messagingtypes.HighPriority, } - // we want to use codex for archive distribution - // but if it is already set to something else (handy in testing), respect that - archiveDistributionPreference, err := m.communitiesManager.GetArchiveDistributionPreference(community.ID()) - if err != nil { - return nil, err - } - - m.logger.Debug("Archive distribution preference (RequestToJoin):", zap.String("preference", archiveDistributionPreference)) - - if archiveDistributionPreference == communities.ArchiveDistributionMethodUnknown { - // If the preference is unknown, we can set it to codex - err = m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) - if err != nil { - return nil, err - } - } - _, err = m.SendMessageToControlNode(community, rawMessage) if err != nil { return nil, err @@ -2504,12 +2487,6 @@ func (m *Messenger) CreateCommunity(request *requests.CreateCommunity, createDef return nil, err } - // we want to use codex for archive distribution - err = m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) - if err != nil { - return nil, err - } - communitySettings := communities.CommunitySettings{ CommunityID: community.IDString(), HistoryArchiveSupportEnabled: request.HistoryArchiveSupportEnabled, @@ -4078,12 +4055,12 @@ importMessageArchivesLoop: downloadedArchiveID := archiveIDsToImport[0] var archiveMessages []*protobuf.WakuMessage - preference, err := m.communitiesManager.GetArchiveDistributionPreference(communityID) + preference, err := m.GetArchiveDistributionPreference() if err != nil { m.logger.Warn("failed to get archive distribution preference, using codex", zap.Error(err)) - preference = "codex" + preference = communities.ArchiveDistributionMethodCodex } - if preference == "codex" { + if preference == communities.ArchiveDistributionMethodCodex { archiveMessages, err = m.archiveManager.ExtractMessagesFromCodexHistoryArchive(communityID, downloadedArchiveID) } else { archiveMessages, err = m.archiveManager.ExtractMessagesFromHistoryArchive(communityID, downloadedArchiveID) @@ -4229,34 +4206,43 @@ func (m *Messenger) EnableCommunityHistoryArchiveProtocol() error { return err } - if nodeConfig.TorrentConfig.Enabled && nodeConfig.CodexConfig.Enabled { + if nodeConfig.TorrentConfig.Enabled || nodeConfig.CodexConfig.Enabled { return nil } - nodeConfig.TorrentConfig.Enabled = true - err = m.settings.SaveSetting("node-config", nodeConfig) + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { 
return err } - nodeConfig.CodexConfig.Enabled = true - err = m.settings.SaveSetting("node-config", nodeConfig) - if err != nil { - return err - } + if archiveDistributionPreference == communities.ArchiveDistributionMethodTorrent { + nodeConfig.TorrentConfig.Enabled = true + err = m.settings.SaveSetting("node-config", nodeConfig) + if err != nil { + return err + } - m.config.torrentConfig = &nodeConfig.TorrentConfig - m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) - err = m.archiveManager.StartTorrentClient() - if err != nil { - return err + m.config.torrentConfig = &nodeConfig.TorrentConfig + m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) + err = m.archiveManager.StartTorrentClient() + if err != nil { + return err + } } - m.config.codexConfig = &nodeConfig.CodexConfig - m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) - err = m.archiveManager.StartCodexClient() - if err != nil { - return err + if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { + nodeConfig.CodexConfig.Enabled = true + err = m.settings.SaveSetting("node-config", nodeConfig) + if err != nil { + return err + } + + m.config.codexConfig = &nodeConfig.CodexConfig + m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) + err = m.archiveManager.StartCodexClient() + if err != nil { + return err + } } controlledCommunities, err := m.communitiesManager.Controlled() @@ -5155,33 +5141,23 @@ func (m *Messenger) startRequestMissingCommunityChannelsHRKeysLoop() { }() } -// SetCommunityArchiveDistributionPreference sets the archive distribution preference for a community -func (m *Messenger) SetCommunityArchiveDistributionPreference(request *requests.SetCommunityArchiveDistributionPreference) (*MessengerResponse, error) { +// SetArchiveDistributionPreference sets the archive distribution preference for the node +func (m *Messenger) SetArchiveDistributionPreference(request *requests.SetArchiveDistributionPreference) (*MessengerResponse, error) { if err := request.Validate(); err != nil { return nil, err } - community, err := m.communitiesManager.GetByID(request.CommunityID) - if err != nil { - return nil, err - } - - if community == nil { - return nil, errors.New("community not found") - } - - err = m.communitiesManager.SetArchiveDistributionPreference(request.CommunityID, request.Preference) + err := m.communitiesManager.SetArchiveDistributionPreference(request.Preference) if err != nil { return nil, err } response := &MessengerResponse{} - response.AddCommunity(community) return response, nil } -// GetCommunityArchiveDistributionPreference gets the archive distribution preference for a community -func (m *Messenger) GetCommunityArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - return m.communitiesManager.GetArchiveDistributionPreference(communityID) +// GetArchiveDistributionPreference gets the archive distribution preference for the node +func (m *Messenger) GetArchiveDistributionPreference() (string, error) { + return m.communitiesManager.GetArchiveDistributionPreference() } diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 75791265554..dcdbf7c5570 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1259,36 +1259,10 @@ func (m *Messenger) HandleHistoryArchiveMagnetlinkMessage(state *ReceivedMessage } if m.archiveManager.IsReady() && settings.HistoryArchiveSupportEnabled { - // Determine preference before any processing - preference, err := 
m.communitiesManager.GetArchiveDistributionPreference(id) - if err != nil { - m.logger.Warn("failed to get archive distribution preference, using auto", zap.Error(err)) - preference = "auto" - } - - // Skip magnetlink processing entirely if preference is codex-only - if preference == "codex" { - m.logger.Debug("skipping magnetlink processing due to codex-only preference") - return nil - } - lastMagnetlinkClock, err := m.communitiesManager.GetMagnetlinkMessageClock(id) if err != nil { return err } - lastIndexCidClock, err := m.communitiesManager.GetIndexCidMessageClock(id) - if err != nil { - return err - } - - var lastClock uint64 - if preference == "torrent" { - // In torrent mode, only compare against magnetlink clock - lastClock = lastMagnetlinkClock - } else { - // In auto mode, use the maximum of both clocks - lastClock = max(lastIndexCidClock, lastMagnetlinkClock) - } lastSeenMagnetlink, err := m.communitiesManager.GetLastSeenMagnetlink(id) if err != nil { @@ -1297,7 +1271,7 @@ func (m *Messenger) HandleHistoryArchiveMagnetlinkMessage(state *ReceivedMessage // We are only interested in a community archive magnet link // if it originates from a community that the current account is // part of and doesn't own the private key at the same time - if !community.IsControlNode() && community.Joined() && clock >= lastClock { + if !community.IsControlNode() && community.Joined() && clock >= lastMagnetlinkClock { if lastSeenMagnetlink == magnetlink { m.logger.Debug("already processed this magnetlink") return nil @@ -1363,36 +1337,10 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt } if m.archiveManager.IsReady() && settings.HistoryArchiveSupportEnabled { - // Determine preference before any processing - preference, err := m.communitiesManager.GetArchiveDistributionPreference(id) - if err != nil { - m.logger.Warn("failed to get archive distribution preference, using auto", zap.Error(err)) - preference = "auto" - } - - // Skip indexCid processing entirely if preference is torrent-only - if preference == "torrent" { - m.logger.Debug("skipping index CID processing due to torrent-only preference") - return nil - } - lastIndexCidClock, err := m.communitiesManager.GetIndexCidMessageClock(id) if err != nil { return err } - lastMagnetlinkClock, err := m.communitiesManager.GetMagnetlinkMessageClock(id) - if err != nil { - return err - } - - var lastClock uint64 - if preference == "codex" { - // In codex mode, only compare against indexCid clock - lastClock = lastIndexCidClock - } else { - // In auto mode, use the maximum of both clocks - lastClock = max(lastMagnetlinkClock, lastIndexCidClock) - } lastSeenCid, err := m.communitiesManager.GetLastSeenIndexCid(id) if err != nil { @@ -1401,7 +1349,7 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt // We are only interested in a community archive index CID // if it originates from a community that the current account is // part of and doesn't own the private key at the same time - if !community.IsControlNode() && community.Joined() && clock >= lastClock { + if !community.IsControlNode() && community.Joined() && clock >= lastIndexCidClock { if lastSeenCid == cid { m.logger.Debug("already processed this index cid") return nil @@ -3785,10 +3733,26 @@ func (m *Messenger) HandleSyncTrustedUser(state *ReceivedMessageState, message * } func (m *Messenger) HandleCommunityMessageArchiveIndexCid(state *ReceivedMessageState, message *protobuf.CommunityMessageArchiveIndexCid, statusMessage 
*messagingtypes.Message) error { + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() + if err != nil { + return err + } + if archiveDistributionPreference == communities.ArchiveDistributionMethodTorrent { + // Ignore Cid messages when torrent distribution is selected + return nil + } return m.HandleHistoryArchiveIndexCidMessage(state, state.CurrentMessageState.PublicKey, message.Cid, message.Clock) } func (m *Messenger) HandleCommunityMessageArchiveMagnetlink(state *ReceivedMessageState, message *protobuf.CommunityMessageArchiveMagnetlink, statusMessage *messagingtypes.Message) error { + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() + if err != nil { + return err + } + if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { + // Ignore Cid messages when codex distribution is selected + return nil + } return m.HandleHistoryArchiveMagnetlinkMessage(state, state.CurrentMessageState.PublicKey, message.MagnetUri, message.Clock) } diff --git a/protocol/requests/set_community_archive_distribution_preference.go b/protocol/requests/set_community_archive_distribution_preference.go index 3cf93032e56..546095ab07f 100644 --- a/protocol/requests/set_community_archive_distribution_preference.go +++ b/protocol/requests/set_community_archive_distribution_preference.go @@ -2,29 +2,22 @@ package requests import ( "errors" - - "github.com/status-im/status-go/crypto/types" ) -type SetCommunityArchiveDistributionPreference struct { - CommunityID types.HexBytes `json:"communityId"` - Preference string `json:"preference"` +type SetArchiveDistributionPreference struct { + Preference string `json:"preference"` } -func (s *SetCommunityArchiveDistributionPreference) Validate() error { +func (s *SetArchiveDistributionPreference) Validate() error { if s == nil { return errors.New("invalid request") } - if len(s.CommunityID) == 0 { - return errors.New("community ID is required") - } - // Validate preference value switch s.Preference { - case "auto", "torrent", "codex": + case "torrent", "codex": return nil default: - return errors.New("invalid preference, must be one of: auto, torrent, codex") + return errors.New("invalid preference, must be one of: torrent, codex") } } diff --git a/services/connector/test_helpers_test.go b/services/connector/test_helpers_test.go index c562c4d750e..029883335ca 100644 --- a/services/connector/test_helpers_test.go +++ b/services/connector/test_helpers_test.go @@ -58,7 +58,8 @@ func setupTests(t *testing.T) (state testState) { state.walletDb = createWalletDB(t) config := params.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks := json.RawMessage("{}") settingsObj := settings.Settings{ diff --git a/services/ext/api.go b/services/ext/api.go index f39732dcd6a..83ea6f09780 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -286,14 +286,14 @@ func (api *PublicAPI) SetCommunityShard(request *requests.SetCommunityShard) (*p return api.service.messenger.SetCommunityShard(request) } -// SetCommunityArchiveDistributionPreference sets the archive distribution preference for a community -func (api *PublicAPI) SetCommunityArchiveDistributionPreference(request *requests.SetCommunityArchiveDistributionPreference) (*protocol.MessengerResponse, error) { - return api.service.messenger.SetCommunityArchiveDistributionPreference(request) +// SetArchiveDistributionPreference sets the archive distribution preference for the 
node +func (api *PublicAPI) SetArchiveDistributionPreference(request *requests.SetArchiveDistributionPreference) (*protocol.MessengerResponse, error) { + return api.service.messenger.SetArchiveDistributionPreference(request) } -// GetCommunityArchiveDistributionPreference gets the archive distribution preference for a community -func (api *PublicAPI) GetCommunityArchiveDistributionPreference(communityID types.HexBytes) (string, error) { - return api.service.messenger.GetCommunityArchiveDistributionPreference(communityID) +// GetArchiveDistributionPreference gets the archive distribution preference for the node +func (api *PublicAPI) GetArchiveDistributionPreference() (string, error) { + return api.service.messenger.GetArchiveDistributionPreference() } // ExportCommunity exports the private key of the community with given ID diff --git a/services/gif/gif_test.go b/services/gif/gif_test.go index e3bb6f53013..67a516ee418 100644 --- a/services/gif/gif_test.go +++ b/services/gif/gif_test.go @@ -24,7 +24,8 @@ func setupTestDB(t *testing.T, db *sql.DB) (*accounts.Database, func()) { acc, err := accounts.NewDB(db) require.NoError(t, err) config := params.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks := json.RawMessage("{}") settingsObj := settings.Settings{ diff --git a/services/wallet/market/market_test.go b/services/wallet/market/market_test.go index 73074e3930b..8dbc13f177e 100644 --- a/services/wallet/market/market_test.go +++ b/services/wallet/market/market_test.go @@ -37,7 +37,8 @@ func setupTokenManager(t *testing.T) (*token.Manager, func()) { accDb, err := accounts.NewDB(appDb) require.NoError(t, err) config := params.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks := json.RawMessage("{}") settingsObj := settings.Settings{ diff --git a/services/wallet/token/token-lists/token_lists_test.go b/services/wallet/token/token-lists/token_lists_test.go index d5141f7b183..51f516a9a7d 100644 --- a/services/wallet/token/token-lists/token_lists_test.go +++ b/services/wallet/token/token-lists/token_lists_test.go @@ -31,7 +31,8 @@ func initSettings(appDb *sql.DB, autoRefreshEnabled bool) (*settings.Database, e var ( config = params.NodeConfig{ - NetworkID: 10, + NetworkID: 10, + HistoryArchiveDistributionPreference: params.DefaultHistoryArchiveDistributionPreference, } networks = json.RawMessage("{}") settingsObj = settings.Settings{ From 919131285f58a691737f3fb0137610e2a8a5526a Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 15:18:02 +0100 Subject: [PATCH 22/75] updates test in message_handler_test.go after changing preference distribution setup --- protocol/messenger_handler.go | 4 +++- protocol/messenger_handler_test.go | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index dcdbf7c5570..9e15f68d254 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -3739,6 +3739,7 @@ func (m *Messenger) HandleCommunityMessageArchiveIndexCid(state *ReceivedMessage } if archiveDistributionPreference == communities.ArchiveDistributionMethodTorrent { // Ignore Cid messages when torrent distribution is selected + m.logger.Debug("skipping cid processing due to torrent-only preference") return nil } return m.HandleHistoryArchiveIndexCidMessage(state, 
state.CurrentMessageState.PublicKey, message.Cid, message.Clock) @@ -3750,7 +3751,8 @@ func (m *Messenger) HandleCommunityMessageArchiveMagnetlink(state *ReceivedMessa return err } if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { - // Ignore Cid messages when codex distribution is selected + // Ignore magnetlink messages when codex distribution is selected + m.logger.Debug("skipping magnetlink processing due to codex-only preference") return nil } return m.HandleHistoryArchiveMagnetlinkMessage(state, state.CurrentMessageState.PublicKey, message.MagnetUri, message.Clock) diff --git a/protocol/messenger_handler_test.go b/protocol/messenger_handler_test.go index 805d08085b4..f9dd9f89185 100644 --- a/protocol/messenger_handler_test.go +++ b/protocol/messenger_handler_test.go @@ -235,14 +235,24 @@ func (s *EventToSystemMessageSuite) TestHandleHistoryArchiveIndexCidMessageWithC CommunityID: community.IDString(), HistoryArchiveSupportEnabled: true, }) - s.m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) + // not valid after new distribution preference implementation + // s.m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) + s.m.communitiesManager.SetArchiveDistributionPreference(params.ArchiveDistributionMethodCodex) var buf bytes.Buffer core := zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), zapcore.AddSync(&buf), zap.DebugLevel) s.m.logger = zap.New(core) - err = s.m.HandleHistoryArchiveMagnetlinkMessage(state, &community.PrivateKey().PublicKey, "", 100) + message := &protobuf.CommunityMessageArchiveMagnetlink{ + MagnetUri: "magnet:?xt=urn:btih:d58f7e0c4e3b3f1e8e4f8e4e8e4f8e4e8e4f8e4e", + } + + state.CurrentMessageState.PublicKey = &community.PrivateKey().PublicKey + + // detecting archive distribution preference now happens earlier + // err = s.m.HandleHistoryArchiveMagnetlinkMessage(state, &community.PrivateKey().PublicKey, "", 100) + err = s.m.HandleCommunityMessageArchiveMagnetlink(state, message, nil) s.Require().NoError(err) s.Require().Contains(buf.String(), "skipping magnetlink processing due to codex-only preference") } From 9885e07568ecf12e1b7a136dd2ff4c2657c35cc8 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 3 Nov 2025 15:25:27 +0100 Subject: [PATCH 23/75] updates .gitignore --- .gitignore | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b14205b0550..7475e01ced4 100644 --- a/.gitignore +++ b/.gitignore @@ -133,4 +133,8 @@ pkg/sentry/SENTRY_PRODUCTION !vendor/**/migrations.go # Libs directory (Codex) -libs \ No newline at end of file +libs +coverage_merged.out +test_0.coverage.out +test-coverage.html +api/pre_login.log From 2ef1f596aeceb832f5392f277a2869a03c398bec Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 3 Nov 2025 15:33:58 +0100 Subject: [PATCH 24/75] Update codex lib version --- go.mod | 2 +- go.sum | 4 ++-- nix/pkgs/status-go/library/default.nix | 10 +++++----- vendor/modules.txt | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 5c5cea8446c..35c23285f74 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cockroachdb/errors v1.11.3 - github.com/codex-storage/codex-go-bindings v0.0.25 + github.com/codex-storage/codex-go-bindings v0.0.26 github.com/getsentry/sentry-go v0.29.1 
github.com/golang-migrate/migrate/v4 v4.15.2 github.com/gorilla/sessions v1.2.1 diff --git a/go.sum b/go.sum index 16bb4308912..a3688988322 100644 --- a/go.sum +++ b/go.sum @@ -548,8 +548,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codex-storage/codex-go-bindings v0.0.25 h1:MzBQrM5IyYKAvKv8dgI4CnVbcKlrWyyaorzgzM+mlz0= -github.com/codex-storage/codex-go-bindings v0.0.25/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= +github.com/codex-storage/codex-go-bindings v0.0.26 h1:v7PgwJq+dTb7YF4i19Dgx7iHJv+frqDbO9AP7qh2N1k= +github.com/codex-storage/codex-go-bindings v0.0.26/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= diff --git a/nix/pkgs/status-go/library/default.nix b/nix/pkgs/status-go/library/default.nix index cc7de7d59da..ceda0f93f75 100644 --- a/nix/pkgs/status-go/library/default.nix +++ b/nix/pkgs/status-go/library/default.nix @@ -8,7 +8,7 @@ let optionalString = pkgs.lib.optionalString; - codexVersion = "v0.0.25"; + codexVersion = "v0.0.26"; arch = if stdenv.hostPlatform.isx86_64 then "amd64" else if stdenv.hostPlatform.isAarch64 then "arm64" @@ -16,10 +16,10 @@ let os = if stdenv.isDarwin then "macos" else "Linux"; hash = if stdenv.hostPlatform.isDarwin - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-macos-arm64.zip | jq -r .hash - then "sha256-vlQu7mCGuDL+dKBsD1yZ+PZenZYtmM2TxjU5b/Gi1pQ=" - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.25/codex-Linux-amd64.zip | jq -r .hash - else "sha256-SVJsnEZF5Bkh3zBWBCD1klpAb/Q3bePX8HB7NCeSY20="; + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.26/codex-macos-arm64.zip | jq -r .hash + then "sha256-3CHIWoSjo0plsYqzXQWm1EtY1STcljV4yfXTPon90uE=" + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.26/codex-Linux-amd64.zip | jq -r .hash + else "sha256-YxW2vFZlcLrOx1PYgWW4MIstH/oFBRF0ooS0sl3v6ig="; # Pre-fetch libcodex to avoid network during build codexLib = pkgs.fetchzip { diff --git a/vendor/modules.txt b/vendor/modules.txt index 42b28cade74..bb8a6aa63c1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -269,7 +269,7 @@ github.com/cockroachdb/redact/internal/rfmt/fmtsort # github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 ## explicit; go 1.19 github.com/cockroachdb/tokenbucket -# github.com/codex-storage/codex-go-bindings v0.0.25 +# github.com/codex-storage/codex-go-bindings v0.0.26 ## explicit; go 1.24.0 github.com/codex-storage/codex-go-bindings/codex # github.com/consensys/gnark-crypto v0.18.0 From a178478da51db3a5c30a90cca2617cff09b977ad Mon Sep 17 00:00:00 2001 From: Marcin Czenko 
Date: Mon, 3 Nov 2025 15:43:52 +0100 Subject: [PATCH 25/75] updates linting --- protocol/messenger_handler_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/protocol/messenger_handler_test.go b/protocol/messenger_handler_test.go index f9dd9f89185..c1b2936745d 100644 --- a/protocol/messenger_handler_test.go +++ b/protocol/messenger_handler_test.go @@ -226,18 +226,22 @@ func (s *EventToSystemMessageSuite) TestHandleHistoryArchiveIndexCidMessageWithC err = s.m.archiveManager.StartCodexClient() s.Require().NoError(err) - defer s.m.archiveManager.Stop() + defer func() { + _ = s.m.archiveManager.Stop() + }() s.Require().True(s.m.archiveManager.IsReady()) community := response.Communities()[0] - s.m.communitiesManager.SaveCommunitySettings(communities.CommunitySettings{ + err = s.m.communitiesManager.SaveCommunitySettings(communities.CommunitySettings{ CommunityID: community.IDString(), HistoryArchiveSupportEnabled: true, }) + s.Require().NoError(err) // not valid after new distribution preference implementation // s.m.communitiesManager.SetArchiveDistributionPreference(community.ID(), communities.ArchiveDistributionMethodCodex) - s.m.communitiesManager.SetArchiveDistributionPreference(params.ArchiveDistributionMethodCodex) + err = s.m.communitiesManager.SetArchiveDistributionPreference(params.ArchiveDistributionMethodCodex) + s.Require().NoError(err) var buf bytes.Buffer core := zapcore.NewCore( From fa0836d99cb0d67f670c482e652c2a52412c07d0 Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Tue, 4 Nov 2025 19:48:43 +1100 Subject: [PATCH 26/75] fix: messageArchiveInterval RPC param in seconds Changes RPC call wakuext_UpdateMessageArchiveInterval to accept seconds instead of nanoseconds. --- services/ext/api.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/ext/api.go b/services/ext/api.go index 83ea6f09780..546fc54f1b8 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -1181,7 +1181,8 @@ func (api *PublicAPI) GetMessageArchiveInterval() (time.Duration, error) { } func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) error { - return api.service.messenger.UpdateMessageArchiveInterval(duration) + d := duration * time.Second + return api.service.messenger.UpdateMessageArchiveInterval(d) } func (api *PublicAPI) SubscribeToPubsubTopic(topic string, optPublicKey string) error { From 792fd8f932f40744f57a102f68623240956dea37 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 4 Nov 2025 10:51:51 +0100 Subject: [PATCH 27/75] updates API commands return values --- protocol/messenger_communities.go | 19 ++++--------------- services/ext/api.go | 26 ++++++++++++++++++++++---- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 554e5944033..abc5fb941d9 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -4287,9 +4287,9 @@ func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { return nil } -func (m *Messenger) UpdateMessageArchiveInterval(duration time.Duration) error { +func (m *Messenger) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { messageArchiveInterval = duration - return nil + return messageArchiveInterval, nil } func (m *Messenger) GetMessageArchiveInterval() (time.Duration, error) { @@ -5142,19 +5142,8 @@ func (m *Messenger) startRequestMissingCommunityChannelsHRKeysLoop() { } // 
SetArchiveDistributionPreference sets the archive distribution preference for the node -func (m *Messenger) SetArchiveDistributionPreference(request *requests.SetArchiveDistributionPreference) (*MessengerResponse, error) { - if err := request.Validate(); err != nil { - return nil, err - } - - err := m.communitiesManager.SetArchiveDistributionPreference(request.Preference) - if err != nil { - return nil, err - } - - response := &MessengerResponse{} - - return response, nil +func (m *Messenger) SetArchiveDistributionPreference(preference string) error { + return m.communitiesManager.SetArchiveDistributionPreference(preference) } // GetArchiveDistributionPreference gets the archive distribution preference for the node diff --git a/services/ext/api.go b/services/ext/api.go index 546fc54f1b8..debe9a56981 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -287,8 +287,21 @@ func (api *PublicAPI) SetCommunityShard(request *requests.SetCommunityShard) (*p } // SetArchiveDistributionPreference sets the archive distribution preference for the node -func (api *PublicAPI) SetArchiveDistributionPreference(request *requests.SetArchiveDistributionPreference) (*protocol.MessengerResponse, error) { - return api.service.messenger.SetArchiveDistributionPreference(request) +func (api *PublicAPI) SetArchiveDistributionPreference(request *requests.SetArchiveDistributionPreference) (string, error) { + if err := request.Validate(); err != nil { + return "", err + } + + if err := api.service.messenger.SetArchiveDistributionPreference(request.Preference); err != nil { + return "", err + } + + updatedPreference, err := api.service.messenger.GetArchiveDistributionPreference() + if err != nil { + return "", err + } + + return updatedPreference, nil } // GetArchiveDistributionPreference gets the archive distribution preference for the node @@ -1180,9 +1193,14 @@ func (api *PublicAPI) GetMessageArchiveInterval() (time.Duration, error) { return api.service.messenger.GetMessageArchiveInterval() } -func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) error { +func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { d := duration * time.Second - return api.service.messenger.UpdateMessageArchiveInterval(d) + updatedInterval, err := api.service.messenger.UpdateMessageArchiveInterval(d) + if err != nil { + return 0, err + } + // Do something with updatedInterval if needed + return updatedInterval, nil } func (api *PublicAPI) SubscribeToPubsubTopic(topic string, optPublicKey string) error { From 6d4fcd73c453d8b75ae1ff375adfd562a8ae5082 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 4 Nov 2025 11:59:24 +0100 Subject: [PATCH 28/75] [get/update]MessageArchiveInterval returns value in seconds + a bit of error handling --- services/ext/api.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/services/ext/api.go b/services/ext/api.go index debe9a56981..19b535f9354 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -3,6 +3,7 @@ package ext import ( "context" "crypto/ecdsa" + "errors" "time" "github.com/libp2p/go-libp2p/core/peer" @@ -1189,18 +1190,25 @@ func (api *PublicAPI) DisableCommunityHistoryArchiveProtocol() error { return api.service.messenger.DisableCommunityHistoryArchiveProtocol() } -func (api *PublicAPI) GetMessageArchiveInterval() (time.Duration, error) { - return api.service.messenger.GetMessageArchiveInterval() +func (api *PublicAPI) GetMessageArchiveInterval() (float64, error) { + interval, 
err := api.service.messenger.GetMessageArchiveInterval() + if err != nil { + return 0, err + } + return float64(interval) / float64(time.Second), nil } func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { + if duration <= 0 { + return 0, errors.New("duration must be greater than zero") + } + d := duration * time.Second updatedInterval, err := api.service.messenger.UpdateMessageArchiveInterval(d) if err != nil { return 0, err } - // Do something with updatedInterval if needed - return updatedInterval, nil + return updatedInterval / time.Second, nil } func (api *PublicAPI) SubscribeToPubsubTopic(topic string, optPublicKey string) error { From beda7df930dd05786dbb3e59a8f58bd0053ac8ab Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 4 Nov 2025 16:37:19 +0100 Subject: [PATCH 29/75] bug fix - archive id not saved to db after being downloaded --- protocol/communities/manager_archive.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 660d643a0e9..0005b0fe253 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -857,6 +857,10 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex // Set up callback for when individual archives are downloaded archiveDownloader.SetOnArchiveDownloaded(func(hash string, from, to uint64) { + err = m.persistence.SaveMessageArchiveID(communityID, hash) + if err != nil { + m.logger.Error("couldn't save message archive ID", zap.Error(err)) + } m.publisher.publish(&Subscription{ HistoryArchiveDownloadedSignal: &signal.HistoryArchiveDownloadedSignal{ CommunityID: communityID.String(), From 890bb23175bdf12ba22785a221b7a9efdc5c6e4b Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Wed, 5 Nov 2025 11:03:02 +0100 Subject: [PATCH 30/75] bug fix - ensure community archive directory exists in the Download path --- ...dex_archive_downloader_integration_test.go | 2 - .../communities/codex_manager_archive_test.go | 210 ++++++++++++++++++ protocol/communities/manager_archive.go | 45 ++-- protocol/communities/manager_archive_file.go | 20 +- 4 files changed, 257 insertions(+), 20 deletions(-) create mode 100644 protocol/communities/codex_manager_archive_test.go diff --git a/protocol/communities/codex_archive_downloader_integration_test.go b/protocol/communities/codex_archive_downloader_integration_test.go index 60cc2a4098d..2bef60210cd 100644 --- a/protocol/communities/codex_archive_downloader_integration_test.go +++ b/protocol/communities/codex_archive_downloader_integration_test.go @@ -47,8 +47,6 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TearDownTest() { } func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWorkflow() { - // client := NewCodexClientTest(suite.T()) - // Step 1: Create test archive data and upload multiple archives to Codex archives := []struct { hash string diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go new file mode 100644 index 00000000000..01b8b6ae5ec --- /dev/null +++ b/protocol/communities/codex_manager_archive_test.go @@ -0,0 +1,210 @@ +package communities_test + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "path/filepath" + "testing" + "time" + + "github.com/codex-storage/codex-go-bindings/codex" + "github.com/golang/protobuf/proto" + + "github.com/status-im/status-go/appdatabase" + "github.com/status-im/status-go/crypto" + 
"github.com/status-im/status-go/crypto/types" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol/communities" + "github.com/status-im/status-go/protocol/protobuf" + "github.com/status-im/status-go/protocol/requests" + "github.com/status-im/status-go/protocol/sqlite" + "github.com/status-im/status-go/protocol/tt" + "github.com/status-im/status-go/t/helpers" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type CodexArchiveManagerSuite struct { + suite.Suite + codexClient *communities.CodexClient + // codexConfig *params.CodexConfig + archiveManager *communities.ArchiveManager + manager *communities.Manager + uploadedCIDs []string // Track uploaded CIDs for cleanup +} + +func buildCodexConfig(t *testing.T) *params.CodexConfig { + rootDir := t.TempDir() + return ¶ms.CodexConfig{ + Enabled: true, + HistoryArchiveDataDir: filepath.Join(rootDir, "codex", "archivedata"), + CodexNodeConfig: codex.Config{ + DataDir: filepath.Join(rootDir, "codex", "codexdata"), + BlockRetries: 5, + LogLevel: "ERROR", + Nat: "none", + }, + } +} + +func (s *CodexArchiveManagerSuite) buildManagers() (*communities.Manager, *communities.ArchiveManager) { + db, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{}) + s.Require().NoError(err, "creating sqlite db instance") + err = sqlite.Migrate(db) + s.Require().NoError(err, "protocol migrate") + + key, err := crypto.GenerateKey() + s.Require().NoError(err) + + logger := tt.MustCreateTestLogger() + + m, err := communities.NewManager(key, "", db, logger, nil, nil, nil, &communities.TimeSourceStub{}, nil, nil) + s.Require().NoError(err) + s.Require().NoError(m.Start()) + + amc := &communities.ArchiveManagerConfig{ + TorrentConfig: nil, + CodexConfig: buildCodexConfig(s.T()), + Logger: logger, + Persistence: m.GetPersistence(), + Messaging: nil, + Identity: key, + Publisher: m, + } + t := communities.NewArchiveManager(amc) + s.Require().NoError(err) + + return m, t +} + +func (s *CodexArchiveManagerSuite) CreateCommunity() *communities.Community { + request := &requests.CreateCommunity{ + Name: "status", + Description: "token membership description", + Membership: protobuf.CommunityPermissions_AUTO_ACCEPT, + } + + community, err := s.manager.CreateCommunity(request, true) + s.Require().NoError(err) + s.Require().NotNil(community) + + return community +} + +// SetupSuite runs once before all tests in the suite +func (s *CodexArchiveManagerSuite) SetupTest() { + m, t := s.buildManagers() + communities.SetValidateInterval(30 * time.Millisecond) + s.manager = m + s.archiveManager = t + s.Require().NoError(s.archiveManager.StartCodexClient()) + s.codexClient = s.archiveManager.GetCodexClient() +} + +// TearDownSuite runs once after each test in the suite +func (s *CodexArchiveManagerSuite) TearDownTest() { + // Clean up all uploaded CIDs + for _, cid := range s.uploadedCIDs { + if err := s.codexClient.RemoveCid(cid); err != nil { + s.T().Logf("Warning: Failed to remove CID %s: %v", cid, err) + } else { + s.T().Logf("Successfully removed CID: %s", cid) + } + } + s.archiveManager.StopCodexClient() + s.Require().NoError(s.manager.Stop()) +} + +func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { + // Create test archive data and upload multiple archives to Codex + archives := []struct { + hash string + from uint64 + to uint64 + data []byte + }{ + {"archive-1-hash-abc123", 1000, 2000, make([]byte, 512)}, + {"archive-2-hash-def456", 2000, 3000, make([]byte, 768)}, + 
{"archive-3-hash-ghi789", 3000, 4000, make([]byte, 1024)}, + } + + // Generate random data for each archive + archiveCIDs := make(map[string]string) // archive hash -> CID + for i := range archives { + if _, err := rand.Read(archives[i].data); err != nil { + s.T().Fatalf("Failed to generate random data for %s: %v", archives[i].hash, err) + } + s.T().Logf("Generated %s data (first 16 bytes hex): %s", + archives[i].hash, hex.EncodeToString(archives[i].data[:16])) + } + + // Upload all archives to Codex + for _, archive := range archives { + cid, err := s.codexClient.Upload(bytes.NewReader(archive.data), archive.hash+".bin") + require.NoError(s.T(), err, "Failed to upload %s", archive.hash) + + archiveCIDs[archive.hash] = cid + s.uploadedCIDs = append(s.uploadedCIDs, cid) + s.T().Logf("Uploaded %s to CID: %s", archive.hash, cid) + + // Verify upload succeeded + exists, err := s.codexClient.HasCid(cid) + require.NoError(s.T(), err, "Failed to check CID existence for %s", archive.hash) + require.True(s.T(), exists, "CID %s should exist after upload", cid) + } + + // Create archive index for CodexArchiveDownloader + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), + } + + for _, archive := range archives { + cid := archiveCIDs[archive.hash] + index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ + Cid: cid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: archive.from, + To: archive.to, + }, + } + } + + // upload archive index to codex + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err, "Failed to marshal index") + + cid, err := s.codexClient.UploadArchive(codexIndexBytes) + s.Require().NoError(err, "Failed to upload archive index to Codex") + s.Require().NotEmpty(cid, "Uploaded index CID should not be empty") + + s.T().Logf("Uploaded archive index to CID: %s", cid) + + // Now that we have both the individual archives and the index uploaded to Codex, + // we can proceed with the download workflow. 
+ + communityID := types.HexBytes("test-community-id") + cancelChan := make(chan struct{}) + // logger, _ := zap.NewDevelopment() + + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, cid, cancelChan) + s.Require().NoError(err, "Failed to download archives") + s.Require().NotNil(taskInfo, "Download task info should not be nil") + s.Require().Equal(len(archives), taskInfo.TotalArchivesCount, "Unexpected total archives count") + s.Require().Equal(len(archives), taskInfo.TotalDownloadedArchivesCount, "Unexpected total downloaded archives count") + s.Require().False(taskInfo.Cancelled, "Download should not be cancelled") + + s.T().Logf("Download task info: %+v", taskInfo) + + for _, archive := range archives { + exists, err := s.manager.GetPersistence().HasMessageArchiveID(communityID, archive.hash) + s.Require().NoError(err, "Failed to check archive ID %s in persistence", archive.hash) + s.Require().True(exists, "Archive hash %s should be stored in persistence", archive.hash) + } +} + +// Run the integration test suite +func TestCodexArchiveManagerSuite(t *testing.T) { + suite.Run(t, new(CodexArchiveManagerSuite)) +} diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 0005b0fe253..247c28e9465 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -97,6 +97,10 @@ func NewArchiveManager(amc *ArchiveManagerConfig) *ArchiveManager { } } +func (m *ArchiveManager) GetCodexClient() *CodexClient { + return m.codexClient +} + func (m *ArchiveManager) SetOnline(online bool) { if online { if m.torrentConfig != nil && m.torrentConfig.Enabled && !m.torrentClientStarted() { @@ -234,18 +238,8 @@ func (m *ArchiveManager) StartCodexClient() error { return m.codexClient.Start() } -func (m *ArchiveManager) Stop() error { - if m.torrentClientStarted() || m.isCodexClientStarted { - m.stopHistoryArchiveTasksIntervals() - } - +func (m *ArchiveManager) StopCodexClient() error { errs := []error{} - if m.torrentClientStarted() { - m.logger.Info("Stopping torrent client") - errs = m.torrentClient.Close() - m.torrentClient = nil - } - if m.isCodexClientStarted { m.logger.Info("Stopping codex client") @@ -270,8 +264,28 @@ func (m *ArchiveManager) Stop() error { return nil } -func (m *ArchiveManager) GetCodexClient() *CodexClient { - return m.codexClient +func (m *ArchiveManager) Stop() error { + if m.torrentClientStarted() || m.isCodexClientStarted { + m.stopHistoryArchiveTasksIntervals() + } + + errs := []error{} + if m.torrentClientStarted() { + m.logger.Info("Stopping torrent client") + errs = m.torrentClient.Close() + m.torrentClient = nil + } + + err := m.StopCodexClient() + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + + return nil } func (m *ArchiveManager) SetCodexClient(client *CodexClient) { @@ -776,6 +790,11 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex // Create separate cancel channel for the index downloader to avoid channel competition indexDownloaderCancel := make(chan struct{}) + if err := m.ensureCodexCommunityDir(communityID); err != nil { + m.logger.Error("failed to ensure Codex archive directory", zap.String("communityID", id), zap.Error(err)) + return nil, err + } + // Create index downloader with path to index file using helper function indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) indexDownloader := NewCodexIndexDownloader(m.codexClient, indexCid, indexFilePath, indexDownloaderCancel, m.logger) diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index e95ff06420e..c065f11a8c7 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -12,6 +12,7 @@ package communities import ( "bytes" "crypto/ecdsa" + "fmt" "os" "path" "path/filepath" @@ -353,11 +354,8 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte codexWakuMessageArchiveIndex := make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata) codexArchiveIDs := make([]string, 0) - if _, err := os.Stat(codexArchiveDir); os.IsNotExist(err) { - err := os.MkdirAll(codexArchiveDir, 0700) - if err != nil { - return codexArchiveIDs, err - } + if err := m.ensureCodexCommunityDir(communityID); err != nil { + return codexArchiveIDs, err } _, err := os.Stat(codexIndexPath) @@ -559,6 +557,18 @@ func (m *ArchiveFileManager) archiveIndexFile(communityID string) string { return path.Join(m.torrentConfig.DataDir, communityID, "index") } +func (m *ArchiveFileManager) ensureCodexCommunityDir(communityID types.HexBytes) error { + if m.codexConfig == nil { + return fmt.Errorf("codex config not initialized") + } + + codexArchiveDir := m.codexHistoryArchiveDataDirPath(communityID) + if err := os.MkdirAll(codexArchiveDir, 0700); err != nil { + return fmt.Errorf("failed to create Codex archive directory %s: %w", codexArchiveDir, err) + } + return nil +} + func (m *ArchiveFileManager) codexHistoryArchiveDataDirPath(communityID types.HexBytes) string { return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String()) } From bd2da9f514f22885672203a167f88de19147eaf5 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Wed, 5 Nov 2025 11:18:35 +0100 Subject: [PATCH 31/75] tests DownloadHistoryArchivesByIndexCid emits the signals as expected --- .../communities/codex_manager_archive_test.go | 73 ++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-) diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index 01b8b6ae5ec..9f357c4a7d0 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -118,6 +118,9 @@ func (s *CodexArchiveManagerSuite) TearDownTest() { } func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { + // Subscribe to signals before starting the test + subscription := s.manager.Subscribe() + // Create test archive data and upload multiple archives to Codex archives := []struct { hash string @@ -186,7 +189,56 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { communityID := types.HexBytes("test-community-id") cancelChan := make(chan struct{}) - // logger, _ := 
zap.NewDevelopment() + + // Track received signals + receivedSignals := struct { + downloadingStarted bool + archiveDownloaded map[string]bool // hash -> received + seedingSignal bool + }{ + archiveDownloaded: make(map[string]bool), + } + + // Start goroutine to collect signals + done := make(chan struct{}) + go func() { + timeout := time.After(30 * time.Second) + for { + select { + case event := <-subscription: + if event.DownloadingHistoryArchivesStartedSignal != nil { + receivedSignals.downloadingStarted = true + s.T().Logf("Received DownloadingHistoryArchivesStartedSignal for community: %s", + event.DownloadingHistoryArchivesStartedSignal.CommunityID) + } + if event.HistoryArchiveDownloadedSignal != nil { + s.T().Logf("Received HistoryArchiveDownloadedSignal for community: %s, From: %d, To: %d", + event.HistoryArchiveDownloadedSignal.CommunityID, + event.HistoryArchiveDownloadedSignal.From, + event.HistoryArchiveDownloadedSignal.To) + // Find which archive this corresponds to + for _, archive := range archives { + if uint64(event.HistoryArchiveDownloadedSignal.From) == archive.from && + uint64(event.HistoryArchiveDownloadedSignal.To) == archive.to { + receivedSignals.archiveDownloaded[archive.hash] = true + } + } + } + if event.HistoryArchivesSeedingSignal != nil { + receivedSignals.seedingSignal = true + s.T().Logf("Received HistoryArchivesSeedingSignal for community: %s, MagnetLink: %v, IndexCid: %v", + event.HistoryArchivesSeedingSignal.CommunityID, + event.HistoryArchivesSeedingSignal.MagnetLink, + event.HistoryArchivesSeedingSignal.IndexCid) + } + case <-timeout: + close(done) + return + case <-done: + return + } + } + }() taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, cid, cancelChan) s.Require().NoError(err, "Failed to download archives") @@ -197,11 +249,30 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { s.T().Logf("Download task info: %+v", taskInfo) + // Stop the signal collection goroutine + close(done) + + // Wait a bit for any remaining signals to be processed + time.Sleep(100 * time.Millisecond) + + // Verify that archives are stored in persistence for _, archive := range archives { exists, err := s.manager.GetPersistence().HasMessageArchiveID(communityID, archive.hash) s.Require().NoError(err, "Failed to check archive ID %s in persistence", archive.hash) s.Require().True(exists, "Archive hash %s should be stored in persistence", archive.hash) } + + // Verify that all expected signals were received + s.Require().True(receivedSignals.downloadingStarted, "Should have received DownloadingHistoryArchivesStartedSignal") + s.Require().True(receivedSignals.seedingSignal, "Should have received HistoryArchivesSeedingSignal") + + // Verify that we received download signals for all archives + for _, archive := range archives { + s.Require().True(receivedSignals.archiveDownloaded[archive.hash], + "Should have received HistoryArchiveDownloadedSignal for archive %s", archive.hash) + } + + s.T().Logf("All signals verified successfully!") } // Run the integration test suite From 93225938f70937a5c6665747a51eead7af955633 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Wed, 5 Nov 2025 16:46:29 +0100 Subject: [PATCH 32/75] various bug fixes and improvements --- .../communities/codex_archive_downloader.go | 5 + ...dex_archive_downloader_integration_test.go | 7 +- protocol/communities/codex_client.go | 6 +- .../communities/codex_client_interface.go | 8 + protocol/communities/codex_client_test.go | 6 +- 
...codex_index_downloader_integration_test.go | 9 +- ...codex_manager_archive_cancellation_test.go | 419 ++++++++++++++++++ .../communities/codex_manager_archive_test.go | 315 ++++++++++++- protocol/communities/codex_testutil_test.go | 2 +- protocol/communities/manager.go | 7 +- protocol/communities/manager_archive.go | 57 ++- protocol/communities/manager_archive_file.go | 24 +- protocol/communities/manager_archive_nop.go | 8 +- protocol/messenger_communities.go | 2 +- signal/events_community_archives.go | 30 ++ 15 files changed, 867 insertions(+), 38 deletions(-) create mode 100644 protocol/communities/codex_manager_archive_cancellation_test.go diff --git a/protocol/communities/codex_archive_downloader.go b/protocol/communities/codex_archive_downloader.go index bccd4527fa6..c56adb88df6 100644 --- a/protocol/communities/codex_archive_downloader.go +++ b/protocol/communities/codex_archive_downloader.go @@ -288,6 +288,11 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { d.totalDownloadedArchivesCount++ d.mu.Unlock() + d.logger.Debug("archive download completed", + zap.String("cid", archiveCid), + zap.String("totalDownloadedArchivesCount", fmt.Sprintf("%d", d.totalDownloadedArchivesCount)), + ) + // Call success callback if d.onArchiveDownloaded != nil { d.onArchiveDownloaded(archiveHash, archiveFrom, archiveTo) diff --git a/protocol/communities/codex_archive_downloader_integration_test.go b/protocol/communities/codex_archive_downloader_integration_test.go index 2bef60210cd..2040d7478f3 100644 --- a/protocol/communities/codex_archive_downloader_integration_test.go +++ b/protocol/communities/codex_archive_downloader_integration_test.go @@ -1,6 +1,3 @@ -//go:build codex_integration -// +build codex_integration - package communities_test import ( @@ -25,7 +22,7 @@ import ( // against a real Codex instance type CodexArchiveDownloaderIntegrationSuite struct { suite.Suite - client communities.CodexClient + client communities.CodexClientInterface uploadedCIDs []string // Track uploaded CIDs for cleanup } @@ -107,7 +104,7 @@ func (suite *CodexArchiveDownloaderIntegrationSuite) TestFullArchiveDownloadWork logger, _ := zap.NewDevelopment() // Use development logger for integration tests downloader := communities.NewCodexArchiveDownloader( - &suite.client, + suite.client, index, communityID, existingArchiveIDs, diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index 24a04f1bfd4..afc97de4970 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -21,13 +21,13 @@ type CodexClient struct { } // NewCodexClient creates a new Codex client -func NewCodexClient(config params.CodexConfig) (CodexClient, error) { +func NewCodexClient(config params.CodexConfig) (CodexClientInterface, error) { node, err := codex.New(config.CodexNodeConfig) if err != nil { - return CodexClient{}, err + return nil, err } - return CodexClient{ + return &CodexClient{ config: config.CodexNodeConfig, node: node, enabled: config.Enabled, diff --git a/protocol/communities/codex_client_interface.go b/protocol/communities/codex_client_interface.go index 1a9598df3a0..4b41fa0f579 100644 --- a/protocol/communities/codex_client_interface.go +++ b/protocol/communities/codex_client_interface.go @@ -31,4 +31,12 @@ type CodexClientInterface interface { // CID management methods HasCid(cid string) (bool, error) RemoveCid(cid string) error + + // lifecycle management methods + Start() error + Stop() error + Destroy() error + + // logging methods + 
UpdateLogLevel(logLevel string) error } diff --git a/protocol/communities/codex_client_test.go b/protocol/communities/codex_client_test.go index 88c6f0f0816..d2447f60881 100644 --- a/protocol/communities/codex_client_test.go +++ b/protocol/communities/codex_client_test.go @@ -15,7 +15,7 @@ import ( "github.com/status-im/status-go/protocol/communities" ) -func upload(client communities.CodexClient, t *testing.T, buf *bytes.Buffer) string { +func upload(client communities.CodexClientInterface, t *testing.T, buf *bytes.Buffer) string { filename := "hello.txt" cid, err := client.Upload(buf, filename) if err != nil { @@ -74,8 +74,8 @@ func (suite *CodexClientTestSuite) TestDownload_Success() { } func (suite *CodexClientTestSuite) TestDownloadWithContext_Cancel() { - // skip test - // suite.T().Skip("Wait for cancellation support PR to be merged in codex-go-bindings") + // skip test - flaky + suite.T().Skip("Flaky test - needs investigation") client := NewCodexClientTest(suite.T()) diff --git a/protocol/communities/codex_index_downloader_integration_test.go b/protocol/communities/codex_index_downloader_integration_test.go index 0e9f34b2373..ffaa0ed5108 100644 --- a/protocol/communities/codex_index_downloader_integration_test.go +++ b/protocol/communities/codex_index_downloader_integration_test.go @@ -1,6 +1,3 @@ -//go:build codex_integration -// +build codex_integration - package communities_test import ( @@ -29,8 +26,6 @@ import ( type CodexIndexDownloaderIntegrationTestSuite struct { suite.Suite testDir string - host string - port string logger *zap.Logger } @@ -87,7 +82,7 @@ func (suite *CodexIndexDownloaderIntegrationTestSuite) TestIntegration_GotManife defer close(cancelChan) filePath := filepath.Join(suite.testDir, "test-index.bin") - downloader := communities.NewCodexIndexDownloader(&client, cid, filePath, cancelChan, suite.logger) + downloader := communities.NewCodexIndexDownloader(client, cid, filePath, cancelChan, suite.logger) // Test GotManifest manifestChan := downloader.GotManifest() @@ -139,7 +134,7 @@ func (suite *CodexIndexDownloaderIntegrationTestSuite) TestIntegration_DownloadI defer close(cancelChan) filePath := filepath.Join(suite.testDir, "downloaded-index.bin") - downloader := communities.NewCodexIndexDownloader(&client, cid, filePath, cancelChan, suite.logger) + downloader := communities.NewCodexIndexDownloader(client, cid, filePath, cancelChan, suite.logger) // First, get the manifest to know the expected size manifestChan := downloader.GotManifest() diff --git a/protocol/communities/codex_manager_archive_cancellation_test.go b/protocol/communities/codex_manager_archive_cancellation_test.go new file mode 100644 index 00000000000..3a7b8e8a8b6 --- /dev/null +++ b/protocol/communities/codex_manager_archive_cancellation_test.go @@ -0,0 +1,419 @@ +package communities_test + +import ( + "context" + "crypto/rand" + "io" + "path/filepath" + "testing" + "time" + + "github.com/codex-storage/codex-go-bindings/codex" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/proto" + + "github.com/status-im/status-go/appdatabase" + "github.com/status-im/status-go/crypto" + "github.com/status-im/status-go/crypto/types" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol/communities" + mock_communities "github.com/status-im/status-go/protocol/communities/mock/communities" + "github.com/status-im/status-go/protocol/protobuf" + "github.com/status-im/status-go/protocol/sqlite" + "github.com/status-im/status-go/protocol/tt" + 
"github.com/status-im/status-go/t/helpers" + + "github.com/stretchr/testify/suite" +) + +// MockCodexArchiveManagerSuite contains deterministic unit tests using mocked CodexClient +type MockCodexArchiveManagerSuite struct { + suite.Suite + ctrl *gomock.Controller + mockCodex *mock_communities.MockCodexClientInterface + archiveManager *communities.ArchiveManager + manager *communities.Manager +} + +func (s *MockCodexArchiveManagerSuite) buildManagers() (*communities.Manager, *communities.ArchiveManager) { + db, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{}) + s.Require().NoError(err, "creating sqlite db instance") + err = sqlite.Migrate(db) + s.Require().NoError(err, "protocol migrate") + + key, err := crypto.GenerateKey() + s.Require().NoError(err) + + logger := tt.MustCreateTestLogger() + + m, err := communities.NewManager(key, "", db, logger, nil, nil, nil, &communities.TimeSourceStub{}, nil, nil) + s.Require().NoError(err) + s.Require().NoError(m.Start()) + + rootDir := s.T().TempDir() + codexConfig := ¶ms.CodexConfig{ + Enabled: true, + HistoryArchiveDataDir: filepath.Join(rootDir, "codex", "archivedata"), + } + + amc := &communities.ArchiveManagerConfig{ + TorrentConfig: nil, + CodexConfig: codexConfig, + Logger: logger, + Persistence: m.GetPersistence(), + Messaging: nil, + Identity: key, + Publisher: m, + } + archiveManager := communities.NewArchiveManager(amc) + + return m, archiveManager +} + +func (s *MockCodexArchiveManagerSuite) SetupTest() { + s.ctrl = gomock.NewController(s.T()) + s.mockCodex = mock_communities.NewMockCodexClientInterface(s.ctrl) + + m, am := s.buildManagers() + communities.SetValidateInterval(30 * time.Millisecond) + s.manager = m + s.archiveManager = am + + // Inject the mock CodexClient into the ArchiveManager + s.archiveManager.SetCodexClient(s.mockCodex) +} + +func (s *MockCodexArchiveManagerSuite) TearDownTest() { + s.ctrl.Finish() + s.Require().NoError(s.manager.Stop()) +} + +// TestMockDownloadCancellationBeforeManifestFetch tests cancellation before manifest is fetched +// This test is 100% deterministic - we control exactly when operations complete +func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationBeforeManifestFetch() { + // Subscribe to signals + subscription := s.manager.Subscribe() + + indexCid := "test-index-cid-xyz789" + communityID := types.HexBytes("mock-cancel-test-1") + cancelChan := make(chan struct{}) + + // Mock expectations: FetchManifestWithContext will be called but should be cancelled + s.mockCodex.EXPECT(). + FetchManifestWithContext(gomock.Any(), indexCid). + DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { + // Block until context is cancelled + <-ctx.Done() + return codex.Manifest{}, ctx.Err() + }). 
+ MaxTimes(1) // May or may not be called depending on timing + + // Track signals + manifestFetchedReceived := false + signalDone := make(chan struct{}) + go func() { + timeout := time.After(5 * time.Second) + for { + select { + case event := <-subscription: + if event.ManifestFetchedSignal != nil { + manifestFetchedReceived = true + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Cancel immediately before starting + close(cancelChan) + + // Set short timeout for test + s.archiveManager.SetDownloadTimeout(1 * time.Second) + + // Start download - should return immediately due to cancellation + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + s.Require().NoError(err) + s.Require().NotNil(taskInfo) + s.Require().True(taskInfo.Cancelled, "Download should be marked as cancelled") + s.Require().Equal(0, taskInfo.TotalDownloadedArchivesCount, "No archives should be downloaded") + + close(signalDone) + time.Sleep(50 * time.Millisecond) + + s.Require().False(manifestFetchedReceived, "ManifestFetchedSignal should not be received when cancelled early") + s.T().Logf("✓ Mock test: Early cancellation verified with zero CodexClient calls") +} + +// TestMockDownloadCancellationDuringIndexDownload tests cancellation during index download +// Uses mock to control exact timing of manifest fetch completion +func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDownload() { + subscription := s.manager.Subscribe() + + archiveData := make([]byte, 1024) + _, err := rand.Read(archiveData) + s.Require().NoError(err) + + archiveCid := "test-archive-cid-def456" + indexCid := "test-index-cid-uvw123" + + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ + "test-hash-large": { + Cid: archiveCid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: 1000, + To: 2000, + }, + }, + }, + } + + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err) + + communityID := types.HexBytes("mock-cancel-test-2") + cancelChan := make(chan struct{}) + + // Mock expectations: Manifest fetch succeeds, but index download never completes + manifest := codex.Manifest{ + Cid: indexCid, + DatasetSize: len(codexIndexBytes), + } + + // FetchManifestWithContext will succeed + s.mockCodex.EXPECT(). + FetchManifestWithContext(gomock.Any(), indexCid). + Return(manifest, nil). + Times(1) + + // DownloadWithContext will be called but blocked until cancelled + downloadStarted := make(chan struct{}) + s.mockCodex.EXPECT(). + DownloadWithContext(gomock.Any(), indexCid, gomock.Any()). + DoAndReturn(func(ctx context.Context, cid string, output interface{}) error { + close(downloadStarted) + // Block until context is cancelled + <-ctx.Done() + return ctx.Err() + }). 
+ Times(1) + + // Track signals + manifestFetchedReceived := false + indexDownloadCompletedReceived := false + signalDone := make(chan struct{}) + + go func() { + timeout := time.After(10 * time.Second) + for { + select { + case event := <-subscription: + if event.ManifestFetchedSignal != nil { + manifestFetchedReceived = true + s.T().Logf("Received ManifestFetchedSignal - waiting for download to start before cancelling") + // Wait for download to actually start + <-downloadStarted + s.T().Logf("Download started, now cancelling") + close(cancelChan) + } + if event.IndexDownloadCompletedSignal != nil { + indexDownloadCompletedReceived = true + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Set short timeout for test + s.archiveManager.SetDownloadTimeout(1 * time.Second) + + // Start download + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + s.Require().NoError(err) + s.Require().NotNil(taskInfo) + s.Require().True(taskInfo.Cancelled, "Download should be marked as cancelled") + + close(signalDone) + time.Sleep(50 * time.Millisecond) + + // Verify signals + s.Require().True(manifestFetchedReceived, "Should have received ManifestFetchedSignal") + s.Require().False(indexDownloadCompletedReceived, "Should NOT have received IndexDownloadCompletedSignal") + + s.T().Logf("✓ Mock test: Index download cancellation verified with controlled timing") +} + +// TestMockDownloadCancellationDuringArchiveDownload tests cancellation during archive downloads +// Mock allows us to control exactly when first archive completes +func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchiveDownload() { + subscription := s.manager.Subscribe() + + // Create multiple archives + archives := []struct { + hash string + cid string + from uint64 + to uint64 + data []byte + }{ + {"cancel-archive-1", "archive-cid-1", 1000, 2000, make([]byte, 1024)}, + {"cancel-archive-2", "archive-cid-2", 2000, 3000, make([]byte, 1024)}, + {"cancel-archive-3", "archive-cid-3", 3000, 4000, make([]byte, 1024)}, + } + + for i := range archives { + _, err := rand.Read(archives[i].data) + s.Require().NoError(err) + } + + indexCid := "test-index-cid-archive-download" + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), + } + + for _, archive := range archives { + index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ + Cid: archive.cid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: archive.from, + To: archive.to, + }, + } + } + + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err) + + communityID := types.HexBytes("mock-cancel-test-3") + cancelChan := make(chan struct{}) + + // Mock expectations + manifest := codex.Manifest{ + Cid: indexCid, + DatasetSize: len(codexIndexBytes), + } + + // Manifest fetch succeeds + s.mockCodex.EXPECT(). + FetchManifestWithContext(gomock.Any(), indexCid). + Return(manifest, nil). + Times(1) + + // Index download succeeds + s.mockCodex.EXPECT(). + DownloadWithContext(gomock.Any(), indexCid, gomock.Any()). + DoAndReturn(func(ctx context.Context, cid string, output interface{}) error { + // Write the index bytes to whatever writer we receive + if w, ok := output.(io.Writer); ok { + w.Write(codexIndexBytes) + } + return nil + }). 
+ Times(1) + + // First archive download succeeds, triggers cancellation + // firstArchiveDownloaded := make(chan struct{}) + s.mockCodex.EXPECT(). + TriggerDownloadWithContext(gomock.Any(), archives[0].cid). + DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { + // close(firstArchiveDownloaded) + return codex.Manifest{Cid: cid, DatasetSize: len(archives[0].data)}, nil + }). + Times(1) + + // HasCid for first archive - called during polling after trigger succeeds + s.mockCodex.EXPECT(). + HasCid(archives[0].cid). + Return(true, nil). + AnyTimes() + + // TriggerDownloadWithContext for remaining archives + // All 3 goroutines start simultaneously, and each calls TriggerDownloadWithContext BEFORE polling. + // Archives 2 and 3 will have their triggers called, but should receive cancellation. + for i := 1; i < len(archives); i++ { + s.mockCodex.EXPECT(). + TriggerDownloadWithContext(gomock.Any(), archives[i].cid). + DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { + // Block until context is cancelled + <-ctx.Done() + return codex.Manifest{}, ctx.Err() + }). + Times(1) + } + + // HasCid should NOT be called for archives 2 and 3 since their triggers will fail with cancellation + + // Track signals + downloadStartedReceived := false + indexDownloadCompletedReceived := false + archivesDownloaded := 0 + signalDone := make(chan struct{}) + + go func() { + timeout := time.After(10 * time.Second) + for { + select { + case event := <-subscription: + if event == nil { + continue + } + if event.DownloadingHistoryArchivesStartedSignal != nil { + downloadStartedReceived = true + } + if event.IndexDownloadCompletedSignal != nil { + indexDownloadCompletedReceived = true + } + if event.HistoryArchiveDownloadedSignal != nil { + archivesDownloaded++ + if archivesDownloaded == 1 { + // We received the signal, which means HasCid returned true and count was incremented. 
+ s.T().Logf("First archive downloaded (signal received), now cancelling") + close(cancelChan) + } + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Set short timeout for test + s.archiveManager.SetDownloadTimeout(1 * time.Second) + + // Start download + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + s.Require().NoError(err) + s.Require().NotNil(taskInfo) + s.Require().True(taskInfo.Cancelled, "Download should be marked as cancelled") + s.Require().Equal(3, taskInfo.TotalArchivesCount, "Should know total is 3 archives") + + close(signalDone) + time.Sleep(50 * time.Millisecond) + + // Verify signals + s.Require().True(downloadStartedReceived, "Should have received DownloadingHistoryArchivesStartedSignal") + s.Require().True(indexDownloadCompletedReceived, "Should have received IndexDownloadCompletedSignal") + s.Require().Equal(1, archivesDownloaded, "Should have received exactly 1 HistoryArchiveDownloadedSignal") + + // Since we received the signal, HasCid completed and the count was incremented + s.Require().Equal(1, taskInfo.TotalDownloadedArchivesCount, + "Should have downloaded exactly 1 archive (signal was received)") + + s.T().Logf("✓ Mock test: Archive download cancellation verified - downloaded %d archive(s) before cancel", archivesDownloaded) +} + +// Run the mock-based unit test suite +func TestMockCodexArchiveManagerSuite(t *testing.T) { + suite.Run(t, new(MockCodexArchiveManagerSuite)) +} diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index 9f357c4a7d0..ff4e5612a39 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -28,7 +28,7 @@ import ( type CodexArchiveManagerSuite struct { suite.Suite - codexClient *communities.CodexClient + codexClient communities.CodexClientInterface // codexConfig *params.CodexConfig archiveManager *communities.ArchiveManager manager *communities.Manager @@ -100,7 +100,9 @@ func (s *CodexArchiveManagerSuite) SetupTest() { s.manager = m s.archiveManager = t s.Require().NoError(s.archiveManager.StartCodexClient()) - s.codexClient = s.archiveManager.GetCodexClient() + client := s.archiveManager.GetCodexClient() + s.Require().NotNil(client) + s.codexClient = client } // TearDownSuite runs once after each test in the suite @@ -275,6 +277,315 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { s.T().Logf("All signals verified successfully!") } +func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() { + // Subscribe to signals + subscription := s.manager.Subscribe() + + // Create a single test archive + archiveData := make([]byte, 256) + _, err := rand.Read(archiveData) + s.Require().NoError(err, "Failed to generate random data") + + // Upload archive to Codex + archiveCid, err := s.codexClient.Upload(bytes.NewReader(archiveData), "test-archive.bin") + s.Require().NoError(err, "Failed to upload archive") + s.uploadedCIDs = append(s.uploadedCIDs, archiveCid) + + // Create and upload index + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ + "test-hash": { + Cid: archiveCid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: 1000, + To: 2000, + }, + }, + }, + } + + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err, "Failed to marshal index") + + indexCid, err := 
s.codexClient.UploadArchive(codexIndexBytes) + s.Require().NoError(err, "Failed to upload index") + s.uploadedCIDs = append(s.uploadedCIDs, indexCid) + + communityID := types.HexBytes("cancel-test-community-1") + cancelChan := make(chan struct{}) + + // Track signals + downloadStartedReceived := false + manifestFetchedReceived := false + signalDone := make(chan struct{}) + go func() { + timeout := time.After(10 * time.Second) + for { + select { + case event := <-subscription: + if event.DownloadingHistoryArchivesStartedSignal != nil { + downloadStartedReceived = true + } + if event.ManifestFetchedSignal != nil { + manifestFetchedReceived = true + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Cancel immediately before the download starts + close(cancelChan) + + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + s.Require().NoError(err, "Download should return without error on cancellation") + s.Require().NotNil(taskInfo, "Task info should not be nil") + s.Require().True(taskInfo.Cancelled, "Download should be marked as cancelled") + s.Require().Equal(0, taskInfo.TotalDownloadedArchivesCount, "No archives should be downloaded") + + close(signalDone) + time.Sleep(100 * time.Millisecond) + + // Verify that neither signal was received + s.Require().False(downloadStartedReceived, "DownloadingHistoryArchivesStartedSignal should not be received when cancelled early") + s.Require().False(manifestFetchedReceived, "ManifestFetchedSignal should not be received when cancelled early") + + s.T().Logf("Early cancellation test passed successfully!") +} + +func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringIndexDownload() { + // Subscribe to signals + subscription := s.manager.Subscribe() + + // Create a test archive + archiveData := make([]byte, 1024*10) // 10KB + _, err := rand.Read(archiveData) + s.Require().NoError(err, "Failed to generate random data") + + // Upload archive to Codex + archiveCid, err := s.codexClient.Upload(bytes.NewReader(archiveData), "test-archive-large.bin") + s.Require().NoError(err, "Failed to upload archive") + s.uploadedCIDs = append(s.uploadedCIDs, archiveCid) + + // Create and upload index + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ + "test-hash-large": { + Cid: archiveCid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: 1000, + To: 2000, + }, + }, + }, + } + + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err, "Failed to marshal index") + + indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) + s.Require().NoError(err, "Failed to upload index") + s.uploadedCIDs = append(s.uploadedCIDs, indexCid) + + communityID := types.HexBytes("cancel-test-community-2") + cancelChan := make(chan struct{}) + + // Track signals + manifestFetchedReceived := false + indexDownloadCompletedReceived := false + downloadStartedReceived := false + signalDone := make(chan struct{}) + + go func() { + timeout := time.After(10 * time.Second) + for { + select { + case event := <-subscription: + if event.ManifestFetchedSignal != nil { + manifestFetchedReceived = true + s.T().Logf("Received ManifestFetchedSignal - now cancelling during index download") + // Cancel as soon as we get the manifest (before index download completes) + close(cancelChan) + } + if event.IndexDownloadCompletedSignal != nil { + indexDownloadCompletedReceived = true + } + if 
event.DownloadingHistoryArchivesStartedSignal != nil { + downloadStartedReceived = true + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Start download in goroutine + resultChan := make(chan struct { + taskInfo *communities.HistoryArchiveDownloadTaskInfo + err error + }, 1) + + go func() { + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + resultChan <- struct { + taskInfo *communities.HistoryArchiveDownloadTaskInfo + err error + }{taskInfo, err} + }() + + result := <-resultChan + s.Require().NoError(result.err, "Download should return without error on cancellation") + s.Require().NotNil(result.taskInfo, "Task info should not be nil") + s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") + + close(signalDone) + time.Sleep(100 * time.Millisecond) + + // Verify signals + s.Require().True(manifestFetchedReceived, "Should have received ManifestFetchedSignal") + s.Require().False(indexDownloadCompletedReceived, "Should NOT have received IndexDownloadCompletedSignal (cancelled before completion)") + s.Require().False(downloadStartedReceived, "Should NOT have received DownloadingHistoryArchivesStartedSignal (cancelled before archives start)") + + s.T().Logf("Index download cancellation test passed! Cancelled deterministically after manifest fetch.") +} + +func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringArchiveDownload() { + // Subscribe to signals + subscription := s.manager.Subscribe() + + // Create multiple test archives + archives := []struct { + hash string + from uint64 + to uint64 + data []byte + }{ + {"cancel-archive-1", 1000, 2000, make([]byte, 1024*5)}, // 5KB + {"cancel-archive-2", 2000, 3000, make([]byte, 1024*5)}, + {"cancel-archive-3", 3000, 4000, make([]byte, 1024*5)}, + } + + // Generate and upload archives + archiveCIDs := make(map[string]string) + for i := range archives { + _, err := rand.Read(archives[i].data) + s.Require().NoError(err, "Failed to generate random data") + + cid, err := s.codexClient.Upload(bytes.NewReader(archives[i].data), archives[i].hash+".bin") + s.Require().NoError(err, "Failed to upload archive") + archiveCIDs[archives[i].hash] = cid + s.uploadedCIDs = append(s.uploadedCIDs, cid) + } + + // Create and upload index + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), + } + + for _, archive := range archives { + index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ + Cid: archiveCIDs[archive.hash], + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: archive.from, + To: archive.to, + }, + } + } + + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err, "Failed to marshal index") + + indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) + s.Require().NoError(err, "Failed to upload index") + s.uploadedCIDs = append(s.uploadedCIDs, indexCid) + + communityID := types.HexBytes("cancel-test-community-3") + cancelChan := make(chan struct{}) + + // Track signals + downloadStartedReceived := false + indexDownloadCompletedReceived := false + archivesDownloaded := 0 + signalDone := make(chan struct{}) + + go func() { + timeout := time.After(15 * time.Second) + for { + select { + case event := <-subscription: + if event.DownloadingHistoryArchivesStartedSignal != nil { + downloadStartedReceived = true + s.T().Logf("Received DownloadingHistoryArchivesStartedSignal") + } + if 
event.IndexDownloadCompletedSignal != nil { + indexDownloadCompletedReceived = true + s.T().Logf("Received IndexDownloadCompletedSignal - waiting for first archive download before cancelling") + } + if event.HistoryArchiveDownloadedSignal != nil { + archivesDownloaded++ + s.T().Logf("Received HistoryArchiveDownloadedSignal (%d archives downloaded so far)", archivesDownloaded) + // Cancel after the first archive is downloaded + if archivesDownloaded == 1 { + s.T().Logf("Cancelling after first archive download") + close(cancelChan) + } + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + return + } + } + }() + + // Start download in goroutine + resultChan := make(chan struct { + taskInfo *communities.HistoryArchiveDownloadTaskInfo + err error + }, 1) + + go func() { + taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + resultChan <- struct { + taskInfo *communities.HistoryArchiveDownloadTaskInfo + err error + }{taskInfo, err} + }() + + result := <-resultChan + s.Require().NoError(result.err, "Download should return without error on cancellation") + s.Require().NotNil(result.taskInfo, "Task info should not be nil") + s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") + + close(signalDone) + time.Sleep(100 * time.Millisecond) + + // Verify signals + s.Require().True(downloadStartedReceived, "Should have received DownloadingHistoryArchivesStartedSignal") + s.Require().True(indexDownloadCompletedReceived, "Should have received IndexDownloadCompletedSignal") + s.Require().GreaterOrEqual(archivesDownloaded, 1, "Should have downloaded at least 1 archive before cancellation (via signals)") + + s.T().Logf("Archive download cancellation test passed! Cancelled deterministically after downloading %d archive(s)", archivesDownloaded) + s.T().Logf("Task info: TotalArchivesCount=%d, TotalDownloadedArchivesCount=%d, Cancelled=%v", + result.taskInfo.TotalArchivesCount, + result.taskInfo.TotalDownloadedArchivesCount, + result.taskInfo.Cancelled) + + // Note: Due to parallel downloads, the TotalDownloadedArchivesCount in taskInfo might not match + // the number of signals received because cancellation can happen while downloads are in-flight. + // The important thing is that we successfully cancelled based on a signal and the Cancelled flag is set. 
+ s.T().Logf("Signals received: %d archives downloaded, TaskInfo reports: %d archives", + archivesDownloaded, result.taskInfo.TotalDownloadedArchivesCount) +} + // Run the integration test suite func TestCodexArchiveManagerSuite(t *testing.T) { suite.Run(t, new(CodexArchiveManagerSuite)) diff --git a/protocol/communities/codex_testutil_test.go b/protocol/communities/codex_testutil_test.go index fa2cb2f633e..03db7613b83 100644 --- a/protocol/communities/codex_testutil_test.go +++ b/protocol/communities/codex_testutil_test.go @@ -10,7 +10,7 @@ import ( "github.com/status-im/status-go/protocol/communities" ) -func NewCodexClientTest(t *testing.T) communities.CodexClient { +func NewCodexClientTest(t *testing.T) communities.CodexClientInterface { client, err := communities.NewCodexClient(params.CodexConfig{ Enabled: true, HistoryArchiveDataDir: filepath.Join(t.TempDir(), "codex", "archivedata"), diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 45cc2935434..ec3bc8cb0ce 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -213,8 +213,8 @@ type ArchiveService interface { SetCodexConfig(*params.CodexConfig) StartTorrentClient() error StartCodexClient() error - SetCodexClient(client *CodexClient) - GetCodexClient() *CodexClient + SetCodexClient(client CodexClientInterface) + GetCodexClient() CodexClientInterface Stop() error IsReady() bool GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) @@ -227,6 +227,7 @@ type ArchiveService interface { UnseedHistoryArchiveTorrent(communityID types.HexBytes) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) IsSeedingHistoryArchiveTorrent(communityID types.HexBytes) bool + IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool GetHistoryArchiveDownloadTask(communityID string) *HistoryArchiveDownloadTask AddHistoryArchiveDownloadTask(communityID string, task *HistoryArchiveDownloadTask) DownloadHistoryArchivesByMagnetlink(communityID types.HexBytes, magnetlink string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) @@ -517,6 +518,8 @@ type Subscription struct { DownloadingHistoryArchivesStartedSignal *signal.DownloadingHistoryArchivesStartedSignal DownloadingHistoryArchivesFinishedSignal *signal.DownloadingHistoryArchivesFinishedSignal ImportingHistoryArchiveMessagesSignal *signal.ImportingHistoryArchiveMessagesSignal + ManifestFetchedSignal *signal.ManifestFetchedSignal + IndexDownloadCompletedSignal *signal.IndexDownloadCompletedSignal CommunityEventsMessage *CommunityEventsMessage AcceptedRequestsToJoin []types.HexBytes RejectedRequestsToJoin []types.HexBytes diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 247c28e9465..e04640c7993 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -60,8 +60,9 @@ type ArchiveManager struct { torrentConfig *params.TorrentConfig torrentClient *torrent.Client codexConfig *params.CodexConfig - codexClient *CodexClient + codexClient CodexClientInterface isCodexClientStarted bool + downloadTimeout time.Duration // timeout for archive downloads, defaults to 20s torrentTasks map[string]metainfo.Hash historyArchiveDownloadTasks map[string]*HistoryArchiveDownloadTask historyArchiveTasksWaitGroup sync.WaitGroup @@ -84,6 +85,7 @@ func NewArchiveManager(amc *ArchiveManagerConfig) *ArchiveManager { return &ArchiveManager{ torrentConfig: amc.TorrentConfig, codexConfig: amc.CodexConfig, + downloadTimeout: 
20 * time.Second, torrentTasks: make(map[string]metainfo.Hash), historyArchiveDownloadTasks: make(map[string]*HistoryArchiveDownloadTask), @@ -97,7 +99,7 @@ func NewArchiveManager(amc *ArchiveManagerConfig) *ArchiveManager { } } -func (m *ArchiveManager) GetCodexClient() *CodexClient { +func (m *ArchiveManager) GetCodexClient() CodexClientInterface { return m.codexClient } @@ -231,8 +233,8 @@ func (m *ArchiveManager) StartCodexClient() error { if err != nil { return err } - m.codexClient = &client - m.ArchiveFileManager.codexClient = &client + m.codexClient = client + m.ArchiveFileManager.codexClient = client m.isCodexClientStarted = true return m.codexClient.Start() @@ -288,12 +290,16 @@ func (m *ArchiveManager) Stop() error { return nil } -func (m *ArchiveManager) SetCodexClient(client *CodexClient) { +func (m *ArchiveManager) SetCodexClient(client CodexClientInterface) { m.codexClient = client m.ArchiveFileManager.codexClient = client m.isCodexClientStarted = true } +func (m *ArchiveManager) SetDownloadTimeout(timeout time.Duration) { + m.downloadTimeout = timeout +} + func (m *ArchiveManager) torrentClientStarted() bool { return m.torrentClient != nil } @@ -585,6 +591,11 @@ func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes if err != nil { m.logger.Error("failed to remove CID from Codex", zap.Error(err)) } + + err = m.removeCodexIndexCidFile(communityID) + if err != nil { + m.logger.Error("failed to remove local index file", zap.Error(err)) + } } func (m *ArchiveManager) IsSeedingHistoryArchiveTorrent(communityID types.HexBytes) bool { @@ -594,6 +605,23 @@ func (m *ArchiveManager) IsSeedingHistoryArchiveTorrent(communityID types.HexByt return ok && torrent.Seeding() } +func (m *ArchiveManager) IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool { + if m.CodexIndexCidFileExists(communityID) { + cid, err := m.GetHistoryArchiveIndexCid(communityID) + if err != nil { + m.logger.Debug("failed to read Codex index CID", zap.String("communityID", communityID.String()), zap.Error(err)) + return false + } + hasCid, err := m.codexClient.HasCid(cid) + if err != nil { + m.logger.Debug("failed to verify Codex CID availability", zap.String("communityID", communityID.String()), zap.String("cid", cid), zap.Error(err)) + return false + } + return hasCid + } + return false +} + func (m *ArchiveManager) GetHistoryArchiveDownloadTask(communityID string) *HistoryArchiveDownloadTask { return m.historyArchiveDownloadTasks[communityID] } @@ -785,7 +813,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex Cancelled: false, } - timeout := time.After(20 * time.Second) + timeout := time.After(m.downloadTimeout) // Create separate cancel channel for the index downloader to avoid channel competition indexDownloaderCancel := make(chan struct{}) @@ -820,6 +848,14 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex return nil, fmt.Errorf("failed to fetch Codex manifest for CID %s: %w", indexCid, err) } + // Publish manifest fetched signal + m.publisher.publish(&Subscription{ + ManifestFetchedSignal: &signal.ManifestFetchedSignal{ + CommunityID: communityID.String(), + IndexCid: indexCid, + }, + }) + // Start downloading the index file indexDownloader.DownloadIndexFile() @@ -844,6 +880,14 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex if indexDownloader.IsDownloadComplete() { + // Publish index download completed signal + m.publisher.publish(&Subscription{ + 
IndexDownloadCompletedSignal: &signal.IndexDownloadCompletedSignal{ + CommunityID: communityID.String(), + IndexCid: indexCid, + }, + }) + err := m.writeCodexIndexCidToFile(communityID, indexCid) if err != nil { m.logger.Error("failed to write Codex index CID to file", zap.Error(err)) @@ -911,6 +955,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex case <-cancelTask: m.logger.Debug("cancelled downloading individual archives") close(archiveDownloaderCancel) + downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() downloadTaskInfo.Cancelled = true return downloadTaskInfo, nil case <-archiveTicker.C: diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index c065f11a8c7..a03dcd5a5a6 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -12,6 +12,7 @@ package communities import ( "bytes" "crypto/ecdsa" + "errors" "fmt" "os" "path" @@ -35,7 +36,7 @@ import ( type ArchiveFileManager struct { torrentConfig *params.TorrentConfig codexConfig *params.CodexConfig - codexClient *CodexClient + codexClient CodexClientInterface logger *zap.Logger persistence *Persistence identity *ecdsa.PrivateKey @@ -56,7 +57,7 @@ func NewArchiveFileManager(amc *ArchiveManagerConfig) *ArchiveFileManager { } } -func (m *ArchiveFileManager) SetCodexClient(codexClient *CodexClient) { +func (m *ArchiveFileManager) SetCodexClient(codexClient CodexClientInterface) { m.codexClient = codexClient } @@ -593,7 +594,20 @@ func (m *ArchiveFileManager) readCodexIndexFromFile(communityID types.HexBytes) func (m *ArchiveFileManager) removeCodexIndexFile(communityID types.HexBytes) error { indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - return os.Remove(indexFilePath) + err := os.Remove(indexFilePath) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil +} + +func (m *ArchiveFileManager) removeCodexIndexCidFile(communityID types.HexBytes) error { + indexCidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) + err := os.Remove(indexCidFilePath) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil } func (m *ArchiveFileManager) writeCodexIndexCidToFile(communityID types.HexBytes, cid string) error { @@ -681,9 +695,7 @@ func (m *ArchiveFileManager) GetHistoryArchiveMagnetlink(communityID types.HexBy } func (m *ArchiveFileManager) GetHistoryArchiveIndexCid(communityID types.HexBytes) (string, error) { - codexIndexCidPath := m.codexHistoryArchiveIndexCidFilePath(communityID) - - cidData, err := os.ReadFile(codexIndexCidPath) + cidData, err := m.readCodexIndexCidFromFile(communityID) if err != nil { return "", err } diff --git a/protocol/communities/manager_archive_nop.go b/protocol/communities/manager_archive_nop.go index 13775e70146..e0fd954ea44 100644 --- a/protocol/communities/manager_archive_nop.go +++ b/protocol/communities/manager_archive_nop.go @@ -39,11 +39,11 @@ func (tmm *ArchiveManagerNop) StartCodexClient() error { return nil } -func (tmm *ArchiveManagerNop) GetCodexClient() *CodexClient { +func (tmm *ArchiveManagerNop) GetCodexClient() CodexClientInterface { return nil } -func (tmm *ArchiveManagerNop) SetCodexClient(client *CodexClient) {} +func (tmm *ArchiveManagerNop) SetCodexClient(client CodexClientInterface) {} func (tmm *ArchiveManagerNop) Stop() error { return nil @@ -86,6 +86,10 @@ func (tmm *ArchiveManagerNop) 
IsSeedingHistoryArchiveTorrent(communityID types.H return false } +func (tmm *ArchiveManagerNop) IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool { + return false +} + func (tmm *ArchiveManagerNop) GetHistoryArchiveDownloadTask(communityID string) *HistoryArchiveDownloadTask { return nil } diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index abc5fb941d9..946e30e5784 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -2844,7 +2844,7 @@ func (m *Messenger) EditCommunity(request *requests.EditCommunity) (*MessengerRe if m.archiveManager.IsReady() { if !communitySettings.HistoryArchiveSupportEnabled { m.archiveManager.StopHistoryArchiveTasksInterval(id) - } else if !m.archiveManager.IsSeedingHistoryArchiveTorrent(id) { + } else if !m.archiveManager.IsSeedingHistoryArchiveTorrent(id) && !m.archiveManager.IsSeedingHistoryArchiveCodex(id) { var communities []*communities.Community communities = append(communities, community) go m.InitHistoryArchiveTasks(communities) diff --git a/signal/events_community_archives.go b/signal/events_community_archives.go index 9541524dea2..d49067b452e 100644 --- a/signal/events_community_archives.go +++ b/signal/events_community_archives.go @@ -36,6 +36,12 @@ const ( // EventDownloadingHistoryArchivesFinished is triggered when the community member node // has downloaded all archives EventDownloadingHistoryArchivesFinished = "community.downloadingHistoryArchivesFinished" + // EventManifestFetched is triggered when the community member node + // has successfully fetched the manifest for an archive index + EventManifestFetched = "community.manifestFetched" + // EventIndexDownloadCompleted is triggered when the community member node + // has completed downloading the archive index file + EventIndexDownloadCompleted = "community.indexDownloadCompleted" ) type CreatingHistoryArchivesSignal struct { @@ -82,6 +88,16 @@ type DownloadingHistoryArchivesFinishedSignal struct { CommunityID string `json:"communityId"` } +type ManifestFetchedSignal struct { + CommunityID string `json:"communityId"` + IndexCid string `json:"indexCid"` +} + +type IndexDownloadCompletedSignal struct { + CommunityID string `json:"communityId"` + IndexCid string `json:"indexCid"` +} + func SendHistoryArchivesProtocolEnabled() { send(EventHistoryArchivesProtocolEnabled, nil) } @@ -147,3 +163,17 @@ func SendDownloadingHistoryArchivesFinished(communityID string) { CommunityID: communityID, }) } + +func SendManifestFetched(communityID string, indexCid string) { + send(EventManifestFetched, ManifestFetchedSignal{ + CommunityID: communityID, + IndexCid: indexCid, + }) +} + +func SendIndexDownloadCompleted(communityID string, indexCid string) { + send(EventIndexDownloadCompleted, IndexDownloadCompletedSignal{ + CommunityID: communityID, + IndexCid: indexCid, + }) +} From 2cc4f52973b9219761df2d949746ba346ddc3b12 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 02:16:04 +0100 Subject: [PATCH 33/75] add proper launch config for debugging --- .vscode/launch.json | 100 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 .vscode/launch.json diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000000..06cf9287b86 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,100 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug Current Test", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${fileDirname}", + "args": ["-test.v", "-test.run", "^${selectedText}$"], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug Current Test Function", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${fileDirname}", + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug All Tests in Current File", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${fileDirname}", + "args": ["-test.v"], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug All Tests in Current Package", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${fileDirname}", + "args": ["-test.v", "-count=1"], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug Specific Test Suite (e.g., TestMessengerCommunitiesTokenPermissionsSuite)", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${workspaceFolder}/protocol", + "args": [ + "-test.v", + "-test.run", + "TestMessengerCommunitiesTokenPermissionsSuite" + ], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug Specific Test Case", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${workspaceFolder}/protocol", + "args": [ + "-test.v", + "-test.run", + "TestMessengerCommunitiesTokenPermissionsSuite/TestFullCodexIntegration", + "-count=1" + ], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + }, + { + "name": "Debug Mock Tests (Codex)", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${workspaceFolder}/protocol/communities", + "args": ["-test.v", "-test.run", "TestMock"], + "buildFlags": "-tags=gowaku_no_rln,gowaku_skip_migrations", + "env": { + "CGO_ENABLED": "1" + } + } + ] +} From 35b086b556362c2cf94607a41cfd1241b8201424 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 02:17:48 +0100 Subject: [PATCH 34/75] add more assertions for archive downloading --- .../communities/codex_manager_archive_test.go | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index ff4e5612a39..add918aa266 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -2,6 +2,7 @@ package communities_test import ( "bytes" + "crypto/ecdsa" "crypto/rand" "encoding/hex" "path/filepath" @@ -28,11 +29,11 @@ import ( type CodexArchiveManagerSuite struct { suite.Suite - codexClient communities.CodexClientInterface - // codexConfig *params.CodexConfig + codexClient communities.CodexClientInterface archiveManager *communities.ArchiveManager manager *communities.Manager - uploadedCIDs []string // Track uploaded CIDs for cleanup + identity *ecdsa.PrivateKey // Store identity for test access + uploadedCIDs []string // Track uploaded CIDs for cleanup } func buildCodexConfig(t *testing.T) *params.CodexConfig { @@ -49,7 +50,7 @@ func buildCodexConfig(t 
*testing.T) *params.CodexConfig { } } -func (s *CodexArchiveManagerSuite) buildManagers() (*communities.Manager, *communities.ArchiveManager) { +func (s *CodexArchiveManagerSuite) buildManagers() (*communities.Manager, *communities.ArchiveManager, *ecdsa.PrivateKey) { db, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{}) s.Require().NoError(err, "creating sqlite db instance") err = sqlite.Migrate(db) @@ -76,7 +77,7 @@ func (s *CodexArchiveManagerSuite) buildManagers() (*communities.Manager, *commu t := communities.NewArchiveManager(amc) s.Require().NoError(err) - return m, t + return m, t, key } func (s *CodexArchiveManagerSuite) CreateCommunity() *communities.Community { @@ -95,10 +96,11 @@ func (s *CodexArchiveManagerSuite) CreateCommunity() *communities.Community { // SetupSuite runs once before all tests in the suite func (s *CodexArchiveManagerSuite) SetupTest() { - m, t := s.buildManagers() + m, t, key := s.buildManagers() communities.SetValidateInterval(30 * time.Millisecond) s.manager = m s.archiveManager = t + s.identity = key s.Require().NoError(s.archiveManager.StartCodexClient()) client := s.archiveManager.GetCodexClient() s.Require().NotNil(client) @@ -275,6 +277,31 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { } s.T().Logf("All signals verified successfully!") + + // Verify that the index file exists and has correct content + loadedIndex, err := s.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.identity, communityID) + s.Require().NoError(err, "Failed to load index file from disk") + s.Require().NotNil(loadedIndex, "Loaded index should not be nil") + s.Require().Equal(len(archives), len(loadedIndex.Archives), "Loaded index should contain all archives") + + // Verify each archive in the loaded index matches the original + for _, archive := range archives { + loadedMetadata, exists := loadedIndex.Archives[archive.hash] + s.Require().True(exists, "Archive %s should exist in loaded index", archive.hash) + s.Require().NotNil(loadedMetadata, "Archive metadata should not be nil for %s", archive.hash) + s.Require().Equal(archiveCIDs[archive.hash], loadedMetadata.Cid, "CID should match for archive %s", archive.hash) + s.Require().Equal(archive.from, loadedMetadata.Metadata.From, "From timestamp should match for archive %s", archive.hash) + s.Require().Equal(archive.to, loadedMetadata.Metadata.To, "To timestamp should match for archive %s", archive.hash) + } + + s.T().Logf("Index file content verified successfully!") + + // Verify that the CID file exists and contains the correct CID + storedCid, err := s.archiveManager.GetHistoryArchiveIndexCid(communityID) + s.Require().NoError(err, "Failed to read CID file") + s.Require().Equal(cid, storedCid, "Stored CID should match the uploaded index CID") + + s.T().Logf("CID file content verified successfully! 
CID: %s", storedCid) } func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() { From 65e66b353c1f601d74fc30b15d579e15ae2f5f07 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 02:21:27 +0100 Subject: [PATCH 35/75] make start/stop of CodexClient more thread safe and ensure uniqueness of the discovery UDP port --- protocol/communities/manager_archive.go | 97 ++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 4 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index e04640c7993..c46b60d3a67 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -62,6 +62,7 @@ type ArchiveManager struct { codexConfig *params.CodexConfig codexClient CodexClientInterface isCodexClientStarted bool + codexClientMu sync.RWMutex downloadTimeout time.Duration // timeout for archive downloads, defaults to 20s torrentTasks map[string]metainfo.Hash historyArchiveDownloadTasks map[string]*HistoryArchiveDownloadTask @@ -100,11 +101,17 @@ func NewArchiveManager(amc *ArchiveManagerConfig) *ArchiveManager { } func (m *ArchiveManager) GetCodexClient() CodexClientInterface { + m.codexClientMu.RLock() + defer m.codexClientMu.RUnlock() return m.codexClient } func (m *ArchiveManager) SetOnline(online bool) { if online { + m.codexClientMu.RLock() + codexStarted := m.isCodexClientStarted + m.codexClientMu.RUnlock() + if m.torrentConfig != nil && m.torrentConfig.Enabled && !m.torrentClientStarted() { err := m.StartTorrentClient() if err != nil { @@ -112,7 +119,7 @@ func (m *ArchiveManager) SetOnline(online bool) { } } - if m.codexConfig != nil && m.codexConfig.Enabled && !m.isCodexClientStarted { + if m.codexConfig != nil && m.codexConfig.Enabled && !codexStarted { err := m.StartCodexClient() if err != nil { m.logger.Error("couldn't start codex client", zap.Error(err)) @@ -140,7 +147,7 @@ func (m *ArchiveManager) getTCPandUDPport(portNumber int) (int, error) { } // Find free port - for i := 0; i < 10; i++ { + for range 10 { port := func() int { tcpAddr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort("localhost", "0")) if err != nil { @@ -181,6 +188,60 @@ func (m *ArchiveManager) getTCPandUDPport(portNumber int) (int, error) { return 0, fmt.Errorf("no free port found") } +func (m *ArchiveManager) getFreeUDPPort() (int, error) { + udpAddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort("localhost", "0")) + if err != nil { + return 0, err + } + + udpListener, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return 0, err + } + defer udpListener.Close() + + return udpListener.LocalAddr().(*net.UDPAddr).Port, nil +} + +func (m *ArchiveManager) ensureCodexDiscoveryPort(config *params.CodexConfig) error { + checkPortAvailable := func(port int) (bool, error) { + addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort("localhost", fmt.Sprintf("%d", port))) + if err != nil { + return false, err + } + // Attempt to listen on the port; if it succeeds, it's available. 
+ conn, err := net.ListenUDP("udp", addr) + if err != nil { + return false, nil + } + _ = conn.Close() + return true, nil + } + + port := config.CodexNodeConfig.DiscoveryPort + if port != 0 { + available, err := checkPortAvailable(port) + if err != nil { + return err + } + if available { + return nil + } + m.logger.Warn("codex discovery port already in use, selecting a free one", zap.Int("port", port)) + } + + for range 10 { + freePort, err := m.getFreeUDPPort() + if err != nil { + continue + } + config.CodexNodeConfig.DiscoveryPort = freePort + return nil + } + + return fmt.Errorf("no free discovery port found for codex") +} + func (m *ArchiveManager) StartTorrentClient() error { if m.torrentConfig == nil { return fmt.Errorf("can't start torrent client: missing torrentConfig") @@ -220,6 +281,9 @@ func (m *ArchiveManager) StartTorrentClient() error { } func (m *ArchiveManager) StartCodexClient() error { + m.codexClientMu.Lock() + defer m.codexClientMu.Unlock() + if m.codexConfig == nil { return fmt.Errorf("can't start codex client: missing codexConfig") } @@ -229,18 +293,36 @@ func (m *ArchiveManager) StartCodexClient() error { } var err error - client, err := NewCodexClient(*m.codexConfig) + cfgCopy := *m.codexConfig + cfgCopy.CodexNodeConfig = m.codexConfig.CodexNodeConfig + + if err := m.ensureCodexDiscoveryPort(&cfgCopy); err != nil { + return err + } + + client, err := NewCodexClient(cfgCopy) if err != nil { return err } m.codexClient = client m.ArchiveFileManager.codexClient = client m.isCodexClientStarted = true + m.codexConfig.CodexNodeConfig.DiscoveryPort = cfgCopy.CodexNodeConfig.DiscoveryPort - return m.codexClient.Start() + if err := m.codexClient.Start(); err != nil { + m.isCodexClientStarted = false + m.codexClient = nil + m.ArchiveFileManager.codexClient = nil + return err + } + + return nil } func (m *ArchiveManager) StopCodexClient() error { + m.codexClientMu.Lock() + defer m.codexClientMu.Unlock() + errs := []error{} if m.isCodexClientStarted { m.logger.Info("Stopping codex client") @@ -257,6 +339,7 @@ func (m *ArchiveManager) StopCodexClient() error { m.isCodexClientStarted = false m.codexClient = nil + m.ArchiveFileManager.codexClient = nil } if len(errs) > 0 { @@ -291,6 +374,9 @@ func (m *ArchiveManager) Stop() error { } func (m *ArchiveManager) SetCodexClient(client CodexClientInterface) { + m.codexClientMu.Lock() + defer m.codexClientMu.Unlock() + m.codexClient = client m.ArchiveFileManager.codexClient = client m.isCodexClientStarted = true @@ -305,6 +391,9 @@ func (m *ArchiveManager) torrentClientStarted() bool { } func (m *ArchiveManager) IsReady() bool { + m.codexClientMu.RLock() + defer m.codexClientMu.RUnlock() + // Simply checking for `torrentConfig.Enabled` or `codexConfig.Enabled` // isn't enough as there's a possibility that the torrent client or the // codex client couldn't be instantiated (for example in case of port conflicts) From dccdd7df379acc69f357d1708776830c87fd75f6 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 02:30:43 +0100 Subject: [PATCH 36/75] widens the integration tests so that full downloadAndImportCodexHistoryArchives is used --- ...nities_messenger_token_permissions_test.go | 544 +++++++++++++++++- 1 file changed, 538 insertions(+), 6 deletions(-) diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 4f2f22f8a51..4958e12ab96 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ 
b/protocol/communities_messenger_token_permissions_test.go @@ -162,6 +162,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) defaultNodeCfg(tempDir strin // false is default, but being explicit here for clarity nodeCfg.CodexConfig.Enabled = false + nodeCfg.CodexConfig.CodexNodeConfig.Nat = "none" nodeCfg.TorrentConfig.Enabled = false nodeCfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference @@ -182,15 +183,15 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) SetupTest() { s.nodeConfigs = make(map[string]*params.NodeConfig) - ownerNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "owner_"+uuid.NewString())) + ownerNodeConfig := s.defaultNodeCfg(filepath.Join(s.T().TempDir(), "owner")) s.owner = s.newMessenger(ownerPassword, []string{ownerAddress}, "owner", []Option{}, ownerNodeConfig) s.nodeConfigs[s.owner.IdentityPublicKeyString()] = ownerNodeConfig - bobNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "bob_"+uuid.NewString())) + bobNodeConfig := s.defaultNodeCfg(filepath.Join(s.T().TempDir(), "bob")) s.bob = s.newMessenger(bobPassword, []string{bobAddress}, "bob", []Option{}, bobNodeConfig) s.nodeConfigs[s.bob.IdentityPublicKeyString()] = bobNodeConfig - aliceNodeConfig := s.defaultNodeCfg(filepath.Join(os.TempDir(), "alice_"+uuid.NewString())) + aliceNodeConfig := s.defaultNodeCfg(filepath.Join(s.T().TempDir(), "alice")) s.alice = s.newMessenger(alicePassword, []string{aliceAddress1, aliceAddress2}, "alice", []Option{}, aliceNodeConfig) s.nodeConfigs[s.alice.IdentityPublicKeyString()] = aliceNodeConfig @@ -2380,9 +2381,9 @@ func PrintArchiveIndex(index *protobuf.CodexWakuMessageArchiveIndex) { } func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArchiveMessages() { - - archiveDataDir := filepath.Join(os.TempDir(), "codex", "archivedata") - codexDataDir := filepath.Join(os.TempDir(), "codex", "codexdata") + dataDir := s.T().TempDir() + archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") + codexDataDir := filepath.Join(dataDir, "codex", "codexdata") log.Println("Data directory:", archiveDataDir) @@ -2615,6 +2616,526 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch s.Require().Equal(messageText1, receivedMessage1.Text) } +func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration() { + + dataDir := s.T().TempDir() + archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") + codexDataDir := filepath.Join(dataDir, "codex", "codexdata") + + log.Println("Data directory:", archiveDataDir) + + codexConfig := params.CodexConfig{ + Enabled: false, + HistoryArchiveDataDir: archiveDataDir, + CodexNodeConfig: codex.Config{ + DataDir: codexDataDir, + BlockRetries: 10, + LogLevel: "ERROR", + LogFormat: codex.LogFormatNoColors, + Nat: "none", + }, + } + + // Share archive directory between all users + // so that bob can access owner's created archive + s.owner.archiveManager.SetCodexConfig(&codexConfig) + s.bob.archiveManager.SetCodexConfig(&codexConfig) + + err := s.owner.archiveManager.StartCodexClient() + s.Require().NoError(err) + codexClient := s.owner.archiveManager.GetCodexClient() + s.Require().NotNil(codexClient) + // no need to stop codex client, as it will be stopped during messenger Stop + // defer codexClient.Stop() //nolint: errcheck + + s.bob.archiveManager.SetCodexClient(codexClient) + + // 1.1. 
Create community + community, chat := s.createCommunity() + + archiveDistributionPreferenceOwner, err := s.owner.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) + + archiveDistributionPreferenceBob, err := s.bob.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) + + // 1.2. Setup permissions + communityPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_BECOME_MEMBER, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "100000000000000000000", + Decimals: uint64(18), + }, + }, + } + + channelPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_CAN_VIEW_AND_POST_CHANNEL, + ChatIds: []string{chat.ID}, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "200000000000000000000", + Decimals: uint64(18), + }, + }, + } + + waitOnChannelKeyAdded := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeyAdd { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.owner.identity.PublicKey)] + return ok + }) + + waitOnCommunityPermissionCreated := waitOnCommunitiesEvent(s.owner, func(sub *communities.Subscription) bool { + return len(sub.Community.TokenPermissions()) == 2 + }) + + response, err := s.owner.CreateCommunityTokenPermission(communityPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + response, err = s.owner.CreateCommunityTokenPermission(channelPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + community = response.Communities()[0] + s.Require().True(community.HasTokenPermissions()) + s.Require().Len(community.TokenPermissions(), 2) + + err = <-waitOnCommunityPermissionCreated + s.Require().NoError(err) + s.Require().True(community.Encrypted()) + + err = <-waitOnChannelKeyAdded + s.Require().NoError(err) + + // 2. Owner: Send a message A + messageText1 := RandomLettersString(10) + message1 := s.sendChatMessage(s.owner, chat.ID, messageText1) + + // 2.2. Retrieve own message (to make it stored in the archive later) + _, err = s.owner.RetrieveAll() + s.Require().NoError(err) + + log.Println("Message sent with ID:", message1.ID) + + // 3. 
Owner: Create community archive + const partition = 2 * time.Minute + messageDate := time.UnixMilli(int64(message1.Timestamp)) + startDate := messageDate.Add(-time.Minute) + endDate := messageDate.Add(time.Minute) + topic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(chat.ID)) + communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) + topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} + + s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + + archiveIDs, err := s.owner.archiveManager.CreateHistoryArchiveCodexFromDB(community.ID(), topics, startDate, endDate, partition, community.Encrypted()) + s.Require().NoError(err) + s.Require().Len(archiveIDs, 1) + + community, err = s.owner.GetCommunityByID(community.ID()) + s.Require().NoError(err) + + // 4. Bob: join community (satisfying membership, but not channel permissions) + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, communityPermission.TokenCriteria[0]) + s.advertiseCommunityTo(community, s.bob) + + waitForKeysDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action := sub.keyActions.CommunityKeyAction + if action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok := action.Members[s.bob.IdentityPublicKeyString()] + return ok + }) + + s.joinCommunity(community, s.bob) + + err = <-waitForKeysDistributedToBob + s.Require().NoError(err) + + // 5. Bob: Import community archive + // The archive is successfully decrypted, but the message inside is not. + // https://github.com/status-im/status-desktop/issues/13105 can be reproduced at this stage + // by forcing `encryption.ErrHashRatchetGroupIDNotFound` in `ExtractMessagesFromHistoryArchive` after decryption here: + // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 + + // Ensure owner has archive + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + s.Require().NoError(err) + + // log + s.T().Logf("Codex archive OWNER index CID: %s", indexCid) + + // Let's trigger actual download from codex - archive will be overwritten + cancelChan := make(chan struct{}) + defer close(cancelChan) + s.bob.importDelayer.once.Do(func() { + close(s.bob.importDelayer.wait) + }) + + s.bob.downloadAndImportCodexHistoryArchives(community.ID(), indexCid, cancelChan) + s.Require().NoError(err) + + // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel + receivedMessage1, err := s.bob.MessageByID(message1.ID) + s.Require().Nil(receivedMessage1) + s.Require().Error(err) + + chatID := []byte(chat.ID) + hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + s.Require().NoError(err) + s.Require().Equal(1, hashRatchetMessagesCount) + + // Make bob satisfy channel criteria + waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok = 
action.Members[crypto.PubkeyToHex(&s.bob.identity.PublicKey)] + return ok + }) + + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, channelPermission.TokenCriteria[0]) + + // force owner to reevaluate channel members + // in production it will happen automatically, by periodic check + err = s.owner.communitiesManager.ForceMembersReevaluation(community.ID()) + s.Require().NoError(err) + + err = <-waitOnChannelKeyToBeDistributedToBob + s.Require().NoError(err) + + // Finally ensure that the message from archive was retrieved and decrypted + + // NOTE: In theory a single RetrieveAll call should be enough, + // because we immediately process all hash ratchet messages + response, err = s.bob.RetrieveAll() + s.Require().NoError(err) + s.Require().Len(response.Messages(), 1) + + receivedMessage1, ok := response.messages[message1.ID] + log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) + s.Require().True(ok) + s.Require().Equal(messageText1, receivedMessage1.Text) +} + +func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration2() { + // skip for now - WIP + s.T().Skip("WIP") + + dataDir := s.T().TempDir() + archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") + codexDataDir := filepath.Join(dataDir, "codex", "codexdata") + + log.Println("Data directory:", archiveDataDir) + + codexConfig := params.CodexConfig{ + Enabled: false, + HistoryArchiveDataDir: archiveDataDir, + CodexNodeConfig: codex.Config{ + DataDir: codexDataDir, + BlockRetries: 10, + LogLevel: "ERROR", + LogFormat: codex.LogFormatNoColors, + Nat: "none", + }, + } + + // Share archive directory between all users + // so that bob can access owner's created archive + s.owner.archiveManager.SetCodexConfig(&codexConfig) + s.bob.archiveManager.SetCodexConfig(&codexConfig) + + err := s.owner.archiveManager.StartCodexClient() + s.Require().NoError(err) + codexClient := s.owner.archiveManager.GetCodexClient() + s.Require().NotNil(codexClient) + // no need to stop codex client, as it will be stopped during messenger Stop + // defer codexClient.Stop() //nolint: errcheck + + s.bob.archiveManager.SetCodexClient(codexClient) + + // 1.1. Create community + community, chat := s.createCommunity() + + archiveDistributionPreferenceOwner, err := s.owner.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) + + archiveDistributionPreferenceBob, err := s.bob.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) + + // 1.2. 
Setup permissions + communityPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_BECOME_MEMBER, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "100000000000000000000", + Decimals: uint64(18), + }, + }, + } + + channelPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_CAN_VIEW_AND_POST_CHANNEL, + ChatIds: []string{chat.ID}, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "200000000000000000000", + Decimals: uint64(18), + }, + }, + } + + waitOnChannelKeyAdded := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeyAdd { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.owner.identity.PublicKey)] + return ok + }) + + waitOnCommunityPermissionCreated := waitOnCommunitiesEvent(s.owner, func(sub *communities.Subscription) bool { + return len(sub.Community.TokenPermissions()) == 2 + }) + + response, err := s.owner.CreateCommunityTokenPermission(communityPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + response, err = s.owner.CreateCommunityTokenPermission(channelPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + community = response.Communities()[0] + s.Require().True(community.HasTokenPermissions()) + s.Require().Len(community.TokenPermissions(), 2) + + err = <-waitOnCommunityPermissionCreated + s.Require().NoError(err) + s.Require().True(community.Encrypted()) + + err = <-waitOnChannelKeyAdded + s.Require().NoError(err) + + // 2. Owner: Send a message A + messageText1 := RandomLettersString(10) + message1 := s.sendChatMessage(s.owner, chat.ID, messageText1) + + // 2.2. Retrieve own message (to make it stored in the archive later) + _, err = s.owner.RetrieveAll() + s.Require().NoError(err) + + log.Println("Message sent with ID:", message1.ID) + + // 3. Owner: Create community archive + const partition = 2 * time.Minute + messageDate := time.UnixMilli(int64(message1.Timestamp)) + startDate := messageDate.Add(-time.Minute) + endDate := messageDate.Add(time.Minute) + topic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(chat.ID)) + communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) + topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} + + s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + + archiveIDs, err := s.owner.archiveManager.CreateHistoryArchiveCodexFromDB(community.ID(), topics, startDate, endDate, partition, community.Encrypted()) + s.Require().NoError(err) + s.Require().Len(archiveIDs, 1) + + community, err = s.owner.GetCommunityByID(community.ID()) + s.Require().NoError(err) + + // 4. 
Bob: join community (satisfying membership, but not channel permissions) + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, communityPermission.TokenCriteria[0]) + s.advertiseCommunityTo(community, s.bob) + + waitForKeysDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action := sub.keyActions.CommunityKeyAction + if action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok := action.Members[s.bob.IdentityPublicKeyString()] + return ok + }) + + s.joinCommunity(community, s.bob) + + err = <-waitForKeysDistributedToBob + s.Require().NoError(err) + + // 5. Bob: Import community archive + // The archive is successfully decrypted, but the message inside is not. + // https://github.com/status-im/status-desktop/issues/13105 can be reproduced at this stage + // by forcing `encryption.ErrHashRatchetGroupIDNotFound` in `ExtractMessagesFromHistoryArchive` after decryption here: + // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 + + // Ensure owner has archive + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + s.Require().NoError(err) + + // log + s.T().Logf("Codex archive OWNER index CID: %s", indexCid) + + // Ensure bob has archive (because they share same local directory) + // Let's trigger actual download from codex - archive will be overwritten + cancelChan := make(chan struct{}) + defer close(cancelChan) + // taskInfo, err := s.bob.archiveManager.DownloadHistoryArchivesByIndexCid(community.ID(), indexCid, cancelChan) + // we need to cancel the delayer, otherwise importHistoryArchives will wait for 5 minutes + // before starting the import + s.bob.importDelayer.once.Do(func() { + close(s.bob.importDelayer.wait) + }) + + currentMessageState := &CurrentMessageState{} + + state := &ReceivedMessageState{ + Response: &MessengerResponse{}, + Timesource: s.bob.getTimesource(), + CurrentMessageState: currentMessageState, + ExistingMessagesMap: map[string]bool{}, + AllChats: s.bob.allChats, + } + + state.CurrentMessageState.PublicKey = &community.PrivateKey().PublicKey + + // message := &protobuf.CommunityMessageArchiveIndexCid{ + // Clock: 1000, + // Cid: indexCid, + // } + + // err = s.bob.communitiesManager.SaveCommunitySettings(communities.CommunitySettings{ + // CommunityID: community.IDString(), + // HistoryArchiveSupportEnabled: true, + // }) + // s.Require().NoError(err) + + settings, err := s.bob.communitiesManager.GetCommunitySettingsByID(community.ID()) + s.Require().NoError(err) + + s.T().Logf("Bob community settings before download: %+v", settings) + + // s.bob.HandleCommunityMessageArchiveIndexCid(state, message, nil) + s.bob.downloadAndImportCodexHistoryArchives(community.ID(), indexCid, cancelChan) + s.Require().NoError(err) + + // // s.T().Logf("Codex download task info: %+v", taskInfo) + + // archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.bob.identity, community.ID()) + // s.Require().NoError(err) + // s.Require().Len(archiveIndex.Archives, 1) + + // PrintArchiveIndex(archiveIndex) + + // archiveHash := maps.Keys(archiveIndex.Archives)[0] + + // Save message archive ID as in + // 
https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4325-L4336 + // err = s.bob.archiveManager.SaveMessageArchiveID(community.ID(), archiveHash) + // s.Require().NoError(err) + + // Import archive + // s.bob.importDelayer.once.Do(func() { + // close(s.bob.importDelayer.wait) + // }) + // cancel := make(chan struct{}) + // err = s.bob.importHistoryArchives(community.ID(), cancel) + // s.Require().NoError(err) + + // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel + receivedMessage1, err := s.bob.MessageByID(message1.ID) + s.Require().Nil(receivedMessage1) + s.Require().Error(err) + + chatID := []byte(chat.ID) + hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + s.Require().NoError(err) + s.Require().Equal(1, hashRatchetMessagesCount) + + // Make bob satisfy channel criteria + waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.bob.identity.PublicKey)] + return ok + }) + + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, channelPermission.TokenCriteria[0]) + + // force owner to reevaluate channel members + // in production it will happen automatically, by periodic check + err = s.owner.communitiesManager.ForceMembersReevaluation(community.ID()) + s.Require().NoError(err) + + err = <-waitOnChannelKeyToBeDistributedToBob + s.Require().NoError(err) + + // Finally ensure that the message from archive was retrieved and decrypted + + // NOTE: In theory a single RetrieveAll call should be enough, + // because we immediately process all hash ratchet messages + response, err = s.bob.RetrieveAll() + s.Require().NoError(err) + s.Require().Len(response.Messages(), 1) + + receivedMessage1, ok := response.messages[message1.ID] + log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) + s.Require().True(ok) + s.Require().Equal(messageText1, receivedMessage1.Text) +} + func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabase() { // The messengers used in the tests in this suite use the helper newTestMessenger (protocol/messenger_builder_test.go). In the config setup (config.complete), tmc.nodeConfig defaults to an empty params.NodeConfig{} unless the test overrides it. The default params.NodeConfig zero-value has all nested configs (including CodexConfig.Enabled) set to false. 
@@ -2642,6 +3163,17 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Assert().False(ownerNodeCfgFromDB.CodexConfig.Enabled) s.Assert().False(bobNodeCfgFromDB.CodexConfig.Enabled) + s.owner.EnableCommunityHistoryArchiveProtocol() + s.bob.EnableCommunityHistoryArchiveProtocol() + + ownerNodeCfgFromDB2, err := s.owner.settings.GetNodeConfig() + s.Require().NoError(err) + bobNodeCfgFromDB2, err := s.bob.settings.GetNodeConfig() + s.Require().NoError(err) + + s.Assert().True(ownerNodeCfgFromDB2.CodexConfig.Enabled) + s.Assert().True(bobNodeCfgFromDB2.CodexConfig.Enabled) + // s.owner.settings.SaveSetting("node-config", ownerNodeCfgFromDB) } From 9d5eb345dc782c1932e8735d8d9fad67293e1fe5 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 05:38:03 +0100 Subject: [PATCH 37/75] more selective readiness control (bug fix) and bigger integration test (two nodes) --- protocol/communities/codex_client.go | 12 + .../communities/codex_client_interface.go | 5 + protocol/communities/manager.go | 2 + protocol/communities/manager_archive.go | 26 +- ...nities_messenger_token_permissions_test.go | 227 +++++++++++++++++- protocol/messenger_communities.go | 4 +- .../messenger_communities_import_discord.go | 134 ++++++----- protocol/messenger_handler.go | 8 +- 8 files changed, 351 insertions(+), 67 deletions(-) diff --git a/protocol/communities/codex_client.go b/protocol/communities/codex_client.go index afc97de4970..dbf5d026c27 100644 --- a/protocol/communities/codex_client.go +++ b/protocol/communities/codex_client.go @@ -135,3 +135,15 @@ func (c *CodexClient) UploadArchive(encodedArchive []byte) (string, error) { func (c *CodexClient) DownloadManifest(cid string) (codex.Manifest, error) { return c.node.DownloadManifest(cid) } + +func (c *CodexClient) PeerId() (string, error) { + return c.node.PeerId() +} + +func (c *CodexClient) Debug() (codex.DebugInfo, error) { + return c.node.Debug() +} + +func (c *CodexClient) Connect(peerId string, peerAddresses []string) error { + return c.node.Connect(peerId, peerAddresses) +} diff --git a/protocol/communities/codex_client_interface.go b/protocol/communities/codex_client_interface.go index 4b41fa0f579..bc02d122f7a 100644 --- a/protocol/communities/codex_client_interface.go +++ b/protocol/communities/codex_client_interface.go @@ -37,6 +37,11 @@ type CodexClientInterface interface { Stop() error Destroy() error + // Peer Management methods + PeerId() (string, error) + Debug() (codex.DebugInfo, error) + Connect(peerId string, peerAddresses []string) error + // logging methods UpdateLogLevel(logLevel string) error } diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index ec3bc8cb0ce..a14cbb65579 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -217,6 +217,8 @@ type ArchiveService interface { GetCodexClient() CodexClientInterface Stop() error IsReady() bool + IsTorrentReady() bool + IsCodexReady() bool GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) GetCommunityChatsTopics(communityID types.HexBytes) ([]messagingtypes.ContentTopic, error) GetHistoryArchivePartitionStartTimestamp(communityID types.HexBytes) (uint64, error) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index c46b60d3a67..2816b349197 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -390,15 +390,31 @@ func (m *ArchiveManager) torrentClientStarted() bool { 
return m.torrentClient != nil } +func (m *ArchiveManager) IsTorrentReady() bool { + m.codexClientMu.RLock() + defer m.codexClientMu.RUnlock() + + // Simply checking for `torrentConfig.Enabled` + // isn't enough as there's a possibility that the torrent client + // couldn't be instantiated (for example in case of port conflicts) + return m.torrentConfig != nil && m.torrentConfig.Enabled && m.torrentClientStarted() +} + +func (m *ArchiveManager) IsCodexReady() bool { + m.codexClientMu.RLock() + defer m.codexClientMu.RUnlock() + + // Simply checking for `codexConfig.Enabled` + // isn't enough as there's a possibility that the codex client + // couldn't be instantiated (for example in case of port conflicts) + return m.codexConfig != nil && m.codexConfig.Enabled && m.isCodexClientStarted +} + func (m *ArchiveManager) IsReady() bool { m.codexClientMu.RLock() defer m.codexClientMu.RUnlock() - // Simply checking for `torrentConfig.Enabled` or `codexConfig.Enabled` - // isn't enough as there's a possibility that the torrent client or the - // codex client couldn't be instantiated (for example in case of port conflicts) - return (m.torrentConfig != nil && m.torrentConfig.Enabled && m.torrentClientStarted() || - (m.codexConfig != nil && m.codexConfig.Enabled && m.isCodexClientStarted)) + return m.IsTorrentReady() || m.IsCodexReady() } func (m *ArchiveManager) GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) { diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 4958e12ab96..20284ff545c 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -163,6 +163,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) defaultNodeCfg(tempDir strin // false is default, but being explicit here for clarity nodeCfg.CodexConfig.Enabled = false nodeCfg.CodexConfig.CodexNodeConfig.Nat = "none" + // nodeCfg.CodexConfig.CodexNodeConfig.LogLevel = "TRACE" nodeCfg.TorrentConfig.Enabled = false nodeCfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference @@ -3174,7 +3175,231 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Assert().True(ownerNodeCfgFromDB2.CodexConfig.Enabled) s.Assert().True(bobNodeCfgFromDB2.CodexConfig.Enabled) - // s.owner.settings.SaveSetting("node-config", ownerNodeCfgFromDB) + // get codex client for owner - cast to concrete type + ownerCodexClient := s.owner.archiveManager.GetCodexClient() + s.Require().NotNil(ownerCodexClient) + + // get PeerId of the owner: + ownerInfo, err := ownerCodexClient.Debug() + s.Require().NoError(err) + s.Require().NotNil(ownerInfo) + + bobCodexClient := s.bob.archiveManager.GetCodexClient() + s.Require().NotNil(bobCodexClient) + + err = bobCodexClient.Connect(ownerInfo.ID, ownerInfo.Addrs) + s.Require().NoError(err) + + // 1.1. 
Create community + community, chat := s.createCommunity() + + archiveDistributionPreferenceOwner, err := s.owner.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) + + archiveDistributionPreferenceBob, err := s.bob.GetArchiveDistributionPreference() + s.Require().NoError(err) + log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) + s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) + + // 1.2. Setup permissions + communityPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_BECOME_MEMBER, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "100000000000000000000", + Decimals: uint64(18), + }, + }, + } + + channelPermission := &requests.CreateCommunityTokenPermission{ + CommunityID: community.ID(), + Type: protobuf.CommunityTokenPermission_CAN_VIEW_AND_POST_CHANNEL, + ChatIds: []string{chat.ID}, + TokenCriteria: []*protobuf.TokenCriteria{ + { + Type: protobuf.CommunityTokenType_ERC20, + ContractAddresses: map[uint64]string{testChainID1: "0x124"}, + Symbol: "TEST2", + AmountInWei: "200000000000000000000", + Decimals: uint64(18), + }, + }, + } + + waitOnChannelKeyAdded := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeyAdd { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.owner.identity.PublicKey)] + return ok + }) + + waitOnCommunityPermissionCreated := waitOnCommunitiesEvent(s.owner, func(sub *communities.Subscription) bool { + return len(sub.Community.TokenPermissions()) == 2 + }) + + response, err := s.owner.CreateCommunityTokenPermission(communityPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + response, err = s.owner.CreateCommunityTokenPermission(channelPermission) + s.Require().NoError(err) + s.Require().NotNil(response) + s.Require().Len(response.Communities(), 1) + + community = response.Communities()[0] + s.Require().True(community.HasTokenPermissions()) + s.Require().Len(community.TokenPermissions(), 2) + + err = <-waitOnCommunityPermissionCreated + s.Require().NoError(err) + s.Require().True(community.Encrypted()) + + err = <-waitOnChannelKeyAdded + s.Require().NoError(err) + + // 2. Owner: Send a message A + messageText1 := RandomLettersString(10) + message1 := s.sendChatMessage(s.owner, chat.ID, messageText1) + + // 2.2. Retrieve own message (to make it stored in the archive later) + _, err = s.owner.RetrieveAll() + s.Require().NoError(err) + + log.Println("Message sent with ID:", message1.ID) + + // 3. 
Owner: Create community archive + const partition = 2 * time.Minute + messageDate := time.UnixMilli(int64(message1.Timestamp)) + startDate := messageDate.Add(-time.Minute) + endDate := messageDate.Add(time.Minute) + topic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(chat.ID)) + communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) + topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} + + s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} + + // this will create archive and push it to codex + archiveIDs, err := s.owner.archiveManager.CreateHistoryArchiveCodexFromDB(community.ID(), topics, startDate, endDate, partition, community.Encrypted()) + s.Require().NoError(err) + s.Require().Len(archiveIDs, 1) + + // Ensure owner has archive + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + s.Require().NoError(err) + + // log + s.T().Logf("Codex archive OWNER index CID: %s", indexCid) + + community, err = s.owner.GetCommunityByID(community.ID()) + s.Require().NoError(err) + + // 4. Bob: join community (satisfying membership, but not channel permissions) + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, communityPermission.TokenCriteria[0]) + s.advertiseCommunityTo(community, s.bob) + + waitForKeysDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action := sub.keyActions.CommunityKeyAction + if action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok := action.Members[s.bob.IdentityPublicKeyString()] + return ok + }) + + s.joinCommunity(community, s.bob) + + err = <-waitForKeysDistributedToBob + s.Require().NoError(err) + + // 5. Bob: Import community archive + // The archive is successfully decrypted, but the message inside is not. 
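+ // (At this point Bob holds the community key but not the channel key, so the archived chat message stays undecryptable until the channel key is distributed below.)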
+ // https://github.com/status-im/status-desktop/issues/13105 can be reproduced at this stage + // by forcing `encryption.ErrHashRatchetGroupIDNotFound` in `ExtractMessagesFromHistoryArchive` after decryption here: + // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 + + // Let's trigger actual download from bob's codex node + cancelChan := make(chan struct{}) + defer close(cancelChan) + s.bob.importDelayer.once.Do(func() { + close(s.bob.importDelayer.wait) + }) + + s.bob.downloadAndImportCodexHistoryArchives(community.ID(), indexCid, cancelChan) + s.Require().NoError(err) + + // Ensure owner has archive + archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.bob.identity, community.ID()) + s.Require().NoError(err) + s.Require().Len(archiveIndex.Archives, 1) + + PrintArchiveIndex(archiveIndex) + + indexCid, err = s.bob.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + s.Require().NoError(err) + + // log + s.T().Logf("Codex archive BOB index CID: %s", indexCid) + + // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel + receivedMessage1, err := s.bob.MessageByID(message1.ID) + s.Require().Nil(receivedMessage1) + s.Require().Error(err) + + chatID := []byte(chat.ID) + hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + s.Require().NoError(err) + s.Require().Equal(1, hashRatchetMessagesCount) + + // Make bob satisfy channel criteria + waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { + action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] + if !ok || action.ActionType != communities.EncryptionKeySendToMembers { + return false + } + _, ok = action.Members[crypto.PubkeyToHex(&s.bob.identity.PublicKey)] + return ok + }) + + s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, channelPermission.TokenCriteria[0]) + + // force owner to reevaluate channel members + // in production it will happen automatically, by periodic check + err = s.owner.communitiesManager.ForceMembersReevaluation(community.ID()) + s.Require().NoError(err) + + err = <-waitOnChannelKeyToBeDistributedToBob + s.Require().NoError(err) + + // Finally ensure that the message from archive was retrieved and decrypted + + // NOTE: In theory a single RetrieveAll call should be enough, + // because we immediately process all hash ratchet messages + response, err = s.bob.RetrieveAll() + s.Require().NoError(err) + s.Require().Len(response.Messages(), 1) + + receivedMessage1, ok := response.messages[message1.ID] + log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) + s.Require().True(ok) + s.Require().Equal(messageText1, receivedMessage1.Text) + } func (s *MessengerCommunitiesTokenPermissionsSuite) TestDeleteChannelWithTokenPermission() { diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 946e30e5784..2c7d190e894 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -1990,7 +1990,7 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ } // The purpose of this torrent code is to get the 'magnetlink' to populate 'requestToJoinResponseProto.MagnetUri' - if m.archiveManager.IsReady() && m.archiveManager.TorrentFileExists(community.IDString()) { + if m.archiveManager.IsTorrentReady() && m.archiveManager.TorrentFileExists(community.IDString()) { magnetlink, err 
:= m.archiveManager.GetHistoryArchiveMagnetlink(community.ID()) if err != nil { m.logger.Warn("couldn't get magnet link for community", zap.Error(err)) @@ -1999,7 +1999,7 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ requestToJoinResponseProto.MagnetUri = magnetlink } - if m.archiveManager.IsReady() && m.archiveManager.CodexIndexCidFileExists(community.ID()) { + if m.archiveManager.IsCodexReady() && m.archiveManager.CodexIndexCidFileExists(community.ID()) { cid, err := m.archiveManager.GetHistoryArchiveIndexCid(community.ID()) if err != nil { m.logger.Warn("couldn't get codex index cid for community", zap.Error(err)) diff --git a/protocol/messenger_communities_import_discord.go b/protocol/messenger_communities_import_discord.go index ae439d14072..88bf24e9248 100644 --- a/protocol/messenger_communities_import_discord.go +++ b/protocol/messenger_communities_import_discord.go @@ -18,6 +18,7 @@ import ( "github.com/status-im/status-go/crypto/types" "github.com/status-im/status-go/images" messagingtypes "github.com/status-im/status-go/messaging/types" + "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol/common" "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/discord" @@ -963,33 +964,42 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC archiveTorrentCreatedSuccessfully := true archiveCodexCreatedSuccessfully := true - _, err = m.archiveManager.CreateHistoryArchiveTorrentFromMessages( - request.CommunityID, - wakuMessages, - topics, - startDate, - endDate, - messageArchiveInterval, - community.Encrypted(), - ) + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { - m.logger.Error("failed to create history archive torrent", zap.Error(err)) - archiveTorrentCreatedSuccessfully = false - } - - // codex extension - _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( - request.CommunityID, - wakuMessages, - topics, - startDate, - endDate, - messageArchiveInterval, - community.Encrypted(), - ) - if err != nil { - m.logger.Error("failed to create history archive codex", zap.Error(err)) - archiveCodexCreatedSuccessfully = false + m.logger.Error("failed to get archive distribution preference", zap.Error(err)) + continue + } + + if archiveDistributionPreference == params.ArchiveDistributionMethodTorrent { + _, err = m.archiveManager.CreateHistoryArchiveTorrentFromMessages( + request.CommunityID, + wakuMessages, + topics, + startDate, + endDate, + messageArchiveInterval, + community.Encrypted(), + ) + if err != nil { + m.logger.Error("failed to create history archive torrent", zap.Error(err)) + archiveTorrentCreatedSuccessfully = false + } + } + + if archiveDistributionPreference == params.ArchiveDistributionMethodCodex { + _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( + request.CommunityID, + wakuMessages, + topics, + startDate, + endDate, + messageArchiveInterval, + community.Encrypted(), + ) + if err != nil { + m.logger.Error("failed to create history archive codex", zap.Error(err)) + archiveCodexCreatedSuccessfully = false + } } if !archiveTorrentCreatedSuccessfully && !archiveCodexCreatedSuccessfully { @@ -1001,12 +1011,15 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC m.logger.Error("Failed to get community settings", zap.Error(err)) continue } - if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { + if 
m.archiveManager.IsTorrentReady() && communitySettings.HistoryArchiveSupportEnabled { err = m.archiveManager.SeedHistoryArchiveTorrent(request.CommunityID) if err != nil { m.logger.Error("failed to seed history archive", zap.Error(err)) } + } + + if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { go m.archiveManager.StartHistoryArchiveTasksInterval(community, messageArchiveInterval) } } @@ -1758,45 +1771,56 @@ func (m *Messenger) RequestImportDiscordCommunity(request *requests.ImportDiscor archiveTorrentCreatedSuccessfully := true archiveCodexCreatedSuccessfully := true - _, err = m.archiveManager.CreateHistoryArchiveTorrentFromMessages( - discordCommunity.ID(), - wakuMessages, - topics, - startDate, - endDate, - messageArchiveInterval, - discordCommunity.Encrypted(), - ) - if err != nil { - m.logger.Error("failed to create history archive torrent", zap.Error(err)) - archiveTorrentCreatedSuccessfully = false - } - - // codex extension - _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( - discordCommunity.ID(), - wakuMessages, - topics, - startDate, - endDate, - messageArchiveInterval, - discordCommunity.Encrypted(), - ) + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { - m.logger.Error("failed to create history archive codex", zap.Error(err)) - archiveCodexCreatedSuccessfully = false + m.logger.Error("failed to get archive distribution preference", zap.Error(err)) + continue + } + + if archiveDistributionPreference == params.ArchiveDistributionMethodTorrent { + _, err = m.archiveManager.CreateHistoryArchiveTorrentFromMessages( + discordCommunity.ID(), + wakuMessages, + topics, + startDate, + endDate, + messageArchiveInterval, + discordCommunity.Encrypted(), + ) + if err != nil { + m.logger.Error("failed to create history archive torrent", zap.Error(err)) + archiveTorrentCreatedSuccessfully = false + } + } + + if archiveDistributionPreference == params.ArchiveDistributionMethodCodex { + _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( + discordCommunity.ID(), + wakuMessages, + topics, + startDate, + endDate, + messageArchiveInterval, + discordCommunity.Encrypted(), + ) + if err != nil { + m.logger.Error("failed to create history archive codex", zap.Error(err)) + archiveCodexCreatedSuccessfully = false + } } if !archiveTorrentCreatedSuccessfully && !archiveCodexCreatedSuccessfully { continue } - if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { - + if m.archiveManager.IsTorrentReady() && communitySettings.HistoryArchiveSupportEnabled { err = m.archiveManager.SeedHistoryArchiveTorrent(discordCommunity.ID()) if err != nil { m.logger.Error("failed to seed history archive", zap.Error(err)) } + } + + if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { go m.archiveManager.StartHistoryArchiveTasksInterval(discordCommunity, messageArchiveInterval) } } diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 9e15f68d254..8109649e643 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1258,7 +1258,7 @@ func (m *Messenger) HandleHistoryArchiveMagnetlinkMessage(state *ReceivedMessage return nil } - if m.archiveManager.IsReady() && settings.HistoryArchiveSupportEnabled { + if m.archiveManager.IsTorrentReady() && settings.HistoryArchiveSupportEnabled { lastMagnetlinkClock, err := m.communitiesManager.GetMagnetlinkMessageClock(id) if err != nil { return err @@ -1336,7 +1336,7 @@ func (m 
*Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt return nil } - if m.archiveManager.IsReady() && settings.HistoryArchiveSupportEnabled { + if m.archiveManager.IsCodexReady() && settings.HistoryArchiveSupportEnabled { lastIndexCidClock, err := m.communitiesManager.GetIndexCidMessageClock(id) if err != nil { return err @@ -1776,7 +1776,7 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS } magnetlink := requestToJoinResponseProto.MagnetUri - if m.archiveManager.IsReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && magnetlink != "" { + if m.archiveManager.IsTorrentReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && magnetlink != "" { currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) go func(currentTask *communities.HistoryArchiveDownloadTask) { @@ -1805,7 +1805,7 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS } cid := requestToJoinResponseProto.IndexCid - if m.archiveManager.IsReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && cid != "" { + if m.archiveManager.IsCodexReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && cid != "" { currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) go func(currentTask *communities.HistoryArchiveDownloadTask) { From 51cd825c3a224506dbc6e631d5989889957c89ad Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 06:28:27 +0100 Subject: [PATCH 38/75] respect distribution preference in InitHistoryArchiveTasks and CreateAndSeedHistoryArchive --- protocol/communities/manager_archive.go | 43 +++++++++++++++++-------- protocol/messenger_communities.go | 31 +++++++++++++----- 2 files changed, 52 insertions(+), 22 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 2816b349197..d4eec70acab 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -503,24 +503,39 @@ func (m *ArchiveManager) GetHistoryArchivePartitionStartTimestamp(communityID ty } func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) error { - archiveTorrentCreatedSuccessfully := true - archiveCodexCreatedSuccessfully := true - m.UnseedHistoryArchiveTorrent(communityID) - _, errTorrent := m.ArchiveFileManager.CreateHistoryArchiveTorrentFromDB(communityID, topics, startDate, endDate, partition, encrypt) - if errTorrent != nil { - archiveTorrentCreatedSuccessfully = false - m.logger.Error("failed to create history archive torrent", zap.Error(errTorrent)) - } else { - errTorrent = m.SeedHistoryArchiveTorrent(communityID) + archiveTorrentCreatedSuccessfully := false + archiveCodexCreatedSuccessfully := false + distributionPreference, err := m.persistence.GetArchiveDistributionPreference() + if err != nil { + // fallback to codex + distributionPreference = params.ArchiveDistributionMethodCodex + } + + var errTorrent, errCodex error + + if distributionPreference == params.ArchiveDistributionMethodTorrent { + archiveTorrentCreatedSuccessfully = true + m.UnseedHistoryArchiveTorrent(communityID) + _, errTorrent := m.ArchiveFileManager.CreateHistoryArchiveTorrentFromDB(communityID, topics, startDate, endDate, partition, encrypt) if errTorrent != nil { 
archiveTorrentCreatedSuccessfully = false - m.logger.Error("failed to seed history archive torrent", zap.Error(errTorrent)) + m.logger.Error("failed to create history archive torrent", zap.Error(errTorrent)) + } else { + errTorrent = m.SeedHistoryArchiveTorrent(communityID) + if errTorrent != nil { + archiveTorrentCreatedSuccessfully = false + m.logger.Error("failed to seed history archive torrent", zap.Error(errTorrent)) + } } } - _, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) - if errCodex != nil { - archiveCodexCreatedSuccessfully = false - m.logger.Error("failed to create history archive codex", zap.Error(errCodex)) + + if distributionPreference == params.ArchiveDistributionMethodCodex { + archiveCodexCreatedSuccessfully = true + _, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) + if errCodex != nil { + archiveCodexCreatedSuccessfully = false + m.logger.Error("failed to create history archive codex", zap.Error(errCodex)) + } } if !archiveTorrentCreatedSuccessfully && !archiveCodexCreatedSuccessfully { return errors.Join(errTorrent, errCodex) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 2c7d190e894..796e2dc681a 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -28,6 +28,7 @@ import ( "github.com/status-im/status-go/images" messagingtypes "github.com/status-im/status-go/messaging/types" multiaccountscommon "github.com/status-im/status-go/multiaccounts/common" + "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol/common" "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/communities/token" @@ -3798,6 +3799,13 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community peerInfo := m.messaging.GetActiveStorenode() + preference, err := m.GetArchiveDistributionPreference() + if err != nil { + m.logger.Error("failed to get archive distribution preference", zap.Error(err)) + // fallback to codex + preference = params.ArchiveDistributionMethodCodex + } + for _, c := range communities { if c.Joined() { @@ -3811,11 +3819,13 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community continue } - // Check if there's already a torrent file for this community and seed it - if m.archiveManager.TorrentFileExists(c.IDString()) { - err = m.archiveManager.SeedHistoryArchiveTorrent(c.ID()) - if err != nil { - m.logger.Error("failed to seed history archive", zap.Error(err)) + if preference == params.ArchiveDistributionMethodTorrent { + // Check if there's already a torrent file for this community and seed it + if m.archiveManager.TorrentFileExists(c.IDString()) { + err = m.archiveManager.SeedHistoryArchiveTorrent(c.ID()) + if err != nil { + m.logger.Error("failed to seed history archive", zap.Error(err)) + } } } @@ -3889,10 +3899,15 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community // Last archive is less than `interval` old, wait until `interval` is complete, // then create archive and kick off archive creation loop for future archives // Seed current archive in the meantime - err := m.archiveManager.SeedHistoryArchiveTorrent(c.ID()) - if err != nil { - m.logger.Error("failed to seed history archive", zap.Error(err)) + if preference == params.ArchiveDistributionMethodTorrent { + err := 
m.archiveManager.SeedHistoryArchiveTorrent(c.ID()) + if err != nil { + m.logger.Error("failed to seed history archive", zap.Error(err)) + } } + // we do not have to explicitly seed to codex. If codex is enabled + // and the index cid was not explicitly removed, it will be + // advertised automatically => or maybe we do? timeToNextInterval := messageArchiveInterval - durationSinceLastArchive m.logger.Debug("starting history archive tasks interval in", zap.Any("timeLeft", timeToNextInterval)) From 169dd52774f9406b537aa20f6608d2b528988aae Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 07:22:02 +0100 Subject: [PATCH 39/75] minor fix - more consistent seeding behavior for codex --- protocol/communities/manager.go | 1 + protocol/communities/manager_archive.go | 28 +++++++++++++++++++ protocol/communities/manager_archive_file.go | 12 ++++++++ protocol/communities/manager_archive_nop.go | 4 +++ protocol/messenger_communities.go | 14 ++++++++++ .../messenger_communities_import_discord.go | 14 ++++++++++ 6 files changed, 73 insertions(+) diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index a14cbb65579..5e0e0c888ed 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -226,6 +226,7 @@ type ArchiveService interface { StartHistoryArchiveTasksInterval(community *Community, interval time.Duration) StopHistoryArchiveTasksInterval(communityID types.HexBytes) SeedHistoryArchiveTorrent(communityID types.HexBytes) error + SeedHistoryArchiveIndexCid(communityID types.HexBytes) error UnseedHistoryArchiveTorrent(communityID types.HexBytes) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) IsSeedingHistoryArchiveTorrent(communityID types.HexBytes) bool diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index d4eec70acab..54fbbea9a96 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -531,11 +531,13 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, if distributionPreference == params.ArchiveDistributionMethodCodex { archiveCodexCreatedSuccessfully = true + m.UnseedHistoryArchiveIndexCid(communityID) _, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) if errCodex != nil { archiveCodexCreatedSuccessfully = false m.logger.Error("failed to create history archive codex", zap.Error(errCodex)) } + // CreateHistoryArchiveCodexFromDB already seeds the index cid to codex } if !archiveTorrentCreatedSuccessfully && !archiveCodexCreatedSuccessfully { return errors.Join(errTorrent, errCodex) @@ -690,6 +692,32 @@ func (m *ArchiveManager) UnseedHistoryArchiveTorrent(communityID types.HexBytes) } } +func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) error { + if !m.IsCodexReady() { + return nil + } + exists, err := m.codexIndexFileExists(communityID) + if err != nil { + return err + } + if exists { + indexBytes, err := m.readCodexIndexFromFile(communityID) + if err != nil { + return err + } + cid, err := m.codexClient.UploadArchive(indexBytes) + if err != nil { + return err + } + err = m.writeCodexIndexCidToFile(communityID, cid) + if err != nil { + m.codexClient.RemoveCid(cid) + return err + } + } + return nil +} + func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) { // Remove local index file err := m.removeCodexIndexFile(communityID) diff --git 
a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index a03dcd5a5a6..ea0be5c4d37 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -592,6 +592,18 @@ func (m *ArchiveFileManager) readCodexIndexFromFile(communityID types.HexBytes) return os.ReadFile(indexFilePath) } +func (m *ArchiveFileManager) codexIndexFileExists(communityID types.HexBytes) (bool, error) { + indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) + _, err := os.Stat(indexFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, err + } + return true, nil +} + func (m *ArchiveFileManager) removeCodexIndexFile(communityID types.HexBytes) error { indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) err := os.Remove(indexFilePath) diff --git a/protocol/communities/manager_archive_nop.go b/protocol/communities/manager_archive_nop.go index e0fd954ea44..27d4939a7f7 100644 --- a/protocol/communities/manager_archive_nop.go +++ b/protocol/communities/manager_archive_nop.go @@ -78,6 +78,10 @@ func (tmm *ArchiveManagerNop) SeedHistoryArchiveTorrent(communityID types.HexByt return nil } +func (tmm *ArchiveManagerNop) SeedHistoryArchiveIndexCid(communityID types.HexBytes) error { + return nil +} + func (tmm *ArchiveManagerNop) UnseedHistoryArchiveTorrent(communityID types.HexBytes) {} func (tmm *ArchiveManagerNop) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) {} diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 796e2dc681a..f31e5921ac8 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -3829,6 +3829,14 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community } } + if preference == params.ArchiveDistributionMethodCodex { + // Check if there's already a codex file for this community and seed it + err = m.archiveManager.SeedHistoryArchiveIndexCid(c.ID()) + if err != nil { + m.logger.Error("failed to seed history archive", zap.Error(err)) + } + } + filters, err := m.archiveManager.GetCommunityChatsFilters(c.ID()) if err != nil { m.logger.Error("failed to get community chats filters for community", zap.Error(err)) @@ -3905,6 +3913,12 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community m.logger.Error("failed to seed history archive", zap.Error(err)) } } + if preference == params.ArchiveDistributionMethodCodex { + err := m.archiveManager.SeedHistoryArchiveIndexCid(c.ID()) + if err != nil { + m.logger.Error("failed to seed history archive", zap.Error(err)) + } + } // we do not have to explicitly seed to codex. If codex is enabled // and the index cid was not explicitly removed, it will be // advertised automatically => or maybe we do? 
diff --git a/protocol/messenger_communities_import_discord.go b/protocol/messenger_communities_import_discord.go index 88bf24e9248..ababe8cb667 100644 --- a/protocol/messenger_communities_import_discord.go +++ b/protocol/messenger_communities_import_discord.go @@ -1019,6 +1019,13 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC } } + if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { + err = m.archiveManager.SeedHistoryArchiveIndexCid(request.CommunityID) + if err != nil { + m.logger.Error("failed to seed history archive index cid", zap.Error(err)) + } + } + if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { go m.archiveManager.StartHistoryArchiveTasksInterval(community, messageArchiveInterval) } @@ -1820,6 +1827,13 @@ func (m *Messenger) RequestImportDiscordCommunity(request *requests.ImportDiscor } } + if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { + err = m.archiveManager.SeedHistoryArchiveIndexCid(discordCommunity.ID()) + if err != nil { + m.logger.Error("failed to seed history archive index cid", zap.Error(err)) + } + } + if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { go m.archiveManager.StartHistoryArchiveTasksInterval(discordCommunity, messageArchiveInterval) } From fb420768a8683b499606f2d5897b93d4dd0245b1 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 6 Nov 2025 07:49:14 +0100 Subject: [PATCH 40/75] Use UniversalChatID to get the universal content topic instead of looking for chat id filters --- protocol/communities/manager_archive.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 54fbbea9a96..ec0828ad6e5 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -456,25 +456,25 @@ func (m *ArchiveManager) getLastMessageArchiveEndDate(communityID types.HexBytes } func (m *ArchiveManager) GetHistoryArchivePartitionStartTimestamp(communityID types.HexBytes) (uint64, error) { - filters, err := m.GetCommunityChatsFilters(communityID) + community, err := m.persistence.GetByID(&m.identity.PublicKey, communityID) if err != nil { - m.logger.Error("failed to get community chats filters", zap.Error(err)) + m.logger.Error("failed to load community", zap.Error(err)) return 0, err } - if len(filters) == 0 { - // If we don't have chat filters, we likely don't have any chats - // associated to this community, which means there's nothing more - // to do here - return 0, nil - } - topics := []messagingtypes.ContentTopic{} - for _, filter := range filters { + if filter := m.messaging.ChatFilterByChatID(community.UniversalChatID()); filter != nil { topics = append(topics, filter.ContentTopic()) } + if len(topics) == 0 { + // If we don't have universal topic, we likely don't have any chats + // associated to this community, which means there's nothing more + // to do here + return 0, nil + } + lastArchiveEndDateTimestamp, err := m.getLastMessageArchiveEndDate(communityID) if err != nil { m.logger.Error("failed to get last archive end date", zap.Error(err)) @@ -550,6 +550,7 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, IndexCid: archiveCodexCreatedSuccessfully, // true if codex created successfully }, }) + return nil } @@ -573,6 +574,7 @@ func (m *ArchiveManager) StartHistoryArchiveTasksInterval(community *Community, 
select { case <-ticker.C: m.logger.Debug("starting archive task...", zap.String("id", id)) + lastArchiveEndDateTimestamp, err := m.GetHistoryArchivePartitionStartTimestamp(community.ID()) if err != nil { m.logger.Error("failed to get last archive end date", zap.Error(err)) From 783395ee0f68f9cb99ac5a28e25751ab1410d45d Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 6 Nov 2025 08:42:07 +0100 Subject: [PATCH 41/75] Add integration test for codex archive --- api/backend_test.go | 1 + api/defaults.go | 7 +- protocol/communities/manager_archive.go | 5 ++ protocol/communities/manager_test.go | 19 ++++- protocol/messenger_communities.go | 4 ++ protocol/requests/create_account.go | 1 + services/ext/api.go | 4 ++ tests-functional/clients/services/wakuext.py | 17 +++++ tests-functional/clients/status_backend.py | 3 + tests-functional/steps/messenger.py | 6 +- .../tests/test_wakuext_community_archives.py | 69 +++++++++++++++++++ 11 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 tests-functional/tests/test_wakuext_community_archives.py diff --git a/api/backend_test.go b/api/backend_test.go index d86a7ec6b97..230060a3859 100644 --- a/api/backend_test.go +++ b/api/backend_test.go @@ -1527,6 +1527,7 @@ func TestRestoreKeycardAccountAndLogin(t *testing.T) { }, "torrentConfigEnabled": false, "torrentConfigPort": 0, + "codexConfigEnabled": false, "keycardInstanceUID": "a84599394887b742eed9a99d3834a797", "keycardPairingDataFile": path.Join(tmpdir, DefaultKeycardPairingDataFileRelativePath), }, diff --git a/api/defaults.go b/api/defaults.go index e82b475905a..bff69c9b72a 100644 --- a/api/defaults.go +++ b/api/defaults.go @@ -345,14 +345,17 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc if request.TorrentConfigEnabled != nil { nodeConfig.TorrentConfig.Enabled = *request.TorrentConfigEnabled - } if request.TorrentConfigPort != nil { nodeConfig.TorrentConfig.Port = *request.TorrentConfigPort } + if request.CodexConfigEnabled != nil { + nodeConfig.CodexConfig.Enabled = *request.CodexConfigEnabled + } + nodeConfig.CodexConfig = params.CodexConfig{ - Enabled: false, + Enabled: nodeConfig.CodexConfig.Enabled, HistoryArchiveDataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "archivedata"), CodexNodeConfig: codex.Config{ DataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "codexdata"), diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index ec0828ad6e5..f6a426903b5 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -462,6 +462,11 @@ func (m *ArchiveManager) GetHistoryArchivePartitionStartTimestamp(communityID ty return 0, err } + if community == nil { + m.logger.Error("community not found for this id") + return 0, err + } + topics := []messagingtypes.ContentTopic{} if filter := m.messaging.ChatFilterByChatID(community.UniversalChatID()); filter != nil { diff --git a/protocol/communities/manager_test.go b/protocol/communities/manager_test.go index 09fb9dd838b..8a9d0b2287d 100644 --- a/protocol/communities/manager_test.go +++ b/protocol/communities/manager_test.go @@ -65,15 +65,31 @@ func (s *ManagerSuite) buildManagers(ownerVerifier OwnerVerifier) (*Manager, *Ar s.Require().NoError(err) s.Require().NoError(m.Start()) + messagingEnv, err := messaging.NewTestMessagingEnvironment() + s.Require().NoError(err) + + appDb, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{}) + s.Require().NoError(err) + + err = sqlite.Migrate(appDb) + 
s.Require().NoError(err) + + core, err := messagingEnv.NewTestCore( + messaging.CoreParams{}, + messaging.WithSQLitePersistence(appDb), + ) + s.Require().NoError(err) + amc := &ArchiveManagerConfig{ TorrentConfig: buildTorrentConfig(), CodexConfig: buildCodexConfig(s.T()), Logger: logger, Persistence: m.GetPersistence(), - Messaging: nil, + Messaging: core.API(), Identity: key, Publisher: m, } + t := NewArchiveManager(amc) s.Require().NoError(err) @@ -1692,6 +1708,7 @@ func (s *ManagerSuite) buildCommunityWithChat() (*Community, string, error) { if err != nil { return nil, "", err } + chat := &protobuf.CommunityChat{ Identity: &protobuf.ChatIdentity{ DisplayName: "added-chat", diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index f31e5921ac8..58ce9db402b 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -5179,3 +5179,7 @@ func (m *Messenger) SetArchiveDistributionPreference(preference string) error { func (m *Messenger) GetArchiveDistributionPreference() (string, error) { return m.communitiesManager.GetArchiveDistributionPreference() } + +func (m *Messenger) CodexIndexCidFileExists(communityID types.HexBytes) bool { + return m.archiveManager.CodexIndexCidFileExists(communityID) +} diff --git a/protocol/requests/create_account.go b/protocol/requests/create_account.go index a96f59a2a74..5395c0be9c9 100644 --- a/protocol/requests/create_account.go +++ b/protocol/requests/create_account.go @@ -79,6 +79,7 @@ type CreateAccount struct { TorrentConfigEnabled *bool TorrentConfigPort *int + CodexConfigEnabled *bool APIConfig *APIConfig `json:"apiConfig"` diff --git a/services/ext/api.go b/services/ext/api.go index 19b535f9354..60294b5c15c 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -1620,3 +1620,7 @@ func (api *PublicAPI) DeleteCommunityMemberMessages(request *requests.DeleteComm func (api *PublicAPI) PeerID() string { return api.service.messaging.PeerID().String() } + +func (m *PublicAPI) HasCommunityArchive(communityID types.HexBytes) bool { + return m.service.messenger.CodexIndexCidFileExists(communityID) +} diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index 1441148d3fc..7d3cd56f53e 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -226,6 +226,7 @@ def create_community( membership: CommunityPermissionsAccess = CommunityPermissionsAccess.AUTO_ACCEPT, image="", image_rect=ImageCropRect(), + historyArchiveSupportEnabled=False, ): params = { "membership": membership.value, @@ -237,6 +238,7 @@ def create_community( "imageAy": image_rect.ay, "imageBx": image_rect.bx, "imageBy": image_rect.by, + "historyArchiveSupportEnabled": historyArchiveSupportEnabled, } response = self.rpc_request("createCommunity", [params]) return response @@ -769,3 +771,18 @@ def get_verification_request_sent_to(self, contact_id: str): params = [contact_id] response = self.rpc_request("getVerificationRequestSentTo", params) return response + + def update_message_archive_interval(self, interval_seconds: int): + params = [interval_seconds] + response = self.rpc_request("updateMessageArchiveInterval", params) + return response + + def has_community_archive(self, community_id: str): + params = [community_id] + response = self.rpc_request("hasCommunityArchive", params) + return response + + def set_archive_distribution_preference(self, preference: str): + params = [{"preference": preference}] + response = 
self.rpc_request("setArchiveDistributionPreference", params) + return response diff --git a/tests-functional/clients/status_backend.py b/tests-functional/clients/status_backend.py index 33728f9e59d..caea723e507 100644 --- a/tests-functional/clients/status_backend.py +++ b/tests-functional/clients/status_backend.py @@ -279,6 +279,9 @@ def _create_account_request(self, user, **kwargs): "wsHost": "0.0.0.0", "wsPort": constants.STATUS_CONNECTOR_WS_PORT, }, + "codexConfigEnabled": kwargs.get("codex_config_enabled", False), + "torrentConfigEnabled": False, + "torrentConfigPort": 9025, "thirdpartyServicesEnabled": True, } if not Config.disable_override_networks: diff --git a/tests-functional/steps/messenger.py b/tests-functional/steps/messenger.py index 591e869cf70..4ad0de2d53f 100644 --- a/tests-functional/steps/messenger.py +++ b/tests-functional/steps/messenger.py @@ -146,8 +146,10 @@ def join_private_group(self, admin=None, member=None) -> str: ) return response.get("chats", [])[0].get("id") - def create_community(self, node): - response = node.wakuext_service.create_community(fake.community_name(), fake.community_description()) + def create_community(self, node, historyArchiveSupportEnabled=False): + response = node.wakuext_service.create_community( + fake.community_name(), fake.community_description(), historyArchiveSupportEnabled=historyArchiveSupportEnabled + ) self.community_id = response.get("communities", [{}])[0].get("id") return self.community_id diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py new file mode 100644 index 00000000000..536e0b62445 --- /dev/null +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -0,0 +1,69 @@ +from uuid import uuid4 +import pytest +import time + +from steps.messenger import MessengerSteps +from clients.signals import SignalType + + +@pytest.mark.rpc +class TestCommunityArchives(MessengerSteps): + @pytest.fixture(autouse=True) + def setup_backends(self, backend_new_profile): + """Initialize three backends (creator, member and another_member) for each test function""" + self.creator = backend_new_profile("creator", codex_config_enabled=True) + self.creator.wakuext_service.set_archive_distribution_preference("codex") + self.creator.wakuext_service.update_message_archive_interval(15) + + self.member = backend_new_profile("member", codex_config_enabled=True) + self.member.wakuext_service.set_archive_distribution_preference("codex") + self.member.wakuext_service.update_message_archive_interval(15) + + self.another_member = backend_new_profile("member", codex_config_enabled=True) + self.another_member.wakuext_service.set_archive_distribution_preference("codex") + self.another_member.wakuext_service.update_message_archive_interval(15) + + self.fake_address = "0x" + str(uuid4())[:8] + self.community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) + self.join_community(member=self.member, admin=self.creator) + self.display_name = "chat_" + str(uuid4()) + self.chat_payload = { + "identity": { + "displayName": self.display_name, + "emoji": "😀", + "color": "#1f2c75", + "description": self.display_name, + }, + "viewersCanPostReactions": False, + "hideIfPermissionsNotMet": False, + "permissions": {"access": 1}, + } + + def test_community_archive_index_exists(self): + # Create community chat + create_resp = self.creator.wakuext_service.create_community_chat(self.community_id, self.chat_payload) + chat_id = create_resp.get("chats")[0].get("id") + 
+ # Wait for member to receive chat creation signal + self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) + # Send a message to the community chat + text = f"Hi @{self.member.public_key}" + send_resp = self.creator.wakuext_service.send_chat_message(chat_id, text) + assert send_resp.get("chats")[0].get("lastMessage").get("text") == text + message_id = send_resp.get("messages", [])[0].get("id", "") + + # Wait for member to receive the new message + self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=message_id, timeout=10) + member_msgs_resp = self.member.wakuext_service.chat_messages(chat_id) + assert member_msgs_resp.get("messages")[0].get("text") == text + + # self.join_community(member=self.another_member, admin=self.creator) + # self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) + # # member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) + # messages = member_msgs_resp.get("messages", []) + + time.sleep(30) + + # Ensure that the community archive is available for the creator + has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) + assert has_archive is True From 6c06722e32ca6d176ad91320990ad87754c9e543 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 6 Nov 2025 08:59:09 +0100 Subject: [PATCH 42/75] Increase timeout for archive test --- tests-functional/tests/test_wakuext_community_archives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 536e0b62445..671cc12c6f0 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -62,7 +62,7 @@ def test_community_archive_index_exists(self): # # member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) # messages = member_msgs_resp.get("messages", []) - time.sleep(30) + time.sleep(60) # Ensure that the community archive is available for the creator has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) From fa6efed314e43676f3e5f9f65bc7c9b2c0176237 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 11:13:39 +0100 Subject: [PATCH 43/75] removes removing index files from UnseedHistoryArchiveIndexCid --- protocol/communities/manager_archive.go | 33 +++++++++---------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index f6a426903b5..397d63e16bc 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -726,30 +726,21 @@ func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) } func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) { - // Remove local index file - err := m.removeCodexIndexFile(communityID) - if err != nil { - m.logger.Error("failed to remove local index file", zap.Error(err)) - } - - // get currently advertised index Cid - cid, err := m.GetHistoryArchiveIndexCid(communityID) - - if err != nil { - m.logger.Debug("failed to get history archive index CID", zap.Error(err)) - return - } + if m.CodexIndexCidFileExists(communityID) { + // get currently advertised index Cid + cid, err := m.GetHistoryArchiveIndexCid(communityID) - m.logger.Debug("Unseeding 
index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) + if err != nil { + m.logger.Debug("failed to get history archive index CID", zap.Error(err)) + return + } - err = m.codexClient.RemoveCid(cid) - if err != nil { - m.logger.Error("failed to remove CID from Codex", zap.Error(err)) - } + m.logger.Debug("Unseeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) - err = m.removeCodexIndexCidFile(communityID) - if err != nil { - m.logger.Error("failed to remove local index file", zap.Error(err)) + err = m.codexClient.RemoveCid(cid) + if err != nil { + m.logger.Error("failed to remove CID from Codex", zap.Error(err)) + } } } From dece6abdb6434500dd5daf04b6e55ebcfe0c5c0f Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 12:49:51 +0100 Subject: [PATCH 44/75] Makes (re)seeding of Codex index Cid more consistent --- protocol/communities/manager_archive.go | 28 +++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 397d63e16bc..d43f34adbb9 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -537,12 +537,23 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, if distributionPreference == params.ArchiveDistributionMethodCodex { archiveCodexCreatedSuccessfully = true m.UnseedHistoryArchiveIndexCid(communityID) - _, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) + codexArchiveIDs, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) if errCodex != nil { archiveCodexCreatedSuccessfully = false m.logger.Error("failed to create history archive codex", zap.Error(errCodex)) + } else { + if len(codexArchiveIDs) == 0 { + // no new codex archives were created - no need to distribute new index cid + // but we need to (re)start seeding what we stopped above + archiveCodexCreatedSuccessfully = false + m.logger.Debug("no codex archive ids were created") + if err = m.SeedHistoryArchiveIndexCid(communityID); err != nil { + m.logger.Error("failed to seed existing history archive codex index cid", zap.Error(err)) + } + } + // else: we created new codex archives and they are already published to codex + // in CreateHistoryArchiveCodexFromDB (thus they are seeded) } - // CreateHistoryArchiveCodexFromDB already seeds the index cid to codex } if !archiveTorrentCreatedSuccessfully && !archiveCodexCreatedSuccessfully { return errors.Join(errTorrent, errCodex) @@ -703,6 +714,10 @@ func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) if !m.IsCodexReady() { return nil } + // do not seed if already seeding + if m.IsSeedingHistoryArchiveCodex(communityID) { + return nil + } exists, err := m.codexIndexFileExists(communityID) if err != nil { return err @@ -726,6 +741,12 @@ func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) } func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) { + if !m.IsCodexReady() { + return + } + if !m.IsSeedingHistoryArchiveCodex(communityID) { + return + } if m.CodexIndexCidFileExists(communityID) { // get currently advertised index Cid cid, err := m.GetHistoryArchiveIndexCid(communityID) @@ -752,6 +773,9 @@ func (m *ArchiveManager) IsSeedingHistoryArchiveTorrent(communityID 
types.HexByt } func (m *ArchiveManager) IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool { + if !m.IsCodexReady() { + return false + } if m.CodexIndexCidFileExists(communityID) { cid, err := m.GetHistoryArchiveIndexCid(communityID) if err != nil { From dba2e0ea304a8ed609a59a10d3170226638ec79a Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 14:06:18 +0100 Subject: [PATCH 45/75] Use isReady in CreateCommunity as more conservative check --- protocol/messenger_communities.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 58ce9db402b..a834e83df33 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -2544,10 +2544,7 @@ func (m *Messenger) CreateCommunity(request *requests.CreateCommunity, createDef return nil, err } - isTorrentActivated := m.config.torrentConfig != nil && m.config.torrentConfig.Enabled - isCodexActivated := m.config.codexConfig != nil && m.config.codexConfig.Enabled - - if (isTorrentActivated || isCodexActivated) && communitySettings.HistoryArchiveSupportEnabled { + if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { go m.archiveManager.StartHistoryArchiveTasksInterval(community, messageArchiveInterval) } From 4604de5cde49e98d58525be637001f404f88da01 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Thu, 6 Nov 2025 14:10:32 +0100 Subject: [PATCH 46/75] Updates community filters and topics --- protocol/communities/manager_archive.go | 20 +++++++++++++++----- protocol/messenger_communities.go | 10 +++++----- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index d43f34adbb9..af00542b0fa 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -467,19 +467,29 @@ func (m *ArchiveManager) GetHistoryArchivePartitionStartTimestamp(communityID ty return 0, err } - topics := []messagingtypes.ContentTopic{} + filters, err := m.GetCommunityChatsFilters(communityID) + if err != nil { + m.logger.Error("failed to get community chats filters", zap.Error(err)) + return 0, err + } - if filter := m.messaging.ChatFilterByChatID(community.UniversalChatID()); filter != nil { - topics = append(topics, filter.ContentTopic()) + filter := m.messaging.ChatFilterByChatID(community.UniversalChatID()) + if filter != nil { + filters = append(filters, filter) } - if len(topics) == 0 { - // If we don't have universal topic, we likely don't have any chats + if len(filters) == 0 { + // If we don't have chat filters, we likely don't have any chats // associated to this community, which means there's nothing more // to do here return 0, nil } + topics := []messagingtypes.ContentTopic{} + for _, filter := range filters { + topics = append(topics, filter.ContentTopic()) + } + lastArchiveEndDateTimestamp, err := m.getLastMessageArchiveEndDate(communityID) if err != nil { m.logger.Error("failed to get last archive end date", zap.Error(err)) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index a834e83df33..0e084f5f7c0 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -3840,6 +3840,11 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community continue } + filter := m.messaging.ChatFilterByChatID(c.UniversalChatID()) + if filter != nil { + filters = append(filters, filter) + } + if 
len(filters) == 0 { m.logger.Debug("no filters or chats for this community starting interval", zap.String("id", c.IDString())) go m.archiveManager.StartHistoryArchiveTasksInterval(c, messageArchiveInterval) @@ -3852,11 +3857,6 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community topics = append(topics, filter.ContentTopic()) } - filter := m.messaging.ChatFilterByChatID(c.UniversalChatID()) - if filter != nil { - filters = append(filters, filter) - } - // First we need to know the timestamp of the latest waku message // we've received for this community, so we can request messages we've // possibly missed since then From 3799e2810112894745d5844f8e0d55a53a61d6d8 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 7 Nov 2025 09:48:09 +0100 Subject: [PATCH 47/75] allow archive download retry also for codex --- protocol/messenger_handler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 8109649e643..ac44deb0f5d 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1441,7 +1441,7 @@ func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, ind downloadTaskInfo, err := m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) if err != nil { logMsg := "failed to download history archive data" - if err == communities.ErrTorrentTimedout { + if err == communities.ErrTorrentTimedout || err == communities.ErrIndexCidTimedout { m.logger.Debug("downloading indexCid has timed out, trying once more...") downloadTaskInfo, err = m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) if err != nil { From 41e7d4e28f1237d985bb63ba7a6a165f334706ce Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 7 Nov 2025 16:27:50 +0100 Subject: [PATCH 48/75] Use UniversalChatID to dispatch archive message instead of MagnetlinkMessageChannelID --- protocol/messenger_communities.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 0e084f5f7c0..5b08a88856a 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/codex-storage/codex-go-bindings/codex" "github.com/golang/protobuf/proto" "github.com/google/uuid" "go.uber.org/zap" @@ -55,13 +56,14 @@ var grantUpdateInterval = 24 * time.Hour // 4 hours interval var grantInvokesProfileDispatchInterval = 4 * time.Hour +var importInitialDelay = time.Minute * 5 + const discordTimestampLayout = time.RFC3339 const ( importSlowRate = time.Second / 1 importFastRate = time.Second / 100 importMessagesChunkSize = 10 - importInitialDelay = time.Minute * 5 ) const ( @@ -3948,6 +3950,7 @@ func (m *Messenger) enableHistoryArchivesImportAfterDelay() { go func() { defer gocommon.LogOnPanic() time.Sleep(importInitialDelay) + m.importDelayer.once.Do(func() { close(m.importDelayer.wait) }) @@ -4158,7 +4161,7 @@ func (m *Messenger) dispatchMagnetlinkMessage(communityID string) error { return err } - chatID := community.MagnetlinkMessageChannelID() + chatID := community.UniversalChatID() rawMessage := messagingtypes.RawMessage{ LocalChatID: chatID, Sender: community.PrivateKey(), @@ -4182,7 +4185,6 @@ func (m *Messenger) dispatchMagnetlinkMessage(communityID string) error { } func (m *Messenger) dispatchIndexCidMessage(communityID string) error { - community, err := 
m.communitiesManager.GetByIDString(communityID) if err != nil { return err @@ -4203,7 +4205,8 @@ func (m *Messenger) dispatchIndexCidMessage(communityID string) error { return err } - chatID := community.MagnetlinkMessageChannelID() + chatID := community.UniversalChatID() + rawMessage := messagingtypes.RawMessage{ LocalChatID: chatID, Sender: community.PrivateKey(), @@ -4223,6 +4226,7 @@ func (m *Messenger) dispatchIndexCidMessage(communityID string) error { if err != nil { return err } + return m.communitiesManager.UpdateIndexCidMessageClock(community.ID(), indexCidMessage.Clock) } @@ -4258,6 +4262,7 @@ func (m *Messenger) EnableCommunityHistoryArchiveProtocol() error { if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { nodeConfig.CodexConfig.Enabled = true + err = m.settings.SaveSetting("node-config", nodeConfig) if err != nil { return err @@ -4265,6 +4270,7 @@ func (m *Messenger) EnableCommunityHistoryArchiveProtocol() error { m.config.codexConfig = &nodeConfig.CodexConfig m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) + err = m.archiveManager.StartCodexClient() if err != nil { return err @@ -4313,15 +4319,6 @@ func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { return nil } -func (m *Messenger) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { - messageArchiveInterval = duration - return messageArchiveInterval, nil -} - -func (m *Messenger) GetMessageArchiveInterval() (time.Duration, error) { - return messageArchiveInterval, nil -} - func (m *Messenger) GetCommunitiesSettings() ([]communities.CommunitySettings, error) { settings, err := m.communitiesManager.GetCommunitiesSettings() if err != nil { @@ -5180,3 +5177,11 @@ func (m *Messenger) GetArchiveDistributionPreference() (string, error) { func (m *Messenger) CodexIndexCidFileExists(communityID types.HexBytes) bool { return m.archiveManager.CodexIndexCidFileExists(communityID) } + +func (m *Messenger) Connect(peerId string, addrs []string) error { + return m.archiveManager.GetCodexClient().Connect(peerId, addrs) +} + +func (m *Messenger) Debug() (codex.DebugInfo, error) { + return m.archiveManager.GetCodexClient().Debug() +} From d8dc074c3b5026c23fb913a87f9b5dac2d96f816 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 7 Nov 2025 16:30:58 +0100 Subject: [PATCH 49/75] Improve archive functional test --- api/defaults.go | 8 +++ params/config.go | 3 + protocol/messenger_config.go | 20 ++++++ protocol/requests/create_account.go | 8 ++- services/ext/api.go | 31 +++------ services/ext/service.go | 2 + tests-functional/clients/services/wakuext.py | 20 ++++-- tests-functional/clients/status_backend.py | 2 + .../tests/test_wakuext_community_archives.py | 63 ++++++++++++++----- 9 files changed, 113 insertions(+), 44 deletions(-) diff --git a/api/defaults.go b/api/defaults.go index bff69c9b72a..9e02af2aa67 100644 --- a/api/defaults.go +++ b/api/defaults.go @@ -354,6 +354,14 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc nodeConfig.CodexConfig.Enabled = *request.CodexConfigEnabled } + if request.ImportInitialDelay != nil { + nodeConfig.ImportInitialDelay = *request.ImportInitialDelay + } + + if request.MessageArchiveInterval != nil { + nodeConfig.MessageArchiveInterval = *request.MessageArchiveInterval + } + nodeConfig.CodexConfig = params.CodexConfig{ Enabled: nodeConfig.CodexConfig.Enabled, HistoryArchiveDataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "archivedata"), diff --git a/params/config.go b/params/config.go 
index ba44b57930b..83b6eaed365 100644 --- a/params/config.go +++ b/params/config.go @@ -217,6 +217,9 @@ type NodeConfig struct { CodexConfig CodexConfig OutputMessageCSVEnabled bool + + ImportInitialDelay int + MessageArchiveInterval int } // WalletConfig extra configuration for wallet.Service. diff --git a/protocol/messenger_config.go b/protocol/messenger_config.go index 8209bbfd20d..bc2fc1bd9cb 100644 --- a/protocol/messenger_config.go +++ b/protocol/messenger_config.go @@ -348,3 +348,23 @@ func WithCodexConfig(codexConfig *params.CodexConfig) func(c *config) error { return nil } } + +func WithImportInitialDelay(delay int) func(c *config) error { + return func(c *config) error { + if delay <= 0 { + return nil + } + importInitialDelay = time.Duration(delay) * time.Second + return nil + } +} + +func WithMessageArchiveInterval(interval int) func(c *config) error { + return func(c *config) error { + if interval <= 0 { + return nil + } + messageArchiveInterval = time.Duration(interval) * time.Second + return nil + } +} diff --git a/protocol/requests/create_account.go b/protocol/requests/create_account.go index 5395c0be9c9..5864e77b842 100644 --- a/protocol/requests/create_account.go +++ b/protocol/requests/create_account.go @@ -77,9 +77,11 @@ type CreateAccount struct { WalletConfig WalletSecretsConfig - TorrentConfigEnabled *bool - TorrentConfigPort *int - CodexConfigEnabled *bool + TorrentConfigEnabled *bool + TorrentConfigPort *int + CodexConfigEnabled *bool + ImportInitialDelay *int + MessageArchiveInterval *int APIConfig *APIConfig `json:"apiConfig"` diff --git a/services/ext/api.go b/services/ext/api.go index 60294b5c15c..dce4b82ddc4 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -3,9 +3,9 @@ package ext import ( "context" "crypto/ecdsa" - "errors" "time" + "github.com/codex-storage/codex-go-bindings/codex" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" "go.uber.org/zap" @@ -1190,27 +1190,6 @@ func (api *PublicAPI) DisableCommunityHistoryArchiveProtocol() error { return api.service.messenger.DisableCommunityHistoryArchiveProtocol() } -func (api *PublicAPI) GetMessageArchiveInterval() (float64, error) { - interval, err := api.service.messenger.GetMessageArchiveInterval() - if err != nil { - return 0, err - } - return float64(interval) / float64(time.Second), nil -} - -func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { - if duration <= 0 { - return 0, errors.New("duration must be greater than zero") - } - - d := duration * time.Second - updatedInterval, err := api.service.messenger.UpdateMessageArchiveInterval(d) - if err != nil { - return 0, err - } - return updatedInterval / time.Second, nil -} - func (api *PublicAPI) SubscribeToPubsubTopic(topic string, optPublicKey string) error { var publicKey *ecdsa.PublicKey if optPublicKey != "" { @@ -1624,3 +1603,11 @@ func (api *PublicAPI) PeerID() string { func (m *PublicAPI) HasCommunityArchive(communityID types.HexBytes) bool { return m.service.messenger.CodexIndexCidFileExists(communityID) } + +func (m *PublicAPI) Connect(peerId string, addrs []string) error { + return m.service.messenger.Connect(peerId, addrs) +} + +func (m *PublicAPI) Debug() (codex.DebugInfo, error) { + return m.service.messenger.Debug() +} diff --git a/services/ext/service.go b/services/ext/service.go index 8b03924d8d2..c82c2d258c2 100644 --- a/services/ext/service.go +++ b/services/ext/service.go @@ -342,6 +342,8 @@ func buildMessengerOptions( 
protocol.WithClusterConfig(config.ClusterConfig), protocol.WithTorrentConfig(&config.TorrentConfig), protocol.WithCodexConfig(&config.CodexConfig), + protocol.WithImportInitialDelay(config.ImportInitialDelay), + protocol.WithMessageArchiveInterval(config.MessageArchiveInterval), protocol.WithHTTPServer(httpServer), protocol.WithRPCClient(rpcClient), protocol.WithMessageCSV(config.OutputMessageCSVEnabled), diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index 7d3cd56f53e..4fe1e2c4162 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -772,11 +772,6 @@ def get_verification_request_sent_to(self, contact_id: str): response = self.rpc_request("getVerificationRequestSentTo", params) return response - def update_message_archive_interval(self, interval_seconds: int): - params = [interval_seconds] - response = self.rpc_request("updateMessageArchiveInterval", params) - return response - def has_community_archive(self, community_id: str): params = [community_id] response = self.rpc_request("hasCommunityArchive", params) @@ -786,3 +781,18 @@ def set_archive_distribution_preference(self, preference: str): params = [{"preference": preference}] response = self.rpc_request("setArchiveDistributionPreference", params) return response + + def toggle_use_mail_servers(self, enabled: bool): + params = [enabled] + response = self.rpc_request("toggleUseMailservers", params) + return response + + def connect(self, peerId: str, addrs: list = []): + params = [peerId, addrs] + response = self.rpc_request("connect", params) + return response + + def debug(self): + params = [] + response = self.rpc_request("debug", params) + return response diff --git a/tests-functional/clients/status_backend.py b/tests-functional/clients/status_backend.py index caea723e507..c3f1b0c286f 100644 --- a/tests-functional/clients/status_backend.py +++ b/tests-functional/clients/status_backend.py @@ -280,6 +280,8 @@ def _create_account_request(self, user, **kwargs): "wsPort": constants.STATUS_CONNECTOR_WS_PORT, }, "codexConfigEnabled": kwargs.get("codex_config_enabled", False), + "importInitialDelay": kwargs.get("import_initial_delay", None), + "messageArchiveInterval": kwargs.get("message_archive_interval", None), "torrentConfigEnabled": False, "torrentConfigPort": 9025, "thirdpartyServicesEnabled": True, diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 671cc12c6f0..27ce2d0dc12 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -11,21 +11,38 @@ class TestCommunityArchives(MessengerSteps): @pytest.fixture(autouse=True) def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" - self.creator = backend_new_profile("creator", codex_config_enabled=True) + # Community owner + self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=10) + # Define codex as archive distribution preference self.creator.wakuext_service.set_archive_distribution_preference("codex") - self.creator.wakuext_service.update_message_archive_interval(15) - self.member = backend_new_profile("member", codex_config_enabled=True) + # Create a first member that will join the community first + self.member = backend_new_profile("member", codex_config_enabled=True, 
import_initial_delay=5) + # Define codex as archive distribution preference self.member.wakuext_service.set_archive_distribution_preference("codex") - self.member.wakuext_service.update_message_archive_interval(15) - self.another_member = backend_new_profile("member", codex_config_enabled=True) + # Create another member that will join the community later after the first message is sent + self.another_member = backend_new_profile("member", codex_config_enabled=True, import_initial_delay=5) + # Define codex as archive distribution preference self.another_member.wakuext_service.set_archive_distribution_preference("codex") - self.another_member.wakuext_service.update_message_archive_interval(15) self.fake_address = "0x" + str(uuid4())[:8] self.community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) - self.join_community(member=self.member, admin=self.creator) + + # Ensure that no community archive exists initially + has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) + assert has_archive is False, "Creator should not have community archive initially" + has_archive = self.member.wakuext_service.has_community_archive(self.community_id) + assert has_archive is False, "Member should not have community archive initially" + has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) + assert has_archive is False, "Another member should not have community archive initially" + + # Connect members to community codex client + # In the real life, this would be done via DHT discovery + info = self.creator.wakuext_service.debug() + self.member.wakuext_service.connect(info["id"], info["addrs"]) + self.another_member.wakuext_service.connect(info["id"], info["addrs"]) + self.display_name = "chat_" + str(uuid4()) self.chat_payload = { "identity": { @@ -45,7 +62,9 @@ def test_community_archive_index_exists(self): chat_id = create_resp.get("chats")[0].get("id") # Wait for member to receive chat creation signal + self.join_community(member=self.member, admin=self.creator) self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) + # Send a message to the community chat text = f"Hi @{self.member.public_key}" send_resp = self.creator.wakuext_service.send_chat_message(chat_id, text) @@ -57,13 +76,29 @@ def test_community_archive_index_exists(self): member_msgs_resp = self.member.wakuext_service.chat_messages(chat_id) assert member_msgs_resp.get("messages")[0].get("text") == text - # self.join_community(member=self.another_member, admin=self.creator) - # self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) - # # member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) - # messages = member_msgs_resp.get("messages", []) - - time.sleep(60) + # Just make sure that archive are generated + time.sleep(10) # Ensure that the community archive is available for the creator has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) - assert has_archive is True + assert has_archive is True, "Creator should have community archive after messages are sent" + + # TODO: try to disable the store node + # self.member.wakuext_service.toggle_use_mail_servers(enabled=False) + + # Another member joins and checks for the message + self.join_community(member=self.another_member, admin=self.creator) + self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, 
event_pattern=chat_id, timeout=10) + member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) + assert member_msgs_resp.get("messages")[0].get("text") == text + + # Ensure that the another member received the archive dispatch message + time.sleep(5) + + has_archive = self.member.wakuext_service.has_community_archive(self.community_id) + assert has_archive is True, "Member should have community archive after messages are sent" + + has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) + assert has_archive is True, "Another member should have community archive after messages are sent" + + # TODO: Verify in db From c4b398f8b2f33c6f3d15f95ee8583215b11ba001 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 7 Nov 2025 16:55:58 +0100 Subject: [PATCH 50/75] Fix archive integration test --- tests-functional/tests/test_wakuext_community_archives.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 27ce2d0dc12..eb6e56b5381 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -83,14 +83,14 @@ def test_community_archive_index_exists(self): has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) assert has_archive is True, "Creator should have community archive after messages are sent" - # TODO: try to disable the store node + # TODO: try to disable the store node ?? # self.member.wakuext_service.toggle_use_mail_servers(enabled=False) # Another member joins and checks for the message self.join_community(member=self.another_member, admin=self.creator) self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) - assert member_msgs_resp.get("messages")[0].get("text") == text + assert member_msgs_resp.get("messages") is None, "Another member should not have messages before archive is dispatched" # Ensure that the another member received the archive dispatch message time.sleep(5) @@ -101,4 +101,7 @@ def test_community_archive_index_exists(self): has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) assert has_archive is True, "Another member should have community archive after messages are sent" + member_msgs_resp = self.member.wakuext_service.chat_messages(chat_id) + assert member_msgs_resp.get("messages")[0].get("text") == text, "Member should have the message after archive is dispatched" + # TODO: Verify in db From c4f5074f61419e372d0659fd27b36172108547ce Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Sun, 9 Nov 2025 22:32:53 +0100 Subject: [PATCH 51/75] Improves the healthcheck so that the tests start as soon as possible --- tests-functional/docker-compose.waku.yml | 113 +++++++++++++---------- 1 file changed, 62 insertions(+), 51 deletions(-) diff --git a/tests-functional/docker-compose.waku.yml b/tests-functional/docker-compose.waku.yml index 49a555ebe89..27648055be6 100644 --- a/tests-functional/docker-compose.waku.yml +++ b/tests-functional/docker-compose.waku.yml @@ -1,62 +1,73 @@ services: boot-1: image: wakuorg/nwaku:v0.36.0 - entrypoint: [ - "/usr/bin/wakunode", - "--discv5-discovery=true", - "--discv5-enr-auto-update=True", - "--discv5-udp-port=9000", - "--cluster-id=16", - # Docker DNS server - 
"--dns-addrs-name-server=127.0.0.11", - "--dns4-domain-name=boot-1", - "--filter=true", - "--keep-alive=true", - "--lightpush=true", - "--log-level=INFO", - "--max-connections=18000", - "--nodekey=03ce9122016be1f80a23df36525103bed1c7c4b9a0ff7605d97553ed8ed96bcf", - "--peer-exchange=true", - "--relay=true", - "--rest-address=0.0.0.0", - "--rest-admin", - "--shard=32", - "--shard=64", - "--staticnode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", - "--storenode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", - "--tcp-port=60001" - ] + entrypoint: + [ + "/usr/bin/wakunode", + "--discv5-discovery=true", + "--discv5-enr-auto-update=True", + "--discv5-udp-port=9000", + "--cluster-id=16", + # Docker DNS server + "--dns-addrs-name-server=127.0.0.11", + "--dns4-domain-name=boot-1", + "--filter=true", + "--keep-alive=true", + "--lightpush=true", + "--log-level=INFO", + "--max-connections=18000", + "--nodekey=03ce9122016be1f80a23df36525103bed1c7c4b9a0ff7605d97553ed8ed96bcf", + "--peer-exchange=true", + "--relay=true", + "--rest-address=0.0.0.0", + "--rest-admin", + "--shard=32", + "--shard=64", + "--staticnode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", + "--storenode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", + "--tcp-port=60001" + ] depends_on: - store healthcheck: - test: ["CMD-SHELL", "curl -X GET -H 'Content-Type: application/json' http://0.0.0.0:8645 || exit 0"] + test: [ "CMD-SHELL", "wget --spider -q http://0.0.0.0:8645/debug/v1/info || exit 1" ] + interval: 2s + timeout: 2s + start_period: 20s + start_interval: 2s + retries: 10 store: image: wakuorg/nwaku:v0.36.0 - entrypoint: [ - "/usr/bin/wakunode", - "--discv5-discovery=true", - "--discv5-enr-auto-update=True", - "--discv5-udp-port=9000", - "--cluster-id=16", - # Docker DNS server - "--dns-addrs-name-server=127.0.0.11", - "--dns4-domain-name=store", - "--keep-alive=true", - "--log-level=INFO", - "--max-connections=18000", - "--nodekey=3190bc9b55b18dbc171997a7a67abcd5bbf0c81002ad9617b1cb67f2f15daa64", - "--peer-exchange=false", - "--relay=true", - "--rest-address=0.0.0.0", - "--rest-admin", - "--shard=32", - "--shard=64", - "--staticnode=/dns4/boot-1/tcp/60001/p2p/16Uiu2HAm3vFYHkGRURyJ6F7bwDyzMLtPEuCg4DU89T7km2u8Fjyb", - "--store=true", - "--tcp-port=60002" - ] + entrypoint: + [ + "/usr/bin/wakunode", + "--discv5-discovery=true", + "--discv5-enr-auto-update=True", + "--discv5-udp-port=9000", + "--cluster-id=16", + # Docker DNS server + "--dns-addrs-name-server=127.0.0.11", + "--dns4-domain-name=store", + "--keep-alive=true", + "--log-level=INFO", + "--max-connections=18000", + "--nodekey=3190bc9b55b18dbc171997a7a67abcd5bbf0c81002ad9617b1cb67f2f15daa64", + "--peer-exchange=false", + "--relay=true", + "--rest-address=0.0.0.0", + "--rest-admin", + "--shard=32", + "--shard=64", + "--staticnode=/dns4/boot-1/tcp/60001/p2p/16Uiu2HAm3vFYHkGRURyJ6F7bwDyzMLtPEuCg4DU89T7km2u8Fjyb", + "--store=true", + "--tcp-port=60002" + ] healthcheck: - test: ["CMD-SHELL", "curl -X GET -H 'Content-Type: application/json' http://0.0.0.0:8645 || exit 0"] - + test: [ "CMD-SHELL", "wget --spider -q http://0.0.0.0:8645/debug/v1/info || exit 1" ] + interval: 2s + timeout: 2s + start_period: 20s + start_interval: 2s + retries: 10 From 6c352b2421f4951bdee1975c7588e75e07469c0f Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Sun, 9 Nov 2025 22:33:39 +0100 Subject: [PATCH 52/75] Adds more logs and makes the easier to find thanks 
to [CODEX] prefix in relevant logs --- .../communities/codex_archive_downloader.go | 10 ++-- .../communities/codex_index_downloader.go | 12 ++-- protocol/communities/manager.go | 14 +++++ protocol/communities/manager_archive.go | 60 ++++++++++--------- protocol/communities/manager_archive_file.go | 24 +++++--- protocol/messenger_communities.go | 47 +++++++++++---- .../messenger_communities_import_discord.go | 14 +++-- protocol/messenger_handler.go | 47 +++++++++++---- .../tests/test_wakuext_community_archives.py | 2 +- 9 files changed, 155 insertions(+), 75 deletions(-) diff --git a/protocol/communities/codex_archive_downloader.go b/protocol/communities/codex_archive_downloader.go index c56adb88df6..98b072fa277 100644 --- a/protocol/communities/codex_archive_downloader.go +++ b/protocol/communities/codex_archive_downloader.go @@ -246,7 +246,7 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { err := d.triggerSingleArchiveDownload(archiveHash, archiveCid, archiveCancel) if err != nil { // Don't proceed to polling if trigger failed (could be cancellation or other error) - d.logger.Debug("failed to trigger download", + d.logger.Debug("[CODEX] failed to trigger download", zap.String("cid", archiveCid), zap.String("hash", archiveHash), zap.Error(err)) @@ -262,13 +262,13 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { for { select { case <-timeout: - d.logger.Debug("timeout waiting for CID to be available locally", + d.logger.Debug("[CODEX] timeout waiting for CID to be available locally", zap.String("cid", archiveCid), zap.String("hash", archiveHash), zap.Duration("timeout", d.pollingTimeout)) return // Exit without success callback or count increment case <-archiveCancel: - d.logger.Debug("download cancelled", + d.logger.Debug("[CODEX] download cancelled", zap.String("cid", archiveCid), zap.String("hash", archiveHash)) return // Exit without success callback or count increment @@ -276,7 +276,7 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { hasCid, err := d.codexClient.HasCid(archiveCid) if err != nil { // Log error but continue polling - d.logger.Debug("error checking CID availability", + d.logger.Debug("[CODEX] error checking CID availability", zap.String("cid", archiveCid), zap.String("hash", archiveHash), zap.Error(err)) @@ -288,7 +288,7 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { d.totalDownloadedArchivesCount++ d.mu.Unlock() - d.logger.Debug("archive download completed", + d.logger.Debug("[CODEX] archive download completed", zap.String("cid", archiveCid), zap.String("totalDownloadedArchivesCount", fmt.Sprintf("%d", d.totalDownloadedArchivesCount)), ) diff --git a/protocol/communities/codex_index_downloader.go b/protocol/communities/codex_index_downloader.go index e375229af8a..7f21b8cd91c 100644 --- a/protocol/communities/codex_index_downloader.go +++ b/protocol/communities/codex_index_downloader.go @@ -88,7 +88,7 @@ func (d *CodexIndexDownloader) GotManifest() <-chan struct{} { d.mu.Lock() d.downloadError = err d.mu.Unlock() - d.logger.Debug("failed to fetch manifest", + d.logger.Debug("[CODEX] failed to fetch manifest", zap.String("indexCid", d.indexCid), zap.Error(err)) // Don't close channel on error - let timeout handle it @@ -100,7 +100,7 @@ func (d *CodexIndexDownloader) GotManifest() <-chan struct{} { d.mu.Lock() d.downloadError = fmt.Errorf("manifest CID mismatch: expected %s, got %s", d.indexCid, manifest.Cid) d.mu.Unlock() - d.logger.Debug("manifest CID mismatch", + d.logger.Debug("[CODEX] manifest CID mismatch", 
zap.String("expected", d.indexCid), zap.String("got", manifest.Cid)) // Don't close channel on error - let timeout handle it @@ -160,7 +160,7 @@ func (d *CodexIndexDownloader) DownloadIndexFile() { d.mu.Lock() d.downloadError = fmt.Errorf("failed to create temporary file: %w", err) d.mu.Unlock() - d.logger.Debug("failed to create temporary file", + d.logger.Debug("[CODEX] failed to create temporary file", zap.String("filePath", d.filePath), zap.Error(err)) return @@ -185,7 +185,7 @@ func (d *CodexIndexDownloader) DownloadIndexFile() { d.mu.Lock() d.downloadError = fmt.Errorf("failed to download index file: %w", err) d.mu.Unlock() - d.logger.Debug("failed to download index file", + d.logger.Debug("[CODEX] failed to download index file", zap.String("indexCid", d.indexCid), zap.String("filePath", d.filePath), zap.String("tmpPath", tmpPath), @@ -198,7 +198,7 @@ func (d *CodexIndexDownloader) DownloadIndexFile() { d.mu.Lock() d.downloadError = fmt.Errorf("failed to close temporary file: %w", err) d.mu.Unlock() - d.logger.Debug("failed to close temporary file", + d.logger.Debug("[CODEX] failed to close temporary file", zap.String("tmpPath", tmpPath), zap.Error(err)) return @@ -210,7 +210,7 @@ func (d *CodexIndexDownloader) DownloadIndexFile() { d.mu.Lock() d.downloadError = fmt.Errorf("failed to rename temporary file to final destination: %w", err) d.mu.Unlock() - d.logger.Debug("failed to rename temporary file to final destination", + d.logger.Debug("[CODEX] failed to rename temporary file to final destination", zap.String("tmpPath", tmpPath), zap.String("filePath", d.filePath), zap.Error(err)) diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 5e0e0c888ed..9aa6e514e6c 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -2327,7 +2327,15 @@ func (m *Manager) handleCommunityDescriptionMessageCommon(community *Community, cdMagnetlinkClock := community.config.CommunityDescription.ArchiveMagnetlinkClock cdIndexCidClock := community.config.CommunityDescription.ArchiveIndexCidClock + + m.logger.Debug("[CODEX][handleCommunityDescription] handling community description archive info", + zap.String("communityID", community.IDString()), + zap.Uint64("magnetlinkClock", cdMagnetlinkClock), + zap.Uint64("indexCidClock", cdIndexCidClock), + ) + if !hasCommunityArchiveInfo { + m.logger.Debug("[CODEX][handleCommunityDescription] saving community archive info: hasCommunityArchiveInfo=false") err = m.persistence.SaveCommunityArchiveInfo(community.ID(), cdMagnetlinkClock, 0, cdIndexCidClock) if err != nil { return nil, err @@ -2349,7 +2357,13 @@ func (m *Manager) handleCommunityDescriptionMessageCommon(community *Community, if err != nil { return nil, err } + m.logger.Debug("[CODEX][handleCommunityDescription] comparing index CID clocks", + zap.String("communityID", community.IDString()), + zap.Uint64("cdIndexCidClock", cdIndexCidClock), + zap.Uint64("indexCidClock", indexCidClock), + ) if cdIndexCidClock > indexCidClock { + m.logger.Debug("[CODEX][handleCommunityDescription] updating index CID clock (cdIndexCidClock > indexCidClock)") err = m.persistence.UpdateIndexCidMessageClock(community.ID(), cdIndexCidClock) if err != nil { return nil, err diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index af00542b0fa..0b153175eb6 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -122,7 +122,7 @@ func (m *ArchiveManager) SetOnline(online bool) { if 
m.codexConfig != nil && m.codexConfig.Enabled && !codexStarted { err := m.StartCodexClient() if err != nil { - m.logger.Error("couldn't start codex client", zap.Error(err)) + m.logger.Error("[CODEX] couldn't start codex client", zap.Error(err)) } } } @@ -227,7 +227,7 @@ func (m *ArchiveManager) ensureCodexDiscoveryPort(config *params.CodexConfig) er if available { return nil } - m.logger.Warn("codex discovery port already in use, selecting a free one", zap.Int("port", port)) + m.logger.Warn("[CODEX] discovery port already in use, selecting a free one", zap.Int("port", port)) } for range 10 { @@ -325,7 +325,7 @@ func (m *ArchiveManager) StopCodexClient() error { errs := []error{} if m.isCodexClientStarted { - m.logger.Info("Stopping codex client") + m.logger.Info("[CODEX] Stopping codex client") e := m.codexClient.Stop() if e != nil { @@ -523,6 +523,7 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, distributionPreference, err := m.persistence.GetArchiveDistributionPreference() if err != nil { // fallback to codex + m.logger.Debug("[CODEX][CreateAndSeedHistoryArchive] failed to get archive distribution preference - falling back to codex", zap.Error(err)) distributionPreference = params.ArchiveDistributionMethodCodex } @@ -550,15 +551,15 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, codexArchiveIDs, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) if errCodex != nil { archiveCodexCreatedSuccessfully = false - m.logger.Error("failed to create history archive codex", zap.Error(errCodex)) + m.logger.Error("[CODEX][CreateAndSeedHistoryArchive] failed to create history archive codex", zap.Error(errCodex)) } else { if len(codexArchiveIDs) == 0 { // no new codex archives were created - no need to distribute new index cid // but we need to (re)start seeding what we stopped above archiveCodexCreatedSuccessfully = false - m.logger.Debug("no codex archive ids were created") + m.logger.Debug("[CODEX][CreateAndSeedHistoryArchive] no new codex archive ids were created - re-seeding existing index cid") if err = m.SeedHistoryArchiveIndexCid(communityID); err != nil { - m.logger.Error("failed to seed existing history archive codex index cid", zap.Error(err)) + m.logger.Error("[CODEX][CreateAndSeedHistoryArchive] failed to seed existing history archive codex index cid", zap.Error(err)) } } // else: we created new codex archives and they are already published to codex @@ -762,15 +763,15 @@ func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes cid, err := m.GetHistoryArchiveIndexCid(communityID) if err != nil { - m.logger.Debug("failed to get history archive index CID", zap.Error(err)) + m.logger.Debug("[CODEX] failed to get history archive index CID", zap.Error(err)) return } - m.logger.Debug("Unseeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) + m.logger.Debug("[CODEX] Unseeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) err = m.codexClient.RemoveCid(cid) if err != nil { - m.logger.Error("failed to remove CID from Codex", zap.Error(err)) + m.logger.Error("[CODEX] failed to remove CID from Codex", zap.Error(err)) } } } @@ -789,12 +790,12 @@ func (m *ArchiveManager) IsSeedingHistoryArchiveCodex(communityID types.HexBytes if m.CodexIndexCidFileExists(communityID) { cid, err := m.GetHistoryArchiveIndexCid(communityID) if err != nil { - 
m.logger.Debug("failed to read Codex index CID", zap.String("communityID", communityID.String()), zap.Error(err)) + m.logger.Debug("[CODEX] failed to read Codex index CID", zap.String("communityID", communityID.String()), zap.Error(err)) return false } hasCid, err := m.codexClient.HasCid(cid) if err != nil { - m.logger.Debug("failed to verify Codex CID availability", zap.String("communityID", communityID.String()), zap.String("cid", cid), zap.Error(err)) + m.logger.Debug("[CODEX] failed to verify Codex CID availability", zap.String("communityID", communityID.String()), zap.String("cid", cid), zap.Error(err)) return false } return hasCid @@ -999,7 +1000,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex indexDownloaderCancel := make(chan struct{}) if err := m.ensureCodexCommunityDir(communityID); err != nil { - m.logger.Error("failed to ensure Codex archive directory", zap.String("communityID", id), zap.Error(err)) + m.logger.Error("[CODEX] failed to ensure Codex archive directory", zap.String("communityID", id), zap.Error(err)) return nil, err } @@ -1007,12 +1008,12 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) indexDownloader := NewCodexIndexDownloader(m.codexClient, indexCid, indexFilePath, indexDownloaderCancel, m.logger) - m.logger.Debug("fetching history index from Codex", zap.String("indexCid", indexCid)) + m.logger.Debug("[CODEX] fetching history index from Codex", zap.String("indexCid", indexCid)) select { case <-timeout: return nil, ErrIndexCidTimedout case <-cancelTask: - m.logger.Debug("cancelled fetching history index from Codex") + m.logger.Debug("[CODEX] cancelled fetching history index from Codex") close(indexDownloaderCancel) // Forward cancellation to index downloader downloadTaskInfo.Cancelled = true return downloadTaskInfo, nil @@ -1021,13 +1022,15 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex err := indexDownloader.GetError() if indexDownloader.GetDatasetSize() == 0 || err != nil { if err != nil { - m.logger.Error("failed to fetch Codex manifest", zap.Error(err)) + m.logger.Error("[CODEX] failed to fetch Codex manifest", zap.Error(err)) } else { - m.logger.Error("failed to fetch Codex manifest - dataset size is 0") + m.logger.Error("[CODEX] failed to fetch Codex manifest - dataset size is 0") } return nil, fmt.Errorf("failed to fetch Codex manifest for CID %s: %w", indexCid, err) } + m.logger.Debug("[CODEX] got manifest of the index file from Codex", zap.String("indexCid", indexCid), zap.Int64("datasetSize", indexDownloader.GetDatasetSize())) + // Publish manifest fetched signal m.publisher.publish(&Subscription{ ManifestFetchedSignal: &signal.ManifestFetchedSignal{ @@ -1039,7 +1042,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex // Start downloading the index file indexDownloader.DownloadIndexFile() - m.logger.Debug("downloading history archive index with CID:", zap.String("indexCid", indexCid)) + m.logger.Debug("[CODEX] downloading history archive index with CID:", zap.String("indexCid", indexCid)) ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() @@ -1047,18 +1050,19 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex for { select { case <-cancelTask: - m.logger.Debug("cancelled downloading archive index") + m.logger.Debug("[CODEX] cancelled downloading archive index") close(indexDownloaderCancel) // 
Forward cancellation to index downloader downloadTaskInfo.Cancelled = true return downloadTaskInfo, nil case <-ticker.C: err := indexDownloader.GetError() if err != nil { - m.logger.Error("error during index download", zap.Error(err)) + m.logger.Error("[CODEX] error during index download", zap.Error(err)) return nil, err } if indexDownloader.IsDownloadComplete() { + m.logger.Info("[CODEX] history archive index download completed", zap.String("indexCid", indexCid)) // Publish index download completed signal m.publisher.publish(&Subscription{ @@ -1070,7 +1074,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex err := m.writeCodexIndexCidToFile(communityID, indexCid) if err != nil { - m.logger.Error("failed to write Codex index CID to file", zap.Error(err)) + m.logger.Error("[CODEX] failed to write Codex index CID to file", zap.Error(err)) return nil, err } @@ -1085,7 +1089,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex } if len(existingArchiveIDs) == len(index.Archives) { - m.logger.Debug("download cancelled, no new archives") + m.logger.Debug("[CODEX] aborting download, no new archives") return downloadTaskInfo, nil } @@ -1102,7 +1106,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex archiveDownloader.SetOnArchiveDownloaded(func(hash string, from, to uint64) { err = m.persistence.SaveMessageArchiveID(communityID, hash) if err != nil { - m.logger.Error("couldn't save message archive ID", zap.Error(err)) + m.logger.Error("[CODEX] couldn't save message archive ID", zap.Error(err)) } m.publisher.publish(&Subscription{ HistoryArchiveDownloadedSignal: &signal.HistoryArchiveDownloadedSignal{ @@ -1112,12 +1116,14 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex }, }) - m.logger.Debug("archive downloaded successfully", + m.logger.Debug("[CODEX] archive downloaded successfully", zap.String("hash", hash), zap.Uint64("from", from), zap.Uint64("to", to)) }) + m.logger.Debug("[CODEX] starting downloading individual archives from Codex") + archiveDownloader.StartDownload() m.publisher.publish(&Subscription{ @@ -1133,7 +1139,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex for { select { case <-cancelTask: - m.logger.Debug("cancelled downloading individual archives") + m.logger.Debug("[CODEX] cancelled downloading individual archives") close(archiveDownloaderCancel) downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() downloadTaskInfo.Cancelled = true @@ -1152,7 +1158,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex // Always update final progress downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() - m.logger.Info("Downloading archives from Codex finished", + m.logger.Info("[CODEX] downloading archives from Codex completed", zap.Int("totalArchives", downloadTaskInfo.TotalArchivesCount), zap.Int("downloadedArchives", downloadTaskInfo.TotalDownloadedArchivesCount)) @@ -1167,7 +1173,7 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex if archiveDownloader.IsCancelled() { // archive was cancelled, but it does not mean that // no single archive was downloaded before cancellation - m.logger.Debug("archive download was cancelled") + m.logger.Debug("[CODEX] archive download was cancelled") downloadTaskInfo.Cancelled = true return downloadTaskInfo, nil } @@ -1176,7 +1182,7 @@ 
func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex } else { // Update progress downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() - m.logger.Debug("downloading archives", + m.logger.Debug("[CODEX] downloading archives in progress", zap.Int("completed", downloadTaskInfo.TotalDownloadedArchivesCount), zap.Int("total", downloadTaskInfo.TotalArchivesCount), zap.Int("inProgress in this session", archiveDownloader.GetPendingArchivesCount()), diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index ea0be5c4d37..087ac42f1cd 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -407,7 +407,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte if len(messages) == 0 { // No need to create an archive with zero messages - m.logger.Debug("no messages in this partition") + m.logger.Debug("[CODEX] no messages in this partition") from = to to = to.Add(partition) if to.After(endDate) { @@ -416,7 +416,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte continue } - m.logger.Debug("creating Codex archive with messages", zap.Int("messagesCount", len(messages))) + m.logger.Debug("[CODEX] creating Codex archive with messages", zap.Int("messagesCount", len(messages))) // Not only do we partition messages, we also chunk them // roughly by size, such that each chunk will not exceed a given @@ -427,8 +427,14 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte for _, msg := range messages { msgSize := len(msg.Payload) + len(msg.Sig) + m.logger.Debug("[CODEX] message size", + zap.Int("messageSize", msgSize), + zap.String("contentTopic", string(msg.Topic[:])), + zap.ByteString("payload[0:31]", msg.Payload[:min(32, len(msg.Payload))]), + ) if msgSize > maxArchiveSizeInBytes { // we drop messages this big + m.logger.Debug("[CODEX] dropping message due to size", zap.Int("messageSize", msgSize)) continue } @@ -459,11 +465,11 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte // upload archive to codex and get CID back cid, err := m.codexClient.UploadArchive(encodedArchive) if err != nil { - m.logger.Error("failed to upload to codex", zap.Error(err)) + m.logger.Error("[CODEX] failed to upload to codex", zap.Error(err)) return codexArchiveIDs, err } - m.logger.Debug("archive uploaded to codex", zap.String("cid", cid)) + m.logger.Debug("[CODEX] archive uploaded to codex", zap.String("cid", cid)) codexWakuMessageArchiveIndexMetadata := &protobuf.CodexWakuMessageArchiveIndexMetadata{ Metadata: wakuMessageArchive.Metadata, @@ -504,7 +510,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte // upload index file to codex cid, err := m.codexClient.UploadArchive(codexIndexBytes) if err != nil { - m.logger.Error("failed to upload to codex", zap.Error(err)) + m.logger.Error("[CODEX] failed to upload to codex", zap.Error(err)) return codexArchiveIDs, err } @@ -518,7 +524,9 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte return codexArchiveIDs, err } - m.logger.Debug("archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", endDate.Unix())) + m.logger.Debug("[CODEX] index uploaded to codex", zap.String("cid", cid)) + + m.logger.Debug("[CODEX] archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", 
endDate.Unix())) m.publisher.publish(&Subscription{ HistoryArchivesCreatedSignal: &signal.HistoryArchivesCreatedSignal{ @@ -528,7 +536,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte }, }) } else { - m.logger.Debug("no archives created") + m.logger.Debug("[CODEX] no archives created") m.publisher.publish(&Subscription{ NoHistoryArchivesCreatedSignal: &signal.NoHistoryArchivesCreatedSignal{ CommunityID: communityID.String(), @@ -789,7 +797,7 @@ func (m *ArchiveFileManager) ExtractMessagesFromCodexHistoryArchive(communityID var buf bytes.Buffer err = m.codexClient.LocalDownload(cid, &buf) if err != nil { - m.logger.Error("failed to download archive from codex", zap.Error(err)) + m.logger.Error("[CODEX] failed to download archive from codex", zap.Error(err)) return nil, err } data := buf.Bytes() diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 5b08a88856a..78a85cbaf74 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -270,7 +270,7 @@ func (m *Messenger) handleCommunitiesHistoryArchivesSubscription(c chan *communi if sub.HistoryArchivesSeedingSignal.IndexCid { err := m.dispatchIndexCidMessage(sub.HistoryArchivesSeedingSignal.CommunityID) if err != nil { - m.logger.Debug("failed to dispatch index cid message", zap.Error(err)) + m.logger.Debug("[CODEX] failed to dispatch index cid message", zap.Error(err)) } } } @@ -1944,7 +1944,7 @@ func (m *Messenger) CancelRequestToJoinCommunity(ctx context.Context, request *r } func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.RequestToJoin) (*MessengerResponse, error) { - m.logger.Debug("accept request to join community", + m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] accept request to join community", zap.String("community", requestToJoin.CommunityID.String()), zap.String("pubkey", requestToJoin.PublicKey)) @@ -2003,11 +2003,13 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ } if m.archiveManager.IsCodexReady() && m.archiveManager.CodexIndexCidFileExists(community.ID()) { + m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] calling GetHistoryArchiveIndexCid", zap.String("communityID", community.IDString())) cid, err := m.archiveManager.GetHistoryArchiveIndexCid(community.ID()) if err != nil { m.logger.Warn("couldn't get codex index cid for community", zap.Error(err)) return nil, err } + m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] setting requestToJoinResponseProto.IndexCid", zap.String("communityID", community.IDString()), zap.String("cid", cid)) requestToJoinResponseProto.IndexCid = cid } @@ -4036,6 +4038,8 @@ func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel cha cancelFunc() }() + m.logger.Debug("[CODEX][importHistoryArchives] waiting to start importing history archive messages (importDelayer.wait)", zap.String("communityID", types.EncodeHex(communityID))) + // don't proceed until initial import delay has passed select { case <-m.importDelayer.wait: @@ -4045,12 +4049,14 @@ func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel cha delayImport := false + m.logger.Info("[CODEX][importHistoryArchives] starting to import history archive messages", zap.String("communityID", types.EncodeHex(communityID))) + importMessageArchivesLoop: for { if delayImport { select { case <-ctx.Done(): - m.logger.Debug("interrupted importing history archive messages") + m.logger.Debug("[CODEX][importHistoryArchives] interrupted 
importing history archive messages") return nil case <-time.After(1 * time.Hour): delayImport = false @@ -4059,7 +4065,7 @@ importMessageArchivesLoop: select { case <-ctx.Done(): - m.logger.Debug("interrupted importing history archive messages") + m.logger.Debug("[CODEX][importHistoryArchives] interrupted importing history archive messages") return nil case <-importTicker.C: err := m.checkIfIMemberOfCommunity(communityID) @@ -4068,16 +4074,16 @@ importMessageArchivesLoop: } archiveIDsToImport, err := m.archiveManager.GetMessageArchiveIDsToImport(communityID) if err != nil { - m.logger.Error("couldn't get message archive IDs to import", zap.Error(err)) + m.logger.Error("[CODEX][importHistoryArchives] couldn't get message archive IDs to import", zap.Error(err)) return err } if len(archiveIDsToImport) == 0 { - m.logger.Debug("no message archives to import") + m.logger.Debug("[CODEX][importHistoryArchives] no message archives to import") break importMessageArchivesLoop } - m.logger.Info("importing message archive", zap.Int("left", len(archiveIDsToImport))) + m.logger.Info("[CODEX][importHistoryArchives] importing message archive", zap.Int("left", len(archiveIDsToImport))) // only process one archive at a time, so in case of cancel we don't // wait for all archives to be processed first @@ -4086,10 +4092,11 @@ importMessageArchivesLoop: var archiveMessages []*protobuf.WakuMessage preference, err := m.GetArchiveDistributionPreference() if err != nil { - m.logger.Warn("failed to get archive distribution preference, using codex", zap.Error(err)) + m.logger.Warn("[CODEX][importHistoryArchives] failed to get archive distribution preference, using codex", zap.Error(err)) preference = communities.ArchiveDistributionMethodCodex } if preference == communities.ArchiveDistributionMethodCodex { + m.logger.Debug("[CODEX][importHistoryArchives] using codex to extract messages") archiveMessages, err = m.archiveManager.ExtractMessagesFromCodexHistoryArchive(communityID, downloadedArchiveID) } else { archiveMessages, err = m.archiveManager.ExtractMessagesFromHistoryArchive(communityID, downloadedArchiveID) @@ -4101,7 +4108,7 @@ importMessageArchivesLoop: delayImport = true continue } - m.logger.Error("failed to extract history archive messages", zap.Error(err)) + m.logger.Error("[CODEX][importHistoryArchives] failed to extract history archive messages", zap.Error(err)) continue } @@ -4110,14 +4117,14 @@ importMessageArchivesLoop: for _, messagesChunk := range chunkSlice(archiveMessages, importMessagesChunkSize) { if err := m.importRateLimiter.Wait(ctx); err != nil { if !errors.Is(err, context.Canceled) { - m.logger.Error("rate limiter error when handling archive messages", zap.Error(err)) + m.logger.Error("[CODEX][importHistoryArchives] rate limiter error when handling archive messages", zap.Error(err)) } continue importMessageArchivesLoop } response, err := m.handleArchiveMessages(messagesChunk) if err != nil { - m.logger.Error("failed to handle archive messages", zap.Error(err)) + m.logger.Error("[CODEX][importHistoryArchives] failed to handle archive messages", zap.Error(err)) continue importMessageArchivesLoop } @@ -4131,7 +4138,7 @@ importMessageArchivesLoop: err = m.archiveManager.SetMessageArchiveIDImported(communityID, downloadedArchiveID, true) if err != nil { - m.logger.Error("failed to mark history message archive as imported", zap.Error(err)) + m.logger.Error("[CODEX][importHistoryArchives] failed to mark history message archive as imported", zap.Error(err)) continue } } @@ -4217,16 +4224,32 @@ func 
(m *Messenger) dispatchIndexCidMessage(communityID string) error { Priority: &messagingtypes.LowPriority, } + m.logger.Info("[CODEX][dispatchIndexCidMessage] dispatching index cid message", + zap.String("communityID", communityID), + zap.String("chatID", chatID), + zap.String("cid", indexCid), + ) + _, err = m.messaging.SendPublic(context.Background(), chatID, rawMessage) if err != nil { return err } + m.logger.Info("[CODEX][dispatchIndexCidMessage] dispatched index cid message", + zap.String("communityID", communityID), + zap.String("chatID", chatID), + zap.String("cid", indexCid), + ) + + m.logger.Info("[CODEX][dispatchIndexCidMessage] calling UpdateCommunityDescriptionIndexCidMessageClock", zap.String("communityID", community.IDString()), zap.Uint64("clock", indexCidMessage.Clock)) + err = m.communitiesManager.UpdateCommunityDescriptionIndexCidMessageClock(community.ID(), indexCidMessage.Clock) if err != nil { return err } + m.logger.Info("[CODEX][dispatchIndexCidMessage] calling UpdateIndexCidMessageClock", zap.String("communityID", community.IDString()), zap.Uint64("clock", indexCidMessage.Clock)) + return m.communitiesManager.UpdateIndexCidMessageClock(community.ID(), indexCidMessage.Clock) } diff --git a/protocol/messenger_communities_import_discord.go b/protocol/messenger_communities_import_discord.go index ababe8cb667..030f32ebbfd 100644 --- a/protocol/messenger_communities_import_discord.go +++ b/protocol/messenger_communities_import_discord.go @@ -966,7 +966,7 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { - m.logger.Error("failed to get archive distribution preference", zap.Error(err)) + m.logger.Error("[CODEX] failed to get archive distribution preference", zap.Error(err)) continue } @@ -987,6 +987,7 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC } if archiveDistributionPreference == params.ArchiveDistributionMethodCodex { + m.logger.Debug("[CODEX][RequestImportDiscordChannel] creating history archive codex from messages") _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( request.CommunityID, wakuMessages, @@ -997,7 +998,7 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC community.Encrypted(), ) if err != nil { - m.logger.Error("failed to create history archive codex", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordChannel] failed to create history archive codex", zap.Error(err)) archiveCodexCreatedSuccessfully = false } } @@ -1022,11 +1023,12 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { err = m.archiveManager.SeedHistoryArchiveIndexCid(request.CommunityID) if err != nil { - m.logger.Error("failed to seed history archive index cid", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordChannel] failed to seed history archive index cid", zap.Error(err)) } } if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { + m.logger.Debug("[CODEX][TORRENT][RequestImportDiscordChannel] starting history archive tasks interval") go m.archiveManager.StartHistoryArchiveTasksInterval(community, messageArchiveInterval) } } @@ -1801,6 +1803,7 @@ func (m *Messenger) RequestImportDiscordCommunity(request *requests.ImportDiscor } if archiveDistributionPreference == params.ArchiveDistributionMethodCodex { + 
m.logger.Debug("[CODEX][RequestImportDiscordCommunity] creating history archive codex from messages") _, err = m.archiveManager.CreateHistoryArchiveCodexFromMessages( discordCommunity.ID(), wakuMessages, @@ -1811,7 +1814,7 @@ func (m *Messenger) RequestImportDiscordCommunity(request *requests.ImportDiscor discordCommunity.Encrypted(), ) if err != nil { - m.logger.Error("failed to create history archive codex", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordCommunity] failed to create history archive codex", zap.Error(err)) archiveCodexCreatedSuccessfully = false } } @@ -1830,11 +1833,12 @@ func (m *Messenger) RequestImportDiscordCommunity(request *requests.ImportDiscor if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { err = m.archiveManager.SeedHistoryArchiveIndexCid(discordCommunity.ID()) if err != nil { - m.logger.Error("failed to seed history archive index cid", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordCommunity] failed to seed history archive index cid", zap.Error(err)) } } if m.archiveManager.IsReady() && communitySettings.HistoryArchiveSupportEnabled { + m.logger.Debug("[CODEX][TORRENT][RequestImportDiscordCommunity] starting history archive tasks interval") go m.archiveManager.StartHistoryArchiveTasksInterval(discordCommunity, messageArchiveInterval) } } diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index ac44deb0f5d..8d430bda997 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1316,11 +1316,12 @@ func (m *Messenger) HandleHistoryArchiveMagnetlinkMessage(state *ReceivedMessage } func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageState, communityPubKey *ecdsa.PublicKey, cid string, clock uint64) error { + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Handling history archive index CID message", zap.String("cid", cid)) id := types.HexBytes(crypto.CompressPubkey(communityPubKey)) community, err := m.communitiesManager.GetByID(id) if err != nil && err != communities.ErrOrgNotFound { - m.logger.Debug("Couldn't get community for community with id: ", zap.Any("id", id)) + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Couldn't get community for community with id: ", zap.Any("id", id)) return err } if community == nil { @@ -1329,7 +1330,7 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt settings, err := m.communitiesManager.GetCommunitySettingsByID(id) if err != nil { - m.logger.Debug("Couldn't get community settings for community with id: ", zap.Any("id", id)) + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Couldn't get community settings for community with id: ", zap.Any("id", id)) return err } if settings == nil { @@ -1337,6 +1338,8 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt } if m.archiveManager.IsCodexReady() && settings.HistoryArchiveSupportEnabled { + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Codex is ready and history archive support is enabled", zap.String("communityID", community.IDString())) + lastIndexCidClock, err := m.communitiesManager.GetIndexCidMessageClock(id) if err != nil { return err @@ -1349,17 +1352,22 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt // We are only interested in a community archive index CID // if it originates from a community that the current account is // part of and doesn't own the private key at the same time + 
m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Checking community membership and lastIndexCidClock", zap.String("communityID", community.IDString()), zap.Uint64("lastIndexCidClock", lastIndexCidClock), zap.Uint64("messageClock", clock)) + if !community.IsControlNode() && community.Joined() && clock >= lastIndexCidClock { + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] clock >= lastIndexCidClock)") if lastSeenCid == cid { - m.logger.Debug("already processed this index cid") + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] already processed this index cid") return nil } // All checks passed - proceed with download - + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Unseeding existing history archive index CID for community (if any)", zap.String("communityID", community.IDString())) m.archiveManager.UnseedHistoryArchiveIndexCid(id) currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(id.String()) + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Starting download and import of history archives", zap.String("cid", cid)) + go func(currentTask *communities.HistoryArchiveDownloadTask, communityID types.HexBytes) { defer gocommon.LogOnPanic() // Cancel ongoing download/import task @@ -1384,9 +1392,13 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt // this wait groups tracks all ongoing tasks across communities m.shutdownWaitGroup.Add(1) defer m.shutdownWaitGroup.Done() + + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Calling downloadAndImportCodexHistoryArchives", zap.String("cid", cid)) + m.downloadAndImportCodexHistoryArchives(communityID, cid, task.CancelChan) }(currentTask, id) + m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Updating index CID message clock", zap.String("communityID", community.IDString()), zap.Uint64("clock", clock)) return m.communitiesManager.UpdateIndexCidMessageClock(id, clock) } } @@ -1440,9 +1452,9 @@ func (m *Messenger) downloadAndImportHistoryArchives(id types.HexBytes, magnetli func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, indexCid string, cancel chan struct{}) { downloadTaskInfo, err := m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) if err != nil { - logMsg := "failed to download history archive data" - if err == communities.ErrTorrentTimedout || err == communities.ErrIndexCidTimedout { - m.logger.Debug("downloading indexCid has timed out, trying once more...") + logMsg := "[CODEX][downloadAndImportCodexHistoryArchives] failed to download history archive data" + if err == communities.ErrIndexCidTimedout { + m.logger.Debug("[CODEX][downloadAndImportCodexHistoryArchives] downloading indexCid has timed out, trying once more...") downloadTaskInfo, err = m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) if err != nil { m.logger.Error(logMsg, zap.Error(err)) @@ -1456,14 +1468,14 @@ func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, ind if downloadTaskInfo.Cancelled { if downloadTaskInfo.TotalDownloadedArchivesCount > 0 { - m.logger.Debug(fmt.Sprintf("downloaded %d of %d archives so far", downloadTaskInfo.TotalDownloadedArchivesCount, downloadTaskInfo.TotalArchivesCount)) + m.logger.Debug(fmt.Sprintf("[CODEX][downloadAndImportCodexHistoryArchives] downloaded %d of %d archives so far", downloadTaskInfo.TotalDownloadedArchivesCount, downloadTaskInfo.TotalArchivesCount)) } return } err = 
m.communitiesManager.UpdateLastSeenIndexCid(id, indexCid) if err != nil { - m.logger.Error("couldn't update last seen indexCid", zap.Error(err)) + m.logger.Error("[CODEX][downloadAndImportCodexHistoryArchives] couldn't update last seen indexCid", zap.Error(err)) } err = m.checkIfIMemberOfCommunity(id) @@ -1471,9 +1483,11 @@ func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, ind return } + m.logger.Debug("[CODEX][downloadAndImportCodexHistoryArchives] Importing history archives now") + err = m.importHistoryArchives(id, cancel) if err != nil { - m.logger.Error("failed to import history archives", zap.Error(err)) + m.logger.Error("[CODEX][downloadAndImportCodexHistoryArchives] failed to import history archives", zap.Error(err)) m.config.messengerSignalsHandler.DownloadingHistoryArchivesFinished(types.EncodeHex(id)) return } @@ -1571,6 +1585,7 @@ func (m *Messenger) HandleCommunityCancelRequestToJoin(state *ReceivedMessageSta // HandleCommunityRequestToJoin handles an community request to join func (m *Messenger) HandleCommunityRequestToJoin(state *ReceivedMessageState, requestToJoinProto *protobuf.CommunityRequestToJoin, statusMessage *messagingtypes.Message) error { + m.logger.Debug("[CODEX][HandleCommunityRequestToJoin] Handling community request to join") signer := state.CurrentMessageState.PublicKey community, requestToJoin, err := m.communitiesManager.HandleCommunityRequestToJoin(signer, statusMessage.TransportLayer.Dst, requestToJoinProto) if err != nil { @@ -1610,6 +1625,7 @@ func (m *Messenger) HandleCommunityRequestToJoin(state *ReceivedMessageState, re } case communities.RequestToJoinStateDeclined: + m.logger.Debug("[CODEX][HandleCommunityRequestToJoin] Community request to join declined") response, err := m.declineRequestToJoinCommunity(requestToJoin) if err == nil { err := state.Response.Merge(response) @@ -1621,6 +1637,7 @@ func (m *Messenger) HandleCommunityRequestToJoin(state *ReceivedMessageState, re } case communities.RequestToJoinStateAccepted: + m.logger.Debug("[CODEX][HandleCommunityRequestToJoin] Community request to join accepted") response, err := m.acceptRequestToJoinCommunity(requestToJoin) if err == nil { err := state.Response.Merge(response) // new member has been added @@ -1628,6 +1645,7 @@ func (m *Messenger) HandleCommunityRequestToJoin(state *ReceivedMessageState, re return err } } else if err == communities.ErrNoPermissionToJoin { + m.logger.Debug("[CODEX][HandleCommunityRequestToJoin] No permission to join community") // only control node will end up here as it's the only one that // performed token permission checks response, err = m.declineRequestToJoinCommunity(requestToJoin) @@ -1680,6 +1698,7 @@ func (m *Messenger) HandleCommunityEditSharedAddresses(state *ReceivedMessageSta } func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageState, requestToJoinResponseProto *protobuf.CommunityRequestToJoinResponse, statusMessage *messagingtypes.Message) error { + m.logger.Debug("[CODEX][HandleCommunityRequestToJoinResponse] Handling community request to join response") signer := state.CurrentMessageState.PublicKey if requestToJoinResponseProto.CommunityId == nil { return ErrInvalidCommunityID @@ -1807,6 +1826,8 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS cid := requestToJoinResponseProto.IndexCid if m.archiveManager.IsCodexReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && cid != "" { + 
m.logger.Debug("[CODEX][HandleCommunityRequestToJoinResponse] Received index CID to download history archives", zap.String("cid", cid)) + currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) go func(currentTask *communities.HistoryArchiveDownloadTask) { defer gocommon.LogOnPanic() @@ -1829,6 +1850,8 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS m.shutdownWaitGroup.Add(1) defer m.shutdownWaitGroup.Done() + m.logger.Debug("[CODEX][HandleCommunityRequestToJoinResponse] Starting download and import of history archives", zap.String("cid", cid)) + m.downloadAndImportCodexHistoryArchives(community.ID(), cid, task.CancelChan) }(currentTask) } @@ -3733,13 +3756,15 @@ func (m *Messenger) HandleSyncTrustedUser(state *ReceivedMessageState, message * } func (m *Messenger) HandleCommunityMessageArchiveIndexCid(state *ReceivedMessageState, message *protobuf.CommunityMessageArchiveIndexCid, statusMessage *messagingtypes.Message) error { + m.logger.Debug("[CODEX][HandleCommunityMessageArchiveIndexCid] received CommunityMessageArchiveIndexCid", zap.String("cid", message.Cid)) + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { return err } if archiveDistributionPreference == communities.ArchiveDistributionMethodTorrent { // Ignore Cid messages when torrent distribution is selected - m.logger.Debug("skipping cid processing due to torrent-only preference") + m.logger.Debug("[CODEX][HandleCommunityMessageArchiveIndexCid] skipping cid processing due to torrent-only preference") return nil } return m.HandleHistoryArchiveIndexCidMessage(state, state.CurrentMessageState.PublicKey, message.Cid, message.Clock) diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index eb6e56b5381..fcbd79a6b0a 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -38,7 +38,7 @@ def setup_backends(self, backend_new_profile): assert has_archive is False, "Another member should not have community archive initially" # Connect members to community codex client - # In the real life, this would be done via DHT discovery + # In the real life, this would be done via discovery info = self.creator.wakuext_service.debug() self.member.wakuext_service.connect(info["id"], info["addrs"]) self.another_member.wakuext_service.connect(info["id"], info["addrs"]) From 7c5cc6f4e3b9e3574460195c459cfd2a6dfee624 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 10 Nov 2025 04:48:56 +0100 Subject: [PATCH 53/75] adds even more logging --- protocol/communities/manager_archive_file.go | 22 +++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index 087ac42f1cd..5194a4db887 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -349,7 +349,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte codexArchiveDir := m.codexHistoryArchiveDataDirPath(communityID) codexIndexPath := m.codexHistoryArchiveIndexFilePath(communityID) - m.logger.Debug("codexArchiveDir", zap.String("codexArchiveDir", codexArchiveDir)) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] codexArchiveDir", zap.String("codexArchiveDir", codexArchiveDir)) codexWakuMessageArchiveIndexProto := 
&protobuf.CodexWakuMessageArchiveIndex{} codexWakuMessageArchiveIndex := make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata) @@ -361,6 +361,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte _, err := os.Stat(codexIndexPath) if err == nil { + m.logger.Debug("[CODEX][createHistoryArchiveCodex] codex index file exists, loading from file") codexWakuMessageArchiveIndexProto, err = m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) if err != nil { return codexArchiveIDs, err @@ -377,7 +378,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte CommunityID: communityID.String(), }}) - m.logger.Debug("creating archives", + m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating archives", zap.Any("startDate", startDate), zap.Any("endDate", endDate), zap.Duration("partition", partition), @@ -416,7 +417,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte continue } - m.logger.Debug("[CODEX] creating Codex archive with messages", zap.Int("messagesCount", len(messages))) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating Codex archive with messages", zap.Int("messagesCount", len(messages))) // Not only do we partition messages, we also chunk them // roughly by size, such that each chunk will not exceed a given @@ -427,14 +428,14 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte for _, msg := range messages { msgSize := len(msg.Payload) + len(msg.Sig) - m.logger.Debug("[CODEX] message size", + m.logger.Debug("[CODEX][createHistoryArchiveCodex] message size", zap.Int("messageSize", msgSize), zap.String("contentTopic", string(msg.Topic[:])), zap.ByteString("payload[0:31]", msg.Payload[:min(32, len(msg.Payload))]), ) if msgSize > maxArchiveSizeInBytes { // we drop messages this big - m.logger.Debug("[CODEX] dropping message due to size", zap.Int("messageSize", msgSize)) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] dropping message due to size", zap.Int("messageSize", msgSize)) continue } @@ -469,7 +470,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte return codexArchiveIDs, err } - m.logger.Debug("[CODEX] archive uploaded to codex", zap.String("cid", cid)) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] archive uploaded to codex", zap.String("cid", cid)) codexWakuMessageArchiveIndexMetadata := &protobuf.CodexWakuMessageArchiveIndexMetadata{ Metadata: wakuMessageArchive.Metadata, @@ -510,7 +511,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte // upload index file to codex cid, err := m.codexClient.UploadArchive(codexIndexBytes) if err != nil { - m.logger.Error("[CODEX] failed to upload to codex", zap.Error(err)) + m.logger.Error("[CODEX][createHistoryArchiveCodex] failed to upload to codex", zap.Error(err)) return codexArchiveIDs, err } @@ -524,9 +525,9 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte return codexArchiveIDs, err } - m.logger.Debug("[CODEX] index uploaded to codex", zap.String("cid", cid)) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] index uploaded to codex", zap.String("cid", cid)) - m.logger.Debug("[CODEX] archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", endDate.Unix())) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", endDate.Unix())) m.publisher.publish(&Subscription{ 
HistoryArchivesCreatedSignal: &signal.HistoryArchivesCreatedSignal{ @@ -536,7 +537,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte }, }) } else { - m.logger.Debug("[CODEX] no archives created") + m.logger.Debug("[CODEX][createHistoryArchiveCodex] no archives created") m.publisher.publish(&Subscription{ NoHistoryArchivesCreatedSignal: &signal.NoHistoryArchivesCreatedSignal{ CommunityID: communityID.String(), @@ -551,6 +552,7 @@ func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexByte return codexArchiveIDs, err } + m.logger.Debug("[CODEX][createHistoryArchiveCodex] updating/setting lastMessageArchiveEndDate", zap.Uint64("lastMessageArchiveEndDate", lastMessageArchiveEndDate)) if lastMessageArchiveEndDate > 0 { err = m.persistence.UpdateLastMessageArchiveEndDate(communityID, uint64(from.Unix())) } else { From ee1b8b95dcadd6fdb90ba76d8a310cff212a007c Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 10 Nov 2025 07:59:13 +0100 Subject: [PATCH 54/75] adds a compact document attempting to explain filters and topics --- _docs/filters-topics.md | 146 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 _docs/filters-topics.md diff --git a/_docs/filters-topics.md b/_docs/filters-topics.md new file mode 100644 index 00000000000..2878c973802 --- /dev/null +++ b/_docs/filters-topics.md @@ -0,0 +1,146 @@ +# Filters, Topics, Channels, and Chat IDs + +_Last updated: 2025-11-10_ + +Status-Go juggles several identifiers while shuttling messages between the protocol layer and Waku. This note ties the names together so future readers can reason about filters, channels, and archives without spelunking through `messenger.go`. + +## Quick Glossary + +| Term | Defined in | Purpose | +| --- | --- | --- | +| `ChatID` | `messaging/types/filters.go:23-48` | Status-level label for a logical chat/channel (e.g., `communityID-memberUpdate`, `communityID-general`, `0x…` contact IDs). | +| `LocalChatID` | `messagingtypes.RawMessage` (various call sites) | Field embedded in outgoing raw messages so higher layers know which chat to update; does **not** change network routing. | +| Content Topic | `messaging/layers/transport/topic.go:18-21` | Waku topic (4 bytes) derived from `ChatID` via `Keccak256(chatID)[:4]`. Real network “channel.” | +| Pubsub Topic | `messaging/types/pubsub_topics.go` (see helpers in `messagingtypes`) | Waku v2 gossipsub domain (e.g., `/waku/2/rs/16/32`, `/waku/2/default-waku/proto`). Same content topic on different pubsub topics → distinct subscriptions. | +| `transport.Filter` | `messaging/layers/transport/filter.go` | Stores `ChatID`, `FilterID`, content topic, pubsub topic, symmetric key, and flags. Returned by transport code to upper layers. | +| `messagingtypes.ChatFilter` | `messaging/types/filters.go` | Thin wrapper exposed to the protocol (`messenger.go`); created from `transport.Filter`. | + +> **Rule of thumb:** The `chatID` that created a filter is the only input to `ToTopic`, so _picking a chatID_ at send time uniquely determines the Waku content topic. + +## Where Chat IDs Come From + +Community helpers mint deterministic chat IDs (`protocol/communities/community.go:1544-1590`): + +- `Community.ChatIDs()` returns legacy per-channel identifiers (one per Status channel). +- `Community.MemberUpdateChannelID()` produces `communityID-memberUpdate`. 
+- `Community.UniversalChatID()` aliases the member-update channel so one topic can carry **all** community messages during the universal-channel rollout. +- Contact/discovery/chat code helpers live in `messaging/layers/transport/topic.go:27-45`. + +When a community loads, `Messenger.DefaultFilters` asks to subscribe to: + +1. `communityID` on the community shard’s pubsub topic. +2. `communityID-memberUpdate` (universal channel) on the same pubsub topic. +3. The hex-encoded community pubkey on both the global content topic and the default non-protected topic. +4. Optional fallbacks when the community does not publish a shard (`protocol/messenger_communities.go:2463-2480`). + +These `ChatID` + pubsub pairs become actual Waku subscriptions via the FiltersManager. + +## From Chat ID to Transport Filter + +`FiltersManager.LoadPublic` is the main entry point (`messaging/layers/transport/filters_manager.go:540-591`): + +1. Derive a map key (`chatID` or `chatID||pubsub` when `distinctByPubsub` is true). +2. If no filter exists yet, call `addSymmetric(chatID, pubsubTopic)` which: + - Computes `ToTopic(chatID)` → content topic. + - Calls into the Waku service (`filters_service.Subscribe`) to register the subscription. + - Returns Waku’s `FilterID`, symmetric key id, and topic bytes. +3. Store and return the populated `transport.Filter`. + +`InitCommunities` / `InitPublicChats` simply loop over `ChatsToInitialize` and call `LoadPublic` for each entry, so a single community normally yields several transport filters (legacy per-channel, universal, control/pubkey, etc.). + +### Diagram: Subscription Lifecycle + +```mermaid +flowchart TD + A[Messenger.DefaultFilters
community.go helpers] --> B[ChatsToInitialize] + B --> C[Transport.InitPublicChats] + C --> D["FiltersManager.LoadPublic(chatID, pubsub)"] + D -->|compute Keccak| E["ToTopic(chatID)"] + E --> F["filters_service.Subscribe
(content topic, pubsub)"] + F --> G["transport.Filter stored
filters → chatID key"] + G --> H["messagingtypes.NewChatFilter
exposed to messenger"] +``` + +## Sending Flow + +All public/community traffic eventually funnels through `MessageSender.SendPublic` (`messaging/common/message_sender.go:565-681`). Important details: + +1. The caller supplies `chatName` (usually `community.UniversalChatID()`). +2. After wrapping/encrypting, SendPublic calls `transport.SendPublic(ctx, newMessage, chatName)` (`messaging/layers/transport/transport.go:263-280`). +3. `transport.SendPublic` loads the filter keyed by `chatName`, then copies its symmetric key, content topic, and pubsub topic into the Waku message before posting. + +Therefore **every** universal-channel message (chat, pin, magnetlink, indexCID, etc.) shares a content topic derived from `communityID-memberUpdate`. Legacy per-channel messages keep using their old chat IDs until migration completes. + +### Diagram: Send Path + +```mermaid +sequenceDiagram + participant Proto as protocol/messenger_communities.go + participant Msg as messaging/common/message_sender.go + participant Trans as messaging/layers/transport/transport.go + participant FM as FiltersManager + participant W as Waku + + Proto->>Msg: SendPublic(chatID = communityID-memberUpdate, rawMessage) + Msg->>Trans: SendPublic(ctx, newMessage, chatID) + Trans->>FM: LoadPublic(chatID, pubsub, distinct=false) + FM-->>Trans: transport.Filter{ContentTopic, SymKeyID, PubsubTopic} + Trans->>W: Post(message with ContentTopic=ToTopic(chatID)) + W-->>Trans: Hash + Trans-->>Msg: Hash + Msg-->>Proto: MessageID/Hash +``` + +## Receiving Flow + +Incoming envelopes land inside Waku filter queues. Retrieval proceeds as follows: + +1. `transport.RetrieveRawAll` iterates over **every** registered filter, calls `api.GetFilterMessages(filter.FilterID)`, drops cached duplicates, and groups results by filter (`messaging/layers/transport/transport.go:213-258`). +2. `messenger.RetrieveAll` converts transport filters into `messagingtypes.ChatFilter` objects and feeds the map into `handleRetrievedMessages` (`protocol/messenger.go:2610`, `3042-3230`). +3. For each `(filter, []*ReceivedMessage)` pair: + - If `filter.ChatID()` matches an owned community (legacy ID or universal ID) and `storeWakuMessages == true`, the raw Waku message is persisted for archive building (`protocol/messenger.go:3051-3082`, `protocol/communities/manager.go:4372-4405`). + - `messaging.HandleReceivedMessages` decodes the payload(s). + - Each decoded Status message is dispatched by type (`dispatchToHandler`), eventually ending up in chat history, member updates, archive downloads, etc. + +### Diagram: Receive Path + +```mermaid +flowchart LR + Waku["Waku subscription queues
(per content topic & pubsub)"] -->|GetFilterMessages| Transport + Transport -->|"map[Filter][]Message"| Messenger.RetrieveAll + Messenger.RetrieveAll -->|handleRetrievedMessages| Loop["for each filter batch"] + Loop --> Decision{"owned chat & storeWakuMessages?"} + Decision -->|Yes| Store[StoreWakuMessage] + Decision -->|No| Skip["(skip storage)"] + Store --> Decode["messaging.HandleReceivedMessages"] + Skip --> Decode + Decode --> Dispatch["dispatchToHandler
(type-specific logic)"] + Dispatch --> DB["User DB / UI updates / archive triggers"] +``` + +## Persistence & Archives + +- Community owners call `GetOwnedCommunitiesChatIDs()` to load every legacy per-channel ID and `GetOwnedCommunitiesUniversalChatIDs()` for the universal ID (`protocol/communities/manager.go:4372-4400`). The union is the allowlist. +- `handleRetrievedMessages` is invoked in two distinct contexts: + 1. **Live retrieval loop** (`RetrieveAll`): `storeWakuMessages = true`, `fromArchive = false`. Raw envelopes that match the allowlist are stored in `waku_messages`, and handlers run with `fromArchive=false`. + 2. **Archive replay** (`handleArchiveMessages`): `storeWakuMessages = false`, `fromArchive = true`. No new rows are written, but handlers see `fromArchive=true` to suppress live-only side effects. +- Therefore, the code only writes to `waku_messages` when both the allowlist check and the “live retrieval” flags match (case 1 above). +- Stored rows retain raw payloads, timestamps, hashes, and third-party IDs (`protocol/communities/persistence.go:889-934`). Later, archive import/export code queries by content topic and timestamp windows. + +Because magnetlink/indexCID/chat traffic now shares the universal channel, one subscription per community is enough to capture everything needed for archive creation; legacy filters stay in place until every client migrates. + +## Debugging Checklist + +1. **Which filter saw the message?** Log `filter.ChatID()` in `handleRetrievedMessages` to confirm whether it was the universal channel, a legacy chat, or the admin/pubkey topic. +2. **Did the message get stored?** Ensure the `ChatID` appears in either `GetOwnedCommunitiesChatIDs()` or `GetOwnedCommunitiesUniversalChatIDs()` and that `storeWakuMessages` was true for this pass. +3. **Unexpected content topic?** Remember that changing the `chatID` you pass into `SendPublic` changes the `ToTopic` hash. Verify the string passed to `SendPublic` (search `LocalChatID:` in the caller). +4. **Multiple filters per community?** Enumerate `t.filters.Filters()` at runtime; it’s normal to see several entries per community because the migration keeps both universal and legacy subscriptions alive. + +## appendix: Useful References + +- `messaging/layers/transport/topic.go` – helpers that convert strings/public keys into Waku topics. +- `messaging/layers/transport/filters_manager.go` – how filters are registered, keyed, and updated. +- `messaging/common/message_sender.go` – wrapping, encrypting, and handing messages to the transport. +- `messaging/layers/transport/transport.go` – message retrieval, cache checks, and calls into messenger. +- `protocol/messenger_communities.go` – default chat IDs per community and subscription bootstrap. +- `protocol/messenger.go` – storage/dispatch logic (`handleRetrievedMessages`). 
From e019c96698f5fafe442a73e95546fe01ba36cdee Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Mon, 10 Nov 2025 08:55:43 +0100 Subject: [PATCH 55/75] fixes linting --- .../codex_manager_archive_cancellation_test.go | 2 +- protocol/communities/codex_manager_archive_test.go | 2 +- protocol/communities/manager_archive.go | 9 +++++++-- protocol/communities_messenger_token_permissions_test.go | 4 ++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/protocol/communities/codex_manager_archive_cancellation_test.go b/protocol/communities/codex_manager_archive_cancellation_test.go index 3a7b8e8a8b6..bdabf2b123e 100644 --- a/protocol/communities/codex_manager_archive_cancellation_test.go +++ b/protocol/communities/codex_manager_archive_cancellation_test.go @@ -313,7 +313,7 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchive DoAndReturn(func(ctx context.Context, cid string, output interface{}) error { // Write the index bytes to whatever writer we receive if w, ok := output.(io.Writer); ok { - w.Write(codexIndexBytes) + _, _ = w.Write(codexIndexBytes) } return nil }). diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index add918aa266..01c6d4215ec 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -117,7 +117,7 @@ func (s *CodexArchiveManagerSuite) TearDownTest() { s.T().Logf("Successfully removed CID: %s", cid) } } - s.archiveManager.StopCodexClient() + s.Require().NoError(s.archiveManager.StopCodexClient()) s.Require().NoError(s.manager.Stop()) } diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 0b153175eb6..6319d6fd547 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -743,9 +743,14 @@ func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) return err } err = m.writeCodexIndexCidToFile(communityID, cid) + var errs []error if err != nil { - m.codexClient.RemoveCid(cid) - return err + errs = append(errs, err) + err := m.codexClient.RemoveCid(cid) + if err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) 
} } return nil diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 20284ff545c..c0bc131ba2e 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -3164,8 +3164,8 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Assert().False(ownerNodeCfgFromDB.CodexConfig.Enabled) s.Assert().False(bobNodeCfgFromDB.CodexConfig.Enabled) - s.owner.EnableCommunityHistoryArchiveProtocol() - s.bob.EnableCommunityHistoryArchiveProtocol() + s.Require().NoError(s.owner.EnableCommunityHistoryArchiveProtocol()) + s.Require().NoError(s.bob.EnableCommunityHistoryArchiveProtocol()) ownerNodeCfgFromDB2, err := s.owner.settings.GetNodeConfig() s.Require().NoError(err) From 5d235373d8ff74a394420169abfc68a1069ba2a0 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 10 Nov 2025 17:16:25 +0100 Subject: [PATCH 56/75] Improve functional test --- protocol/communities/manager.go | 1 + protocol/communities/manager_archive.go | 4 ++ protocol/messenger.go | 8 +++ services/ext/api.go | 8 +++ tests-functional/clients/services/wakuext.py | 15 +++-- tests-functional/clients/signals.py | 3 + tests-functional/docker-compose.waku.yml | 3 +- tests-functional/steps/messenger.py | 3 + .../tests/test_wakuext_community_archives.py | 64 ++++++++++++++----- 9 files changed, 86 insertions(+), 23 deletions(-) diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 9aa6e514e6c..2334b1faaa2 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -237,6 +237,7 @@ type ArchiveService interface { DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) TorrentFileExists(communityID string) bool CodexIndexCidFileExists(communityID types.HexBytes) bool + GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ([]string, error) } type ArchiveManagerConfig struct { diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 6319d6fd547..1a02874287c 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -1232,3 +1232,7 @@ func findIndexFile(files []*torrent.File) (index int, ok bool) { func torrentFile(torrentDir, communityID string) string { return path.Join(torrentDir, communityID+".torrent") } + +func (m *ArchiveManager) GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ([]string, error) { + return m.persistence.GetDownloadedMessageArchiveIDs(communityID) +} diff --git a/protocol/messenger.go b/protocol/messenger.go index 772b206b89e..8d62d01a938 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -4608,3 +4608,11 @@ func (m *Messenger) FindStatusMessageIDForBridgeMessageID(bridgeMessageID string func (m *Messenger) Messaging() *messaging.API { return m.messaging } + +func (m *Messenger) GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ([]string, error) { + return m.archiveManager.GetDownloadedMessageArchiveIDs(communityID) +} + +func (m *Messenger) GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) { + return m.archiveManager.GetMessageArchiveIDsToImport(communityID) +} diff --git a/services/ext/api.go b/services/ext/api.go index dce4b82ddc4..3732e272e48 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -1611,3 +1611,11 @@ func (m *PublicAPI) 
Connect(peerId string, addrs []string) error { func (m *PublicAPI) Debug() (codex.DebugInfo, error) { return m.service.messenger.Debug() } + +func (m *PublicAPI) GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ([]string, error) { + return m.service.messenger.GetDownloadedMessageArchiveIDs(communityID) +} + +func (m *PublicAPI) GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) { + return m.service.messenger.GetMessageArchiveIDsToImport(communityID) +} diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index 4fe1e2c4162..abdaf45a4ef 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -782,11 +782,6 @@ def set_archive_distribution_preference(self, preference: str): response = self.rpc_request("setArchiveDistributionPreference", params) return response - def toggle_use_mail_servers(self, enabled: bool): - params = [enabled] - response = self.rpc_request("toggleUseMailservers", params) - return response - def connect(self, peerId: str, addrs: list = []): params = [peerId, addrs] response = self.rpc_request("connect", params) @@ -796,3 +791,13 @@ def debug(self): params = [] response = self.rpc_request("debug", params) return response + + def get_downloaded_message_archive_ids(self, community_id: str): + params = [community_id] + response = self.rpc_request("getDownloadedMessageArchiveIDs", params) + return response + + def get_message_archive_ids_to_import(self, community_id: str): + params = [community_id] + response = self.rpc_request("getMessageArchiveIDsToImport", params) + return response diff --git a/tests-functional/clients/signals.py b/tests-functional/clients/signals.py index 9405cb0ae12..eedb831d1ce 100644 --- a/tests-functional/clients/signals.py +++ b/tests-functional/clients/signals.py @@ -33,6 +33,9 @@ class SignalType(Enum): CONNECTOR_DAPP_PERMISSION_GRANTED = "connector.dAppPermissionGranted" CONNECTOR_DAPP_PERMISSION_REVOKED = "connector.dAppPermissionRevoked" CONNECTOR_DAPP_CHAIN_ID_SWITCHED = "connector.dAppChainIdSwitched" + COMMUNITY_HISTORY_ARCHIVES_CREATED = "community.historyArchivesCreated" + COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED = "community.historyArchiveDownloaded" + COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES = "community.importingHistoryArchiveMessages" class WalletEventType(Enum): diff --git a/tests-functional/docker-compose.waku.yml b/tests-functional/docker-compose.waku.yml index 27648055be6..17eaca64383 100644 --- a/tests-functional/docker-compose.waku.yml +++ b/tests-functional/docker-compose.waku.yml @@ -25,7 +25,8 @@ services: "--shard=64", "--staticnode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", "--storenode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", - "--tcp-port=60001" + "--tcp-port=60001", + "--store-message-retention-policy=120s" ] depends_on: - store diff --git a/tests-functional/steps/messenger.py b/tests-functional/steps/messenger.py index 4ad0de2d53f..ee94c151cc7 100644 --- a/tests-functional/steps/messenger.py +++ b/tests-functional/steps/messenger.py @@ -20,6 +20,9 @@ class MessengerSteps(NetworkConditionsSteps): SignalType.MESSAGE_DELIVERED.value, SignalType.NODE_LOGIN.value, SignalType.NODE_STOPPED.value, + SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, + SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, + SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, ] def 
send_contact_request_and_wait_for_signal_to_be_received(self, sender=None, receiver=None) -> str: diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index fcbd79a6b0a..2bffa1164a8 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -1,6 +1,5 @@ from uuid import uuid4 import pytest -import time from steps.messenger import MessengerSteps from clients.signals import SignalType @@ -11,8 +10,10 @@ class TestCommunityArchives(MessengerSteps): @pytest.fixture(autouse=True) def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" + self.message_archive_interval = 130 + # Community owner - self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=10) + self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=self.message_archive_interval) # Define codex as archive distribution preference self.creator.wakuext_service.set_archive_distribution_preference("codex") @@ -26,6 +27,7 @@ def setup_backends(self, backend_new_profile): # Define codex as archive distribution preference self.another_member.wakuext_service.set_archive_distribution_preference("codex") + # Create the community self.fake_address = "0x" + str(uuid4())[:8] self.community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) @@ -74,34 +76,62 @@ def test_community_archive_index_exists(self): # Wait for member to receive the new message self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=message_id, timeout=10) member_msgs_resp = self.member.wakuext_service.chat_messages(chat_id) - assert member_msgs_resp.get("messages")[0].get("text") == text + assert member_msgs_resp.get("messages") is not None, "Member should have messages after receiving signal" + message_text = member_msgs_resp.get("messages")[0].get("text") + assert message_text == text, "Member should have received the message" - # Just make sure that archive are generated - time.sleep(10) + # Wait for the community archive to be created for the community owner + self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10) - # Ensure that the community archive is available for the creator + # Ensure that the community archive exists in the file system for the community owner has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) assert has_archive is True, "Creator should have community archive after messages are sent" - # TODO: try to disable the store node ?? 
- # self.member.wakuext_service.toggle_use_mail_servers(enabled=False) + # Wait for the community archive to be created for the first member + self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=self.message_archive_interval + 10) + + # Ensure that the member has the archive IDs to import in database + archive_ids = self.member.wakuext_service.get_message_archive_ids_to_import(self.community_id) + assert len(archive_ids) > 0, "Member should have archive IDs to import" + archive_id = archive_ids[0] + + # Ensure that the community archive exists in the file system for the member + has_archive = self.member.wakuext_service.has_community_archive(self.community_id) + assert has_archive is True, "Member should have community archive after messages are sent" + + # Ensure that the member has downloaded the community archive and the database is updated + download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) + assert archive_id in download_archive_ids, "Member should have downloaded the community archive" - # Another member joins and checks for the message + # Wait for another member to join the community self.join_community(member=self.another_member, admin=self.creator) self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) + + # Ensure that another member does not have the message before archive import member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) - assert member_msgs_resp.get("messages") is None, "Another member should not have messages before archive is dispatched" + message = member_msgs_resp.get("messages") + assert message is None, "Another member should not have messages before archive is dispatched" - # Ensure that the another member received the archive dispatch message - time.sleep(5) + # Wait for the community archive to be downloaded for another member + self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=self.message_archive_interval + 10) - has_archive = self.member.wakuext_service.has_community_archive(self.community_id) - assert has_archive is True, "Member should have community archive after messages are sent" + # Get the archive IDs to import for another member + archive_ids = self.another_member.wakuext_service.get_message_archive_ids_to_import(self.community_id) + assert len(archive_ids) > 0, "Another member should have archive IDs to import" + archive_id = archive_ids[0] + # Ensure that the community archive exists in the file system for another member has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) assert has_archive is True, "Another member should have community archive after messages are sent" - member_msgs_resp = self.member.wakuext_service.chat_messages(chat_id) - assert member_msgs_resp.get("messages")[0].get("text") == text, "Member should have the message after archive is dispatched" + # Ensure that another member has downloaded the community archive and the database is updated + download_archive_ids = self.another_member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) + assert archive_id in download_archive_ids, "Another member should have downloaded the community archive" + + # Wait for the archive import to complete for another member + self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, timeout=self.message_archive_interval + 
10) - # TODO: Verify in db + # Verify that another member has the message after archive import + another_member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) + assert another_member_msgs_resp.get("messages") is not None, "Another member should have messages after archive is dispatched" + assert another_member_msgs_resp.get("messages")[0].get("text") == text, "Another member should have the message after archive is dispatched" From 6ebd8e6d47cda97fac0cd0d87a1b44ee1b935693 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 10 Nov 2025 17:31:04 +0100 Subject: [PATCH 57/75] Try to reduce store-message-retention-policy --- tests-functional/docker-compose.waku.yml | 2 +- tests-functional/tests/test_wakuext_community_archives.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests-functional/docker-compose.waku.yml b/tests-functional/docker-compose.waku.yml index 17eaca64383..e4a3e3780bf 100644 --- a/tests-functional/docker-compose.waku.yml +++ b/tests-functional/docker-compose.waku.yml @@ -26,7 +26,7 @@ services: "--staticnode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", "--storenode=/dns4/store/tcp/60002/p2p/16Uiu2HAmCDqxtfF1DwBqs7UJ4TgSnjoh6j1RtE1hhQxLLao84jLi", "--tcp-port=60001", - "--store-message-retention-policy=120s" + "--store-message-retention-policy=60s" ] depends_on: - store diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 2bffa1164a8..b495bf3e027 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -10,7 +10,7 @@ class TestCommunityArchives(MessengerSteps): @pytest.fixture(autouse=True) def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" - self.message_archive_interval = 130 + self.message_archive_interval = 70 # Community owner self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=self.message_archive_interval) From bfe46a4ae50eb015f679b088e0ad148946ed68c3 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 11 Nov 2025 06:54:24 +0100 Subject: [PATCH 58/75] Provide better timeout --- .../tests/test_wakuext_community_archives.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index b495bf3e027..0da571bf2fc 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -10,7 +10,7 @@ class TestCommunityArchives(MessengerSteps): @pytest.fixture(autouse=True) def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" - self.message_archive_interval = 70 + self.message_archive_interval = 80 # Community owner self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=self.message_archive_interval) @@ -87,8 +87,12 @@ def test_community_archive_index_exists(self): has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) assert has_archive is True, "Creator should have community archive after messages are sent" + # The timeout is arbitrary set to 10 seconds + # We need to wait for the archive dispatch + download + import which should not take more than 
10 seconds + archive_timeout = 10 + # Wait for the community archive to be created for the first member - self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=self.message_archive_interval + 10) + self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) # Ensure that the member has the archive IDs to import in database archive_ids = self.member.wakuext_service.get_message_archive_ids_to_import(self.community_id) @@ -113,7 +117,7 @@ def test_community_archive_index_exists(self): assert message is None, "Another member should not have messages before archive is dispatched" # Wait for the community archive to be downloaded for another member - self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=self.message_archive_interval + 10) + self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) # Get the archive IDs to import for another member archive_ids = self.another_member.wakuext_service.get_message_archive_ids_to_import(self.community_id) @@ -129,7 +133,7 @@ def test_community_archive_index_exists(self): assert archive_id in download_archive_ids, "Another member should have downloaded the community archive" # Wait for the archive import to complete for another member - self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, timeout=self.message_archive_interval + 10) + self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, timeout=archive_timeout) # Verify that another member has the message after archive import another_member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) From e48a5b13cc0516e2a4881e9b61b7b186dce041ce Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 11 Nov 2025 11:55:56 +0100 Subject: [PATCH 59/75] Update Go bindings to the latest version --- go.mod | 2 +- go.sum | 4 ++-- nix/pkgs/status-go/library/default.nix | 10 +++++----- .../codex-storage/codex-go-bindings/codex/testutil.go | 6 +++++- vendor/modules.txt | 2 +- 5 files changed, 14 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 35c23285f74..ed54009bb89 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cockroachdb/errors v1.11.3 - github.com/codex-storage/codex-go-bindings v0.0.26 + github.com/codex-storage/codex-go-bindings v0.0.27 github.com/getsentry/sentry-go v0.29.1 github.com/golang-migrate/migrate/v4 v4.15.2 github.com/gorilla/sessions v1.2.1 diff --git a/go.sum b/go.sum index a3688988322..6fbb1f9ea13 100644 --- a/go.sum +++ b/go.sum @@ -548,8 +548,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codex-storage/codex-go-bindings v0.0.26 h1:v7PgwJq+dTb7YF4i19Dgx7iHJv+frqDbO9AP7qh2N1k= -github.com/codex-storage/codex-go-bindings v0.0.26/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= +github.com/codex-storage/codex-go-bindings v0.0.27 h1:SdOvqK1e+NfdMmtIWSnWYM2/gbyYuvA4Or7kSmraxIg= 
+github.com/codex-storage/codex-go-bindings v0.0.27/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= diff --git a/nix/pkgs/status-go/library/default.nix b/nix/pkgs/status-go/library/default.nix index ceda0f93f75..fddc34c5135 100644 --- a/nix/pkgs/status-go/library/default.nix +++ b/nix/pkgs/status-go/library/default.nix @@ -8,7 +8,7 @@ let optionalString = pkgs.lib.optionalString; - codexVersion = "v0.0.26"; + codexVersion = "v0.0.27"; arch = if stdenv.hostPlatform.isx86_64 then "amd64" else if stdenv.hostPlatform.isAarch64 then "arm64" @@ -16,10 +16,10 @@ let os = if stdenv.isDarwin then "macos" else "Linux"; hash = if stdenv.hostPlatform.isDarwin - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.26/codex-macos-arm64.zip | jq -r .hash - then "sha256-3CHIWoSjo0plsYqzXQWm1EtY1STcljV4yfXTPon90uE=" - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.26/codex-Linux-amd64.zip | jq -r .hash - else "sha256-YxW2vFZlcLrOx1PYgWW4MIstH/oFBRF0ooS0sl3v6ig="; + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.27/codex-macos-arm64.zip | jq -r .hash + then "sha256-9E4NTmJU8+Mz8fHMmex7C0c/ppdMkBZ/dc3iesCbw+A=" + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.27/codex-Linux-amd64.zip | jq -r .hash + else "sha256-ozpOPXJBHhpYElbtVISmWGq+gsQNPMvsNmJ4BLVA7Zk="; # Pre-fetch libcodex to avoid network during build codexLib = pkgs.fetchzip { diff --git a/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go b/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go index 274495b2af4..fb0471c7701 100644 --- a/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go +++ b/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go @@ -14,7 +14,7 @@ func defaultConfigHelper(t *testing.T) Config { LogFormat: LogFormatNoColors, MetricsEnabled: false, BlockRetries: 3000, - LogLevel: "ERROR", + Nat: "none", } } @@ -43,6 +43,10 @@ func newCodexNode(t *testing.T, opts ...Config) *CodexNode { if c.DiscoveryPort != 0 { config.DiscoveryPort = c.DiscoveryPort } + + if c.StorageQuota != 0 { + config.StorageQuota = c.StorageQuota + } } node, err := New(config) diff --git a/vendor/modules.txt b/vendor/modules.txt index bb8a6aa63c1..0da82434c6c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -269,7 +269,7 @@ github.com/cockroachdb/redact/internal/rfmt/fmtsort # github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 ## explicit; go 1.19 github.com/cockroachdb/tokenbucket -# github.com/codex-storage/codex-go-bindings v0.0.26 +# github.com/codex-storage/codex-go-bindings v0.0.27 ## explicit; go 1.24.0 github.com/codex-storage/codex-go-bindings/codex # github.com/consensys/gnark-crypto v0.18.0 From 97ae5c5aee0dcdb147f64d8004826121f24635dd Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 11 Nov 2025 14:33:29 +0100 Subject: [PATCH 60/75] improves test chronology and adds logging --- protocol/communities/manager_archive.go | 12 +- 
protocol/messenger_communities.go | 14 ++ protocol/messenger_config.go | 2 + protocol/messenger_testing_utils.go | 2 + services/ext/signal.go | 8 + tests-functional/clients/signals.py | 6 +- tests-functional/steps/messenger.py | 6 +- .../tests/test_wakuext_community_archives.py | 148 +++++++++++++----- 8 files changed, 147 insertions(+), 51 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 1a02874287c..903c53338c3 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -1069,6 +1069,12 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex if indexDownloader.IsDownloadComplete() { m.logger.Info("[CODEX] history archive index download completed", zap.String("indexCid", indexCid)) + err := m.writeCodexIndexCidToFile(communityID, indexCid) + if err != nil { + m.logger.Error("[CODEX] failed to write Codex index CID to file", zap.Error(err)) + return nil, err + } + // Publish index download completed signal m.publisher.publish(&Subscription{ IndexDownloadCompletedSignal: &signal.IndexDownloadCompletedSignal{ @@ -1077,12 +1083,6 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex }, }) - err := m.writeCodexIndexCidToFile(communityID, indexCid) - if err != nil { - m.logger.Error("[CODEX] failed to write Codex index CID to file", zap.Error(err)) - return nil, err - } - index, err := m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) if err != nil { return nil, err diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 78a85cbaf74..764a07328a8 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -280,6 +280,20 @@ func (m *Messenger) handleCommunitiesHistoryArchivesSubscription(c chan *communi m.config.messengerSignalsHandler.HistoryArchivesUnseeded(sub.HistoryArchivesUnseededSignal.CommunityID) } + if sub.ManifestFetchedSignal != nil { + m.config.messengerSignalsHandler.ManifestFetched( + sub.ManifestFetchedSignal.CommunityID, + sub.ManifestFetchedSignal.IndexCid, + ) + } + + if sub.IndexDownloadCompletedSignal != nil { + m.config.messengerSignalsHandler.IndexDownloadCompleted( + sub.IndexDownloadCompletedSignal.CommunityID, + sub.IndexDownloadCompletedSignal.IndexCid, + ) + } + if sub.HistoryArchiveDownloadedSignal != nil { m.config.messengerSignalsHandler.HistoryArchiveDownloaded( sub.HistoryArchiveDownloadedSignal.CommunityID, diff --git a/protocol/messenger_config.go b/protocol/messenger_config.go index bc2fc1bd9cb..ff6729b9a38 100644 --- a/protocol/messenger_config.go +++ b/protocol/messenger_config.go @@ -42,6 +42,8 @@ type MessengerSignalsHandler interface { HistoryArchivesSeeding(communityID string, magnetLink bool, indexCid bool) HistoryArchivesUnseeded(communityID string) HistoryArchiveDownloaded(communityID string, from int, to int) + ManifestFetched(communityID string, indexCid string) + IndexDownloadCompleted(communityID string, indexCid string) DownloadingHistoryArchivesStarted(communityID string) DownloadingHistoryArchivesFinished(communityID string) ImportingHistoryArchiveMessages(communityID string) diff --git a/protocol/messenger_testing_utils.go b/protocol/messenger_testing_utils.go index 392cea2f901..4c9cb41f32a 100644 --- a/protocol/messenger_testing_utils.go +++ b/protocol/messenger_testing_utils.go @@ -64,6 +64,8 @@ func (m *MessengerSignalsHandlerMock) NoHistoryArchivesCreated(string, int, int) func (m 
*MessengerSignalsHandlerMock) HistoryArchivesCreated(string, int, int) {} func (m *MessengerSignalsHandlerMock) HistoryArchivesSeeding(string, bool, bool) {} func (m *MessengerSignalsHandlerMock) HistoryArchivesUnseeded(string) {} +func (m *MessengerSignalsHandlerMock) ManifestFetched(string, string) {} +func (m *MessengerSignalsHandlerMock) IndexDownloadCompleted(string, string) {} func (m *MessengerSignalsHandlerMock) HistoryArchiveDownloaded(string, int, int) {} func (m *MessengerSignalsHandlerMock) DownloadingHistoryArchivesStarted(string) {} func (m *MessengerSignalsHandlerMock) DownloadingHistoryArchivesFinished(string) {} diff --git a/services/ext/signal.go b/services/ext/signal.go index 895e51a10f7..6db4362baa3 100644 --- a/services/ext/signal.go +++ b/services/ext/signal.go @@ -109,6 +109,14 @@ func (m *MessengerSignalsHandler) HistoryArchiveDownloaded(communityID string, f signal.SendHistoryArchiveDownloaded(communityID, from, to) } +func (m *MessengerSignalsHandler) ManifestFetched(communityID string, indexCid string) { + signal.SendManifestFetched(communityID, indexCid) +} + +func (m *MessengerSignalsHandler) IndexDownloadCompleted(communityID string, indexCid string) { + signal.SendIndexDownloadCompleted(communityID, indexCid) +} + func (m *MessengerSignalsHandler) DownloadingHistoryArchivesStarted(communityID string) { signal.SendDownloadingHistoryArchivesStarted(communityID) } diff --git a/tests-functional/clients/signals.py b/tests-functional/clients/signals.py index eedb831d1ce..0f673ca9eb4 100644 --- a/tests-functional/clients/signals.py +++ b/tests-functional/clients/signals.py @@ -34,8 +34,12 @@ class SignalType(Enum): CONNECTOR_DAPP_PERMISSION_REVOKED = "connector.dAppPermissionRevoked" CONNECTOR_DAPP_CHAIN_ID_SWITCHED = "connector.dAppChainIdSwitched" COMMUNITY_HISTORY_ARCHIVES_CREATED = "community.historyArchivesCreated" + COMMUNITY_ARCHIVE_MANIFEST_FETCHED = "community.manifestFetched" + COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED = "community.indexDownloadCompleted" COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED = "community.historyArchiveDownloaded" - COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES = "community.importingHistoryArchiveMessages" + COMMUNITY_HISTORY_ARCHIVES_SEEDING = "community.historyArchivesSeeding" + COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_STARTED = "community.importingHistoryArchiveMessages" + COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_FINISHED = "community.downloadingHistoryArchivesFinished" class WalletEventType(Enum): diff --git a/tests-functional/steps/messenger.py b/tests-functional/steps/messenger.py index ee94c151cc7..aaf90b88d20 100644 --- a/tests-functional/steps/messenger.py +++ b/tests-functional/steps/messenger.py @@ -21,8 +21,12 @@ class MessengerSteps(NetworkConditionsSteps): SignalType.NODE_LOGIN.value, SignalType.NODE_STOPPED.value, SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, + SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, + SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, - SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, + SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, + SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_STARTED.value, + SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_FINISHED.value, ] def send_contact_request_and_wait_for_signal_to_be_received(self, sender=None, receiver=None) -> str: diff --git a/tests-functional/tests/test_wakuext_community_archives.py 
b/tests-functional/tests/test_wakuext_community_archives.py index 0da571bf2fc..db506d17a1f 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -1,3 +1,4 @@ +import logging from uuid import uuid4 import pytest @@ -32,12 +33,12 @@ def setup_backends(self, backend_new_profile): self.community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) # Ensure that no community archive exists initially - has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) - assert has_archive is False, "Creator should not have community archive initially" - has_archive = self.member.wakuext_service.has_community_archive(self.community_id) - assert has_archive is False, "Member should not have community archive initially" - has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) - assert has_archive is False, "Another member should not have community archive initially" + has_archive_index = self.creator.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is False, "Creator should not have community archive initially" + has_archive_index = self.member.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is False, "Member should not have community archive initially" + has_archive_index = self.another_member.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is False, "Another member should not have community archive initially" # Connect members to community codex client # In the real life, this would be done via discovery @@ -80,62 +81,123 @@ def test_community_archive_index_exists(self): message_text = member_msgs_resp.get("messages")[0].get("text") assert message_text == text, "Member should have received the message" + logging.info(f"Waiting {self.message_archive_interval + 10}s for community owner to create archive...") + # Wait for the community archive to be created for the community owner self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10) - # Ensure that the community archive exists in the file system for the community owner - has_archive = self.creator.wakuext_service.has_community_archive(self.community_id) - assert has_archive is True, "Creator should have community archive after messages are sent" + logging.info("Checking that community owner has local index CID file...") + + # Ensure that the community archive index exists in the file system of the community owner. + # We test this by checking the corresponding archive index CID file exists. + # This index CID file contains the Codex CID of the archive index. + has_archive_index = self.creator.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is True, "Creator should have community archive index after messages are sent" + + logging.info("Success! 
History archive created and dispatched!") # The timeout is arbitrary set to 10 seconds # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds archive_timeout = 10 - # Wait for the community archive to be created for the first member - self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) - - # Ensure that the member has the archive IDs to import in database - archive_ids = self.member.wakuext_service.get_message_archive_ids_to_import(self.community_id) - assert len(archive_ids) > 0, "Member should have archive IDs to import" - archive_id = archive_ids[0] - - # Ensure that the community archive exists in the file system for the member - has_archive = self.member.wakuext_service.has_community_archive(self.community_id) - assert has_archive is True, "Member should have community archive after messages are sent" - - # Ensure that the member has downloaded the community archive and the database is updated + logging.info("Waiting for community member to download manifest of the archive index...") + # Wait for the community member to download the archive index manifest + self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout) + logging.info("Success! Manifest of the archive index fetched!") + + # When wait for index download completed signal - at this stage the index and index CID files + # should both exist in the file system of the member. + logging.info("Waiting for community member to download archive index...") + self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) + logging.info("Success! Archive index downloaded!") + + # Ensure that the community archive index CID file exists in the file system for the member. + # After successfully downloading the archive index, its CID is stored in the the + # index CID file and the file is written immediately after the archive index has been downloaded. + # Notice that at this stage, the node still does not have any single archive downloaded. + logging.info("Verifying that community member has index CID file...") + has_archive_index = self.member.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is True, "Member should have community archive index after messages are sent" + logging.info("Success! Community member has index CID file!") + + # Wait for the community archives to be downloaded for the first member. + logging.info("Waiting for community member to download ALL history archives...") + self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) + logging.info("Success! Community member has downloaded ALL history archives!") + + # Once the historyArchivesSeeding signal is received, the database + # should be already updated: archive ID (HASH) should be stored in the database. + logging.info("Verifying that archive ID (HASH) is recorded in the database...") download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) - assert archive_id in download_archive_ids, "Member should have downloaded the community archive" + assert len(download_archive_ids) == 1, "Member should have exactly 1 archive ID downloaded" + logging.info("Success! 
Archive ID (HASH) is recorded in the database!") + + # Note: We don't check get_message_archive_ids_to_import here because archives are automatically + # imported in the background, and by the time we check, they might already be marked as imported. + # The important thing is that the archive was downloaded (checked above) and will be imported. + # Wait for another member to join the community + logging.info("Another member is joining the community...") self.join_community(member=self.another_member, admin=self.creator) self.another_member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10) + logging.info("Another member has joined the community.") # Ensure that another member does not have the message before archive import member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) message = member_msgs_resp.get("messages") - assert message is None, "Another member should not have messages before archive is dispatched" - - # Wait for the community archive to be downloaded for another member - self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) - - # Get the archive IDs to import for another member - archive_ids = self.another_member.wakuext_service.get_message_archive_ids_to_import(self.community_id) - assert len(archive_ids) > 0, "Another member should have archive IDs to import" - archive_id = archive_ids[0] - - # Ensure that the community archive exists in the file system for another member - has_archive = self.another_member.wakuext_service.has_community_archive(self.community_id) - assert has_archive is True, "Another member should have community archive after messages are sent" - - # Ensure that another member has downloaded the community archive and the database is updated + assert message is None, "Another member should not have messages before archive is dispatched, downloaded and imported" + logging.info("Verified that another member does not have the message before archive import.") + + # Wait for another community member to download the archive index manifest + logging.info("Waiting for another member to download manifest of the archive index...") + self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout) + logging.info("Success! Manifest of the archive index fetched by another member!") + + # Then wait for index download completed signal - at this stage the index and index CID files + # should both exist in the file system of another member. + logging.info("Waiting for another member to download archive index...") + self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) + logging.info("Success! Archive index downloaded by another member!") + + # Ensure that the community archive index exists in the file system of another member + logging.info("Verifying that another member has index CID file...") + has_archive_index = self.another_member.wakuext_service.has_community_archive(self.community_id) + assert has_archive_index is True, "Another member should have community archive index after messages are sent" + logging.info("Success! Another member has index CID file.") + + # Wait for the community archives to be downloaded for another member. 
+ logging.info("Waiting for another member to download ALL history archives...") + self.another_member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) + logging.info("Success! Another member has downloaded ALL history archives!") + + # Wait for the archive to be downloaded by another member + # self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) + + # Ensure that another member has downloaded the community archive and stored its ID in database + logging.info("Verifying that another member has archive ID (HASH) recorded in the database...") download_archive_ids = self.another_member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) - assert archive_id in download_archive_ids, "Another member should have downloaded the community archive" - + assert len(download_archive_ids) == 1, "Another member should have exactly 1 archive ID downloaded" + download_archive_id = download_archive_ids[0] + logging.info("Success! Another member has archive ID (HASH) recorded in the database!") + + # Note: Same as above - archives are automatically imported, so we skip checking + # get_message_archive_ids_to_import to avoid racing with the import process. + + # Wait for the archive import to begin for another member + logging.info("Waiting for another member to start importing history archive messages...") + self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_STARTED.value, timeout=archive_timeout) + logging.info("Another member has started importing history archive messages.") + # Wait for the archive import to complete for another member - self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES.value, timeout=archive_timeout) + logging.info("Waiting for another member to finish importing history archive messages...") + self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_FINISHED.value, + timeout=archive_timeout) + logging.info("Another member has finished importing history archive messages.") # Verify that another member has the message after archive import + logging.info("Verifying that another member has the message after archive import...") another_member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) - assert another_member_msgs_resp.get("messages") is not None, "Another member should have messages after archive is dispatched" - assert another_member_msgs_resp.get("messages")[0].get("text") == text, "Another member should have the message after archive is dispatched" + assert another_member_msgs_resp.get("messages") is not None, "Another member should have messages after importing history archive" + assert another_member_msgs_resp.get("messages")[0].get("text") == text, "Another member should have the message after importing history archive" + logging.info("Success! 
Another member has the message after importing history archive.") From 066488ea2f106a8696223bfad804097c493523f6 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 11 Nov 2025 23:51:43 +0100 Subject: [PATCH 61/75] more consistent issuing of the HistoryArchivesSeedingSignal + other small improvements --- .../communities/codex_manager_archive_test.go | 202 +++++++++++++++++- protocol/communities/manager_archive.go | 21 +- protocol/messenger_handler.go | 10 +- 3 files changed, 218 insertions(+), 15 deletions(-) diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index 01c6d4215ec..7bf8cc315a6 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -345,6 +345,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() downloadStartedReceived := false manifestFetchedReceived := false signalDone := make(chan struct{}) + doneMarker := make(chan struct{}) go func() { timeout := time.After(10 * time.Second) for { @@ -360,6 +361,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() close(signalDone) return case <-signalDone: + doneMarker <- struct{}{} return } } @@ -375,7 +377,14 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() s.Require().Equal(0, taskInfo.TotalDownloadedArchivesCount, "No archives should be downloaded") close(signalDone) - time.Sleep(100 * time.Millisecond) + + // Wait for signal goroutine to finish with timeout + select { + case <-doneMarker: + // Goroutine finished successfully + case <-time.After(200 * time.Millisecond): + s.T().Fatal("Timeout waiting for signal goroutine to finish") + } // Verify that neither signal was received s.Require().False(downloadStartedReceived, "DownloadingHistoryArchivesStartedSignal should not be received when cancelled early") @@ -426,6 +435,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringIndexDownload() indexDownloadCompletedReceived := false downloadStartedReceived := false signalDone := make(chan struct{}) + doneMarker := make(chan struct{}) go func() { timeout := time.After(10 * time.Second) @@ -448,6 +458,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringIndexDownload() close(signalDone) return case <-signalDone: + doneMarker <- struct{}{} return } } @@ -473,7 +484,14 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringIndexDownload() s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") close(signalDone) - time.Sleep(100 * time.Millisecond) + + // Wait for signal goroutine to finish with timeout + select { + case <-doneMarker: + // Goroutine finished successfully + case <-time.After(200 * time.Millisecond): + s.T().Fatal("Timeout waiting for signal goroutine to finish") + } // Verify signals s.Require().True(manifestFetchedReceived, "Should have received ManifestFetchedSignal") @@ -541,6 +559,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringArchiveDownload indexDownloadCompletedReceived := false archivesDownloaded := 0 signalDone := make(chan struct{}) + doneMarker := make(chan struct{}) go func() { timeout := time.After(15 * time.Second) @@ -568,6 +587,7 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringArchiveDownload close(signalDone) return case <-signalDone: + doneMarker <- struct{}{} return } } @@ -593,7 +613,14 @@ func (s *CodexArchiveManagerSuite) 
TestDownloadCancellationDuringArchiveDownload s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") close(signalDone) - time.Sleep(100 * time.Millisecond) + + // Wait for signal goroutine to finish with timeout + select { + case <-doneMarker: + // Goroutine finished successfully + case <-time.After(200 * time.Millisecond): + s.T().Fatal("Timeout waiting for signal goroutine to finish") + } // Verify signals s.Require().True(downloadStartedReceived, "Should have received DownloadingHistoryArchivesStartedSignal") @@ -613,6 +640,175 @@ func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringArchiveDownload archivesDownloaded, result.taskInfo.TotalDownloadedArchivesCount) } +func (s *CodexArchiveManagerSuite) TestHistoryArchivesSeedingSignalWhenAllArchivesExist() { + // Subscribe to signals before starting the test + subscription := s.manager.Subscribe() + + // Create test archive data and upload archives to Codex + archives := []struct { + hash string + from uint64 + to uint64 + data []byte + }{ + {"existing-archive-1", 1000, 2000, make([]byte, 512)}, + {"existing-archive-2", 2000, 3000, make([]byte, 768)}, + } + + // Generate random data for each archive + archiveCIDs := make(map[string]string) + for i := range archives { + if _, err := rand.Read(archives[i].data); err != nil { + s.T().Fatalf("Failed to generate random data for %s: %v", archives[i].hash, err) + } + s.T().Logf("Generated %s data (first 16 bytes hex): %s", + archives[i].hash, hex.EncodeToString(archives[i].data[:16])) + } + + // Upload all archives to Codex + for _, archive := range archives { + cid, err := s.codexClient.Upload(bytes.NewReader(archive.data), archive.hash+".bin") + require.NoError(s.T(), err, "Failed to upload %s", archive.hash) + + archiveCIDs[archive.hash] = cid + s.uploadedCIDs = append(s.uploadedCIDs, cid) + s.T().Logf("Uploaded %s to CID: %s", archive.hash, cid) + + // Verify upload succeeded + exists, err := s.codexClient.HasCid(cid) + require.NoError(s.T(), err, "Failed to check CID existence for %s", archive.hash) + require.True(s.T(), exists, "CID %s should exist after upload", cid) + } + + // Create archive index + index := &protobuf.CodexWakuMessageArchiveIndex{ + Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), + } + + for _, archive := range archives { + cid := archiveCIDs[archive.hash] + index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ + Cid: cid, + Metadata: &protobuf.WakuMessageArchiveMetadata{ + From: archive.from, + To: archive.to, + }, + } + } + + // Upload archive index to codex + codexIndexBytes, err := proto.Marshal(index) + s.Require().NoError(err, "Failed to marshal index") + + indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) + s.Require().NoError(err, "Failed to upload archive index to Codex") + s.Require().NotEmpty(indexCid, "Uploaded index CID should not be empty") + + s.T().Logf("Uploaded archive index to CID: %s", indexCid) + + communityID := types.HexBytes("existing-archives-test") + cancelChan := make(chan struct{}) + + // FIRST RUN: Download all archives normally + s.T().Logf("=== FIRST RUN: Downloading archives for the first time ===") + + taskInfo1, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) + s.Require().NoError(err, "Failed to download archives on first run") + s.Require().NotNil(taskInfo1, "Download task info should not be nil") + s.Require().Equal(len(archives), taskInfo1.TotalArchivesCount, "Should report all 
archives") + s.Require().Equal(len(archives), taskInfo1.TotalDownloadedArchivesCount, "Should have downloaded all archives") + s.Require().False(taskInfo1.Cancelled, "Download should not be cancelled") + + // Verify that archives are stored in persistence + for _, archive := range archives { + exists, err := s.manager.GetPersistence().HasMessageArchiveID(communityID, archive.hash) + s.Require().NoError(err, "Failed to check archive ID %s in persistence", archive.hash) + s.Require().True(exists, "Archive hash %s should be stored in persistence", archive.hash) + } + + s.T().Logf("First run completed successfully, all archives downloaded and stored") + + // SECOND RUN: Download again with same index - should abort early and emit seeding signal + s.T().Logf("=== SECOND RUN: Re-downloading with same index (all archives exist) ===") + + // Create a fresh subscription for the second run to avoid picking up signals from the first run + subscription = s.manager.Subscribe() + + // Track received signals for second run + receivedSeedingSignal := false + receivedDownloadingStarted := false + receivedArchiveDownloaded := false + signalDone := make(chan struct{}) + doneMarker := make(chan struct{}) + + go func() { + timeout := time.After(10 * time.Second) + for { + select { + case event := <-subscription: + if event.HistoryArchivesSeedingSignal != nil { + s.T().Logf("Received HistoryArchivesSeedingSignal for community: %s, MagnetLink: %v, IndexCid: %v", + event.HistoryArchivesSeedingSignal.CommunityID, + event.HistoryArchivesSeedingSignal.MagnetLink, + event.HistoryArchivesSeedingSignal.IndexCid) + receivedSeedingSignal = true + + // Verify the signal has correct values + s.Require().Equal(communityID.String(), event.HistoryArchivesSeedingSignal.CommunityID, + "CommunityID should match") + s.Require().False(event.HistoryArchivesSeedingSignal.MagnetLink, + "MagnetLink should be false") + s.Require().True(event.HistoryArchivesSeedingSignal.IndexCid, + "IndexCid should be true") + } + if event.DownloadingHistoryArchivesStartedSignal != nil { + s.T().Logf("WARNING: Received unexpected DownloadingHistoryArchivesStartedSignal") + receivedDownloadingStarted = true + } + if event.HistoryArchiveDownloadedSignal != nil { + s.T().Logf("WARNING: Received unexpected HistoryArchiveDownloadedSignal") + receivedArchiveDownloaded = true + } + case <-timeout: + close(signalDone) + return + case <-signalDone: + doneMarker <- struct{}{} + return + } + } + }() + + // Create a new cancel channel for the second run + cancelChan2 := make(chan struct{}) + taskInfo2, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan2) + s.Require().NoError(err, "Second download should succeed without error") + s.Require().NotNil(taskInfo2, "Task info should not be nil") + + // Stop signal collection + close(signalDone) + + // Wait for signal goroutine to finish with timeout + select { + case <-doneMarker: + // Goroutine finished successfully + case <-time.After(200 * time.Millisecond): + s.T().Fatal("Timeout waiting for signal goroutine to finish") + } + + // Verify task info for second run + s.Require().Equal(len(archives), taskInfo2.TotalArchivesCount, "Should report all archives") + s.Require().Equal(len(archives), taskInfo2.TotalDownloadedArchivesCount, "Should report all archives as already downloaded") + s.Require().False(taskInfo2.Cancelled, "Download should not be marked as cancelled") + + // Verify that the seeding signal was received and no download signals were sent + 
s.Require().True(receivedSeedingSignal, "Should have received HistoryArchivesSeedingSignal when all archives exist") + s.Require().False(receivedDownloadingStarted, "Should NOT have received DownloadingHistoryArchivesStartedSignal when aborting") + s.Require().False(receivedArchiveDownloaded, "Should NOT have received HistoryArchiveDownloadedSignal when aborting") + + s.T().Logf("Second run completed successfully - seeding signal emitted correctly when all archives exist!") +} + // Run the integration test suite func TestCodexArchiveManagerSuite(t *testing.T) { suite.Run(t, new(CodexArchiveManagerSuite)) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 903c53338c3..598db1ff956 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -1093,14 +1093,21 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex return nil, err } + downloadTaskInfo.TotalDownloadedArchivesCount = len(existingArchiveIDs) + downloadTaskInfo.TotalArchivesCount = len(index.Archives) + if len(existingArchiveIDs) == len(index.Archives) { m.logger.Debug("[CODEX] aborting download, no new archives") + m.publisher.publish(&Subscription{ + HistoryArchivesSeedingSignal: &signal.HistoryArchivesSeedingSignal{ + CommunityID: communityID.String(), + MagnetLink: false, // Not downloaded via magnet link + IndexCid: true, // Downloaded via Codex CID + }, + }) return downloadTaskInfo, nil } - downloadTaskInfo.TotalDownloadedArchivesCount = len(existingArchiveIDs) - downloadTaskInfo.TotalArchivesCount = len(index.Archives) - // Create separate cancel channel for the archive downloader to avoid channel competition archiveDownloaderCancel := make(chan struct{}) @@ -1175,14 +1182,6 @@ func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.Hex }, }) - if archiveDownloader.IsCancelled() { - // archive was cancelled, but it does not mean that - // no single archive was downloaded before cancellation - m.logger.Debug("[CODEX] archive download was cancelled") - downloadTaskInfo.Cancelled = true - return downloadTaskInfo, nil - } - return downloadTaskInfo, nil } else { // Update progress diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 8d430bda997..6a6ca53cacf 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1470,6 +1470,7 @@ func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, ind if downloadTaskInfo.TotalDownloadedArchivesCount > 0 { m.logger.Debug(fmt.Sprintf("[CODEX][downloadAndImportCodexHistoryArchives] downloaded %d of %d archives so far", downloadTaskInfo.TotalDownloadedArchivesCount, downloadTaskInfo.TotalArchivesCount)) } + m.archiveManager.UnseedHistoryArchiveIndexCid(id) return } @@ -1790,7 +1791,7 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS if communitySettings == nil { communitySettings, err = m.communitiesManager.GetCommunitySettingsByID(requestToJoinResponseProto.CommunityId) if err != nil { - return nil + return err } } @@ -1798,6 +1799,9 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS if m.archiveManager.IsTorrentReady() && communitySettings != nil && communitySettings.HistoryArchiveSupportEnabled && magnetlink != "" { currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) + if err := m.communitiesManager.UpdateMagnetlinkMessageClock(requestToJoinResponseProto.CommunityId, 
requestToJoinResponseProto.Clock); err != nil { + return err + } go func(currentTask *communities.HistoryArchiveDownloadTask) { defer gocommon.LogOnPanic() // Cancel ongoing download/import task @@ -1828,6 +1832,10 @@ func (m *Messenger) HandleCommunityRequestToJoinResponse(state *ReceivedMessageS m.logger.Debug("[CODEX][HandleCommunityRequestToJoinResponse] Received index CID to download history archives", zap.String("cid", cid)) + if err := m.communitiesManager.UpdateIndexCidMessageClock(requestToJoinResponseProto.CommunityId, requestToJoinResponseProto.Clock); err != nil { + return err + } + currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(community.IDString()) go func(currentTask *communities.HistoryArchiveDownloadTask) { defer gocommon.LogOnPanic() From efe07ce0b6378e3d59a10c206f1788ca56df8b6c Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Nov 2025 13:37:54 +0100 Subject: [PATCH 62/75] Add archive test using community default chat --- .../tests/test_wakuext_community_archives.py | 73 ++++++++++++------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index db506d17a1f..003ceaa8a1b 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -28,24 +28,14 @@ def setup_backends(self, backend_new_profile): # Define codex as archive distribution preference self.another_member.wakuext_service.set_archive_distribution_preference("codex") - # Create the community - self.fake_address = "0x" + str(uuid4())[:8] - self.community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) - - # Ensure that no community archive exists initially - has_archive_index = self.creator.wakuext_service.has_community_archive(self.community_id) - assert has_archive_index is False, "Creator should not have community archive initially" - has_archive_index = self.member.wakuext_service.has_community_archive(self.community_id) - assert has_archive_index is False, "Member should not have community archive initially" - has_archive_index = self.another_member.wakuext_service.has_community_archive(self.community_id) - assert has_archive_index is False, "Another member should not have community archive initially" - # Connect members to community codex client # In the real life, this would be done via discovery info = self.creator.wakuext_service.debug() self.member.wakuext_service.connect(info["id"], info["addrs"]) self.another_member.wakuext_service.connect(info["id"], info["addrs"]) + # Create the community + self.fake_address = "0x" + str(uuid4())[:8] self.display_name = "chat_" + str(uuid4()) self.chat_payload = { "identity": { @@ -60,8 +50,18 @@ def setup_backends(self, backend_new_profile): } def test_community_archive_index_exists(self): + community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) + + # Ensure that no community archive exists initially + has_archive_index = self.creator.wakuext_service.has_community_archive(community_id) + assert has_archive_index is False, "Creator should not have community archive initially" + has_archive_index = self.member.wakuext_service.has_community_archive(community_id) + assert has_archive_index is False, "Member should not have community archive initially" + has_archive_index = self.another_member.wakuext_service.has_community_archive(community_id) + assert has_archive_index is False, "Another member 
should not have community archive initially" + # Create community chat - create_resp = self.creator.wakuext_service.create_community_chat(self.community_id, self.chat_payload) + create_resp = self.creator.wakuext_service.create_community_chat(community_id, self.chat_payload) chat_id = create_resp.get("chats")[0].get("id") # Wait for member to receive chat creation signal @@ -90,8 +90,8 @@ def test_community_archive_index_exists(self): # Ensure that the community archive index exists in the file system of the community owner. # We test this by checking the corresponding archive index CID file exists. - # This index CID file contains the Codex CID of the archive index. - has_archive_index = self.creator.wakuext_service.has_community_archive(self.community_id) + # This index CID file contains the Codex CID of the archive index. + has_archive_index = self.creator.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Creator should have community archive index after messages are sent" logging.info("Success! History archive created and dispatched!") @@ -116,7 +116,7 @@ def test_community_archive_index_exists(self): # index CID file and the file is written immediately after the archive index has been downloaded. # Notice that at this stage, the node still does not have any single archive downloaded. logging.info("Verifying that community member has index CID file...") - has_archive_index = self.member.wakuext_service.has_community_archive(self.community_id) + has_archive_index = self.member.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Member should have community archive index after messages are sent" logging.info("Success! Community member has index CID file!") @@ -128,14 +128,13 @@ def test_community_archive_index_exists(self): # Once the historyArchivesSeeding signal is received, the database # should be already updated: archive ID (HASH) should be stored in the database. logging.info("Verifying that archive ID (HASH) is recorded in the database...") - download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) + download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(community_id) assert len(download_archive_ids) == 1, "Member should have exactly 1 archive ID downloaded" logging.info("Success! Archive ID (HASH) is recorded in the database!") # Note: We don't check get_message_archive_ids_to_import here because archives are automatically # imported in the background, and by the time we check, they might already be marked as imported. # The important thing is that the archive was downloaded (checked above) and will be imported. - # Wait for another member to join the community logging.info("Another member is joining the community...") @@ -161,8 +160,8 @@ def test_community_archive_index_exists(self): logging.info("Success! Archive index downloaded by another member!") # Ensure that the community archive index exists in the file system of another member - logging.info("Verifying that another member has index CID file...") - has_archive_index = self.another_member.wakuext_service.has_community_archive(self.community_id) + logging.info("Verifying that another member has index CID file...") + has_archive_index = self.another_member.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Another member should have community archive index after messages are sent" logging.info("Success! 
Another member has index CID file.") @@ -176,9 +175,8 @@ def test_community_archive_index_exists(self): # Ensure that another member has downloaded the community archive and stored its ID in database logging.info("Verifying that another member has archive ID (HASH) recorded in the database...") - download_archive_ids = self.another_member.wakuext_service.get_downloaded_message_archive_ids(self.community_id) + download_archive_ids = self.another_member.wakuext_service.get_downloaded_message_archive_ids(community_id) assert len(download_archive_ids) == 1, "Another member should have exactly 1 archive ID downloaded" - download_archive_id = download_archive_ids[0] logging.info("Success! Another member has archive ID (HASH) recorded in the database!") # Note: Same as above - archives are automatically imported, so we skip checking @@ -188,16 +186,39 @@ def test_community_archive_index_exists(self): logging.info("Waiting for another member to start importing history archive messages...") self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_STARTED.value, timeout=archive_timeout) logging.info("Another member has started importing history archive messages.") - + # Wait for the archive import to complete for another member logging.info("Waiting for another member to finish importing history archive messages...") - self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_FINISHED.value, - timeout=archive_timeout) + self.another_member.wait_for_signal(SignalType.COMMUNITY_IMPORTING_HISTORY_ARCHIVE_MESSAGES_FINISHED.value, timeout=archive_timeout) logging.info("Another member has finished importing history archive messages.") # Verify that another member has the message after archive import logging.info("Verifying that another member has the message after archive import...") another_member_msgs_resp = self.another_member.wakuext_service.chat_messages(chat_id) assert another_member_msgs_resp.get("messages") is not None, "Another member should have messages after importing history archive" - assert another_member_msgs_resp.get("messages")[0].get("text") == text, "Another member should have the message after importing history archive" + assert ( + another_member_msgs_resp.get("messages")[0].get("text") == text + ), "Another member should have the message after importing history archive" logging.info("Success! Another member has the message after importing history archive.") + + def test_community_archive_exists_for_default_chat(self): + # Create a community + response = self.creator.wakuext_service.create_community("Codex community", "No one should join", historyArchiveSupportEnabled=True) + community_id = response.get("communities", [{}])[0].get("id") + default_chat_id = response.get("chats", [{}])[0].get("id") + + # Ensure that no community archive exists initially + has_archive_index = self.creator.wakuext_service.has_community_archive(community_id) + assert has_archive_index is False, "Creator should not have community archive initially" + + # Send a message to the default community chat + text = "Hi myself!" 
+ send_resp = self.creator.wakuext_service.send_chat_message(default_chat_id, text) + assert send_resp.get("chats")[0].get("lastMessage").get("text") == text + + # Wait for the community archive to be created for the community owner + self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10) + + # Ensure that the community archive exists in the file system for the community owner + has_archive = self.creator.wakuext_service.has_community_archive(community_id) + assert has_archive is True, "Creator should have community archive after messages are sent" From ee22f3556b010a395d399772ee171139cef3fffd Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Nov 2025 15:52:28 +0100 Subject: [PATCH 63/75] Fix snake case for historyArchiveSupportEnabled --- tests-functional/clients/services/wakuext.py | 4 ++-- tests-functional/steps/messenger.py | 4 ++-- tests-functional/tests/test_wakuext_community_archives.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index abdaf45a4ef..50a40cf523d 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -226,7 +226,7 @@ def create_community( membership: CommunityPermissionsAccess = CommunityPermissionsAccess.AUTO_ACCEPT, image="", image_rect=ImageCropRect(), - historyArchiveSupportEnabled=False, + history_archive_support_enabled=False, ): params = { "membership": membership.value, @@ -238,7 +238,7 @@ def create_community( "imageAy": image_rect.ay, "imageBx": image_rect.bx, "imageBy": image_rect.by, - "historyArchiveSupportEnabled": historyArchiveSupportEnabled, + "historyArchiveSupportEnabled": history_archive_support_enabled, } response = self.rpc_request("createCommunity", [params]) return response diff --git a/tests-functional/steps/messenger.py b/tests-functional/steps/messenger.py index aaf90b88d20..e8b401952be 100644 --- a/tests-functional/steps/messenger.py +++ b/tests-functional/steps/messenger.py @@ -153,9 +153,9 @@ def join_private_group(self, admin=None, member=None) -> str: ) return response.get("chats", [])[0].get("id") - def create_community(self, node, historyArchiveSupportEnabled=False): + def create_community(self, node, history_archive_support_enabled=False): response = node.wakuext_service.create_community( - fake.community_name(), fake.community_description(), historyArchiveSupportEnabled=historyArchiveSupportEnabled + fake.community_name(), fake.community_description(), history_archive_support_enabled=history_archive_support_enabled ) self.community_id = response.get("communities", [{}])[0].get("id") return self.community_id diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 003ceaa8a1b..9e4aa5262e1 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -50,7 +50,7 @@ def setup_backends(self, backend_new_profile): } def test_community_archive_index_exists(self): - community_id = self.create_community(self.creator, historyArchiveSupportEnabled=True) + community_id = self.create_community(self.creator, history_archive_support_enabled=True) # Ensure that no community archive exists initially has_archive_index = self.creator.wakuext_service.has_community_archive(community_id) @@ -203,7 +203,7 @@ def test_community_archive_index_exists(self): def 
test_community_archive_exists_for_default_chat(self):
         # Create a community
-        response = self.creator.wakuext_service.create_community("Codex community", "No one should join", historyArchiveSupportEnabled=True)
+        response = self.creator.wakuext_service.create_community("Codex community", "No one should join", history_archive_support_enabled=True)
         community_id = response.get("communities", [{}])[0].get("id")
         default_chat_id = response.get("chats", [{}])[0].get("id")

From f8a2a6ae78211763c68409023d14b2f1b0d586aa Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Thu, 13 Nov 2025 08:50:52 +0100
Subject: [PATCH 64/75] Add more tests

---
 .../tests/test_wakuext_community_archives.py | 61 +++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py
index 9e4aa5262e1..e1a6c897e99 100644
--- a/tests-functional/tests/test_wakuext_community_archives.py
+++ b/tests-functional/tests/test_wakuext_community_archives.py
@@ -222,3 +222,64 @@ def test_community_archive_exists_for_default_chat(self):
         # Ensure that the community archive exists in the file system for the community owner
         has_archive = self.creator.wakuext_service.has_community_archive(community_id)
         assert has_archive is True, "Creator should have community archive after messages are sent"
+
+    def test_archive_is_not_created_without_messages(self):
+        # Create a community
+        response = self.creator.wakuext_service.create_community("Codex community", "No one should join", history_archive_support_enabled=True)
+        community_id = response.get("communities", [{}])[0].get("id")
+
+        # Wait to be sure that the archive creation signal is not sent
+        with pytest.raises(TimeoutError):
+            self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10)
+
+        # Ensure that no community archive exists in the file system for the community owner
+        has_archive = self.creator.wakuext_service.has_community_archive(community_id)
+        assert has_archive is False, "Creator should not have community archive without messages"
+
+    def test_different_archives_are_created_with_multiple_messages(self):
+        community_id = self.create_community(self.creator, history_archive_support_enabled=True)
+
+        # Create community chat
+        create_resp = self.creator.wakuext_service.create_community_chat(community_id, self.chat_payload)
+        chat_id = create_resp.get("chats")[0].get("id")
+
+        # Join community so the member can receive messages
+        self.join_community(member=self.member, admin=self.creator)
+
+        for i in range(2):
+            # Send a message to the default community chat
+            text = f"Hi @{self.member.public_key}!"
+            self.creator.wakuext_service.send_chat_message(chat_id, text)
+
+            self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10)
+
+            # The timeout is arbitrarily set to 10 seconds
+            # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds
+            archive_timeout = 10
+
+            # Wait for the index download completed signal - at this stage the index and index CID files
+            # should both exist in the file system of the member.
+            logging.info("Waiting for community member to download archive index...")
+            self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout)
+            logging.info("Success! Archive index downloaded!")
+
+            # Ensure that the community archive index CID file exists in the file system for the member.
+            # After successfully downloading the archive index, its CID is stored in the
+            # index CID file and the file is written immediately after the archive index has been downloaded.
+            # Notice that at this stage, the node still does not have any single archive downloaded.
+            logging.info("Verifying that community member has index CID file...")
+            has_archive_index = self.member.wakuext_service.has_community_archive(community_id)
+            assert has_archive_index is True, "Member should have community archive index after messages are sent"
+            logging.info("Success! Community member has index CID file!")
+
+            # Wait for the community archives to be downloaded for the first member.
+            logging.info("Waiting for community member to download ALL history archives...")
+            self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout)
+            logging.info("Success! Community member has downloaded ALL history archives!")
+
+            # Once the historyArchivesSeeding signal is received, the database
+            # should be already updated: archive ID (HASH) should be stored in the database.
+            logging.info("Verifying that archive ID (HASH) is recorded in the database...")
+            download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(community_id)
+            assert len(download_archive_ids) == i + 1, f"Member should have exactly {i + 1} archive IDs downloaded"
+            logging.info("Success! Archive ID (HASH) is recorded in the database!")

From bf006704bc6aebd5adfa3142171698e06db87d87 Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Thu, 13 Nov 2025 13:36:39 +0100
Subject: [PATCH 65/75] Add codex_config_bootstrap_node configuration

---
 api/defaults.go                            |  5 +++++
 protocol/requests/create_account.go        | 11 ++++++-----
 tests-functional/clients/status_backend.py |  1 +
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/api/defaults.go b/api/defaults.go
index 9e02af2aa67..d070d42cc96 100644
--- a/api/defaults.go
+++ b/api/defaults.go
@@ -354,6 +354,10 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc
 		nodeConfig.CodexConfig.Enabled = *request.CodexConfigEnabled
 	}
 
+	if request.CodexConfigBootstrapNode != nil {
+		nodeConfig.CodexConfig.CodexNodeConfig.BootstrapNodes = []string{*request.CodexConfigBootstrapNode}
+	}
+
 	if request.ImportInitialDelay != nil {
 		nodeConfig.ImportInitialDelay = *request.ImportInitialDelay
 	}
@@ -370,6 +374,7 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc
 			BlockRetries:   params.BlockRetries,
 			MetricsEnabled: false,
 			LogFormat:      codex.LogFormatNoColors,
+			BootstrapNodes: nodeConfig.CodexConfig.CodexNodeConfig.BootstrapNodes,
 		},
 	}
 
diff --git a/protocol/requests/create_account.go b/protocol/requests/create_account.go
index 5864e77b842..23443aaf8f4 100644
--- a/protocol/requests/create_account.go
+++ b/protocol/requests/create_account.go
@@ -77,11 +77,12 @@ type CreateAccount struct {
 
 	WalletConfig WalletSecretsConfig
 
-	TorrentConfigEnabled   *bool
-	TorrentConfigPort      *int
-	CodexConfigEnabled     *bool
-	ImportInitialDelay     *int
-	MessageArchiveInterval *int
+	TorrentConfigEnabled     *bool
+	TorrentConfigPort        *int
+	CodexConfigEnabled       *bool
+	CodexConfigBootstrapNode *string
+	ImportInitialDelay       *int
+	MessageArchiveInterval   *int
 
 	APIConfig *APIConfig `json:"apiConfig"`
 
diff --git a/tests-functional/clients/status_backend.py b/tests-functional/clients/status_backend.py
index c3f1b0c286f..eb7e0f6b9e2 
100644 --- a/tests-functional/clients/status_backend.py +++ b/tests-functional/clients/status_backend.py @@ -280,6 +280,7 @@ def _create_account_request(self, user, **kwargs): "wsPort": constants.STATUS_CONNECTOR_WS_PORT, }, "codexConfigEnabled": kwargs.get("codex_config_enabled", False), + "codexConfigBootstrapNode": kwargs.get("codex_config_bootstrap_node", None), "importInitialDelay": kwargs.get("import_initial_delay", None), "messageArchiveInterval": kwargs.get("message_archive_interval", None), "torrentConfigEnabled": False, From 39242189a2a1f03754c1330d35fd2cae16f8898e Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 14 Nov 2025 06:13:56 +0100 Subject: [PATCH 66/75] chore: update codex version --- go.mod | 2 +- go.sum | 4 ++-- nix/pkgs/status-go/library/default.nix | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index ed54009bb89..cb71b820f40 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cockroachdb/errors v1.11.3 - github.com/codex-storage/codex-go-bindings v0.0.27 + github.com/codex-storage/codex-go-bindings v0.0.28 github.com/getsentry/sentry-go v0.29.1 github.com/golang-migrate/migrate/v4 v4.15.2 github.com/gorilla/sessions v1.2.1 diff --git a/go.sum b/go.sum index 6fbb1f9ea13..4c7d0c9a8d7 100644 --- a/go.sum +++ b/go.sum @@ -548,8 +548,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codex-storage/codex-go-bindings v0.0.27 h1:SdOvqK1e+NfdMmtIWSnWYM2/gbyYuvA4Or7kSmraxIg= -github.com/codex-storage/codex-go-bindings v0.0.27/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= +github.com/codex-storage/codex-go-bindings v0.0.28 h1:pTi3KY5/MXh2hRzgU5JOu+1lNZEs7SxDoLYtUQWJhq4= +github.com/codex-storage/codex-go-bindings v0.0.28/go.mod h1:hP/n9iDZqQP4MytkgUepl3yMMsZy5Jbk9lQbbbVJ51Q= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= diff --git a/nix/pkgs/status-go/library/default.nix b/nix/pkgs/status-go/library/default.nix index fddc34c5135..b088dbfeb6c 100644 --- a/nix/pkgs/status-go/library/default.nix +++ b/nix/pkgs/status-go/library/default.nix @@ -8,7 +8,7 @@ let optionalString = pkgs.lib.optionalString; - codexVersion = "v0.0.27"; + codexVersion = "v0.0.28"; arch = if stdenv.hostPlatform.isx86_64 then "amd64" else if stdenv.hostPlatform.isAarch64 then "arm64" @@ -16,10 +16,10 @@ let os = if stdenv.isDarwin then "macos" else "Linux"; hash = if stdenv.hostPlatform.isDarwin - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.27/codex-macos-arm64.zip | jq -r .hash - then "sha256-9E4NTmJU8+Mz8fHMmex7C0c/ppdMkBZ/dc3iesCbw+A=" - # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.27/codex-Linux-amd64.zip | jq -r .hash - 
else "sha256-ozpOPXJBHhpYElbtVISmWGq+gsQNPMvsNmJ4BLVA7Zk="; + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.28/codex-macos-arm64.zip | jq -r .hash + then "sha256-GcerkH8izZ5QHG5ARNNrM1fktaeBKjF6AGNsA6vxVj0=" + # nix store prefetch-file --json --unpack https://github.com/codex-storage/codex-go-bindings/releases/download/v0.0.28/codex-Linux-amd64.zip | jq -r .hash + else "sha256-sYhbgBN0LNA7YhmBigPwo1h34QTADTxFGjO8QAw8m18="; # Pre-fetch libcodex to avoid network during build codexLib = pkgs.fetchzip { From 083cb241876caca52c66ecbb460fd6f201291b1a Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 14 Nov 2025 06:14:13 +0100 Subject: [PATCH 67/75] chore: still updating codex version --- .../codex-storage/codex-go-bindings/codex/codex.go | 4 ++-- .../codex-go-bindings/codex/testutil.go | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/vendor/github.com/codex-storage/codex-go-bindings/codex/codex.go b/vendor/github.com/codex-storage/codex-go-bindings/codex/codex.go index 24495b48dad..ab62c686c09 100644 --- a/vendor/github.com/codex-storage/codex-go-bindings/codex/codex.go +++ b/vendor/github.com/codex-storage/codex-go-bindings/codex/codex.go @@ -159,12 +159,12 @@ type Config struct { // Default block timeout in seconds - 0 disables the ttl // Default: 30 days - BlockTtl int `json:"block-ttl,omitempty"` + BlockTtl string `json:"block-ttl,omitempty"` // Time interval in seconds - determines frequency of block // maintenance cycle: how often blocks are checked for expiration and cleanup // Default: 10 minutes - BlockMaintenanceInterval int `json:"block-mi,omitempty"` + BlockMaintenanceInterval string `json:"block-mi,omitempty"` // Number of blocks to check every maintenance cycle // Default: 1000 diff --git a/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go b/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go index fb0471c7701..2d297da064f 100644 --- a/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go +++ b/vendor/github.com/codex-storage/codex-go-bindings/codex/testutil.go @@ -47,6 +47,18 @@ func newCodexNode(t *testing.T, opts ...Config) *CodexNode { if c.StorageQuota != 0 { config.StorageQuota = c.StorageQuota } + + if c.NumThreads != 0 { + config.NumThreads = c.NumThreads + } + + if c.BlockTtl != "" { + config.BlockTtl = c.BlockTtl + } + + if c.BlockMaintenanceInterval != "" { + config.BlockMaintenanceInterval = c.BlockMaintenanceInterval + } } node, err := New(config) From 2451290c66f89fbb6b6390e1d3d241fa33d49ee1 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 14 Nov 2025 06:14:35 +0100 Subject: [PATCH 68/75] chore: disable codex discovery port random --- protocol/communities/manager_archive.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 598db1ff956..6312a27d498 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -296,9 +296,9 @@ func (m *ArchiveManager) StartCodexClient() error { cfgCopy := *m.codexConfig cfgCopy.CodexNodeConfig = m.codexConfig.CodexNodeConfig - if err := m.ensureCodexDiscoveryPort(&cfgCopy); err != nil { - return err - } + // if err := m.ensureCodexDiscoveryPort(&cfgCopy); err != nil { + // return err + // } client, err := NewCodexClient(cfgCopy) if err != nil { @@ -306,8 +306,7 @@ func (m *ArchiveManager) StartCodexClient() error { } m.codexClient = 
client m.ArchiveFileManager.codexClient = client - m.isCodexClientStarted = true - m.codexConfig.CodexNodeConfig.DiscoveryPort = cfgCopy.CodexNodeConfig.DiscoveryPort + // m.codexConfig.CodexNodeConfig.DiscoveryPort = cfgCopy.CodexNodeConfig.DiscoveryPort if err := m.codexClient.Start(); err != nil { m.isCodexClientStarted = false @@ -316,6 +315,8 @@ func (m *ArchiveManager) StartCodexClient() error { return err } + m.isCodexClientStarted = true + return nil } From e5e43ea19830f727c61183dfdd93579615c2f91b Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 14 Nov 2025 07:13:48 +0100 Subject: [PATCH 69/75] chore: add a test to check if the archive is downloaded after logout then login --- protocol/messenger.go | 5 + services/ext/api.go | 14 +++ tests-functional/clients/services/wakuext.py | 5 + .../tests/test_wakuext_community_archives.py | 98 +++++++++++++++++-- vendor/modules.txt | 2 +- 5 files changed, 114 insertions(+), 10 deletions(-) diff --git a/protocol/messenger.go b/protocol/messenger.go index 8d62d01a938..1b8b011ec2a 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -4616,3 +4616,8 @@ func (m *Messenger) GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ( func (m *Messenger) GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) { return m.archiveManager.GetMessageArchiveIDsToImport(communityID) } + +func (m *Messenger) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { + messageArchiveInterval = duration + return duration, nil +} diff --git a/services/ext/api.go b/services/ext/api.go index 3732e272e48..eface5a6b75 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -3,6 +3,7 @@ package ext import ( "context" "crypto/ecdsa" + "errors" "time" "github.com/codex-storage/codex-go-bindings/codex" @@ -1619,3 +1620,16 @@ func (m *PublicAPI) GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ( func (m *PublicAPI) GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) { return m.service.messenger.GetMessageArchiveIDsToImport(communityID) } + +func (api *PublicAPI) UpdateMessageArchiveInterval(duration time.Duration) (time.Duration, error) { + if duration <= 0 { + return 0, errors.New("duration must be greater than zero") + } + + d := duration * time.Second + updatedInterval, err := api.service.messenger.UpdateMessageArchiveInterval(d) + if err != nil { + return 0, err + } + return updatedInterval / time.Second, nil +} diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index 50a40cf523d..4067bec8a5d 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -801,3 +801,8 @@ def get_message_archive_ids_to_import(self, community_id: str): params = [community_id] response = self.rpc_request("getMessageArchiveIDsToImport", params) return response + + def update_message_archive_interval(self, duration_seconds: int): + params = [duration_seconds] + response = self.rpc_request("updateMessageArchiveInterval", params) + return response diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index e1a6c897e99..12ea64ee9fa 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -1,6 +1,7 @@ import logging from uuid import uuid4 import pytest +import time from steps.messenger import MessengerSteps from clients.signals import SignalType 
@@ -11,10 +12,9 @@ class TestCommunityArchives(MessengerSteps): @pytest.fixture(autouse=True) def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" - self.message_archive_interval = 80 # Community owner - self.creator = backend_new_profile("creator", codex_config_enabled=True, message_archive_interval=self.message_archive_interval) + self.creator = backend_new_profile("creator", codex_config_enabled=True) # Define codex as archive distribution preference self.creator.wakuext_service.set_archive_distribution_preference("codex") @@ -50,6 +50,9 @@ def setup_backends(self, backend_new_profile): } def test_community_archive_index_exists(self): + message_archive_interval = 80 + self.creator.wakuext_service.update_message_archive_interval(message_archive_interval) + community_id = self.create_community(self.creator, history_archive_support_enabled=True) # Ensure that no community archive exists initially @@ -81,10 +84,10 @@ def test_community_archive_index_exists(self): message_text = member_msgs_resp.get("messages")[0].get("text") assert message_text == text, "Member should have received the message" - logging.info(f"Waiting {self.message_archive_interval + 10}s for community owner to create archive...") + logging.info(f"Waiting {message_archive_interval + 10}s for community owner to create archive...") # Wait for the community archive to be created for the community owner - self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10) + self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=message_archive_interval + 10) logging.info("Checking that community owner has local index CID file...") @@ -202,6 +205,9 @@ def test_community_archive_index_exists(self): logging.info("Success! 
Another member has the message after importing history archive.")
 
     def test_community_archive_exists_for_default_chat(self):
+        message_archive_interval = 10
+        self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
+
         # Create a community
         response = self.creator.wakuext_service.create_community("Codex community", "No one should join", history_archive_support_enabled=True)
         community_id = response.get("communities", [{}])[0].get("id")
@@ -217,26 +223,30 @@ def test_community_archive_exists_for_default_chat(self):
         assert send_resp.get("chats")[0].get("lastMessage").get("text") == text
 
         # Wait for the community archive to be created for the community owner
-        self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10)
+        self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=message_archive_interval + 10)
 
         # Ensure that the community archive exists in the file system for the community owner
         has_archive = self.creator.wakuext_service.has_community_archive(community_id)
         assert has_archive is True, "Creator should have community archive after messages are sent"
 
     def test_archive_is_not_created_without_messages(self):
+        message_archive_interval = 10
+        self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
+
         # Create a community
         response = self.creator.wakuext_service.create_community("Codex community", "No one should join", history_archive_support_enabled=True)
         community_id = response.get("communities", [{}])[0].get("id")
 
-        # Wait to be sure that the archive creation signal is not sent
-        with pytest.raises(TimeoutError):
-            self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10)
+        time.sleep(message_archive_interval + 10)
 
         # Ensure that no community archive exists in the file system for the community owner
         has_archive = self.creator.wakuext_service.has_community_archive(community_id)
         assert has_archive is False, "Creator should not have community archive without messages"
 
     def test_different_archives_are_created_with_multiple_messages(self):
+        message_archive_interval = 10
+        self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
+
         community_id = self.create_community(self.creator, history_archive_support_enabled=True)
 
         # Create community chat
@@ -251,7 +261,7 @@ def test_different_archives_are_created_with_multiple_messages(self):
             text = f"Hi @{self.member.public_key}!"
             self.creator.wakuext_service.send_chat_message(chat_id, text)
 
-            self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=self.message_archive_interval + 10)
+            self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=message_archive_interval + 10)
 
             # The timeout is arbitrarily set to 10 seconds
             # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds
@@ -283,3 +293,73 @@ def test_different_archives_are_created_with_multiple_messages(self):
             download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(community_id)
             assert len(download_archive_ids) == i + 1, f"Member should have exactly {i + 1} archive IDs downloaded"
             logging.info("Success! Archive ID (HASH) is recorded in the database!")
+
+    def test_archive_is_downloaded_after_logout_login(self):
+        message_archive_interval = 10
+        self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
+
+        community_id = self.create_community(self.creator, history_archive_support_enabled=True)
+
+        # Ensure that no community archive exists initially
+        has_archive_index = self.creator.wakuext_service.has_community_archive(community_id)
+        assert has_archive_index is False, "Creator should not have community archive initially"
+        has_archive_index = self.member.wakuext_service.has_community_archive(community_id)
+        assert has_archive_index is False, "Member should not have community archive initially"
+
+        create_resp = self.creator.wakuext_service.create_community_chat(community_id, self.chat_payload)
+        chat_id = create_resp.get("chats")[0].get("id")
+
+        # Wait for member to receive chat creation signal
+        self.join_community(member=self.member, admin=self.creator)
+        self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=chat_id, timeout=10)
+
+        # Send a message to the community chat
+        text = f"Hi @{self.member.public_key}"
+        send_resp = self.creator.wakuext_service.send_chat_message(chat_id, text)
+        message_id = send_resp.get("messages", [])[0].get("id", "")
+
+        # Wait for member to receive the new message
+        self.member.find_signal_containing_pattern(SignalType.MESSAGES_NEW.value, event_pattern=message_id, timeout=10)
+
+        # Logout the member to simulate offline scenario
+        key_uid = str(self.member.key_uid)
+        self.member.logout()
+        self.member.wait_for_logout()
+
+        logging.info(f"Waiting {message_archive_interval + 10}s for community owner to create archive...")
+        # Wait for the community archive to be created for the community owner
+        self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=message_archive_interval + 10)
+        logging.info("Success! History archive created and dispatched!")
+
+        # Login the member back
+        self.member.login(key_uid)
+        self.member.wait_for_login()
+        self.member.wakuext_service.start_messenger()
+
+        # Re-connect member to community codex client
+        info = self.creator.wakuext_service.debug()
+        self.member.wakuext_service.connect(info["id"], info["addrs"])
+
+        # The timeout is arbitrarily set to 20 seconds
+        # We need to wait for the archive dispatch + download + import which should not take more than 20 seconds
+        archive_timeout = 20
+
+        logging.info("Waiting for community member to download manifest of the archive index...")
+        # Wait for the community member to download the archive index manifest
+        self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout)
+        logging.info("Success! Manifest of the archive index fetched!")
+
+        # Wait for the index download completed signal - at this stage the index and index CID files
+        # should both exist in the file system of the member.
+        logging.info("Waiting for community member to download archive index...")
+        self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout)
+        logging.info("Success! Archive index downloaded!")
+
+        # Ensure that the community archive index CID file exists in the file system for the member.
+        # After successfully downloading the archive index, its CID is stored in the
+        # index CID file and the file is written immediately after the archive index has been downloaded.
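+        # has_community_archive checks for this index CID file, which is why the assertion below can pass before any archive data has been fetched.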
+ # Notice that at this stage, the node still does not have any single archive downloaded. + logging.info("Verifying that community member has index CID file...") + has_archive_index = self.member.wakuext_service.has_community_archive(community_id) + assert has_archive_index is True, "Member should have community archive index after messages are sent" + logging.info("Success! Community member has index CID file!") diff --git a/vendor/modules.txt b/vendor/modules.txt index 0da82434c6c..981d060a595 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -269,7 +269,7 @@ github.com/cockroachdb/redact/internal/rfmt/fmtsort # github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 ## explicit; go 1.19 github.com/cockroachdb/tokenbucket -# github.com/codex-storage/codex-go-bindings v0.0.27 +# github.com/codex-storage/codex-go-bindings v0.0.28 ## explicit; go 1.24.0 github.com/codex-storage/codex-go-bindings/codex # github.com/consensys/gnark-crypto v0.18.0 From 5fefb80dee8e9fa8ebb44a1c6025a67b9d153624 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 14 Nov 2025 10:50:10 +0100 Subject: [PATCH 70/75] adds more logs to SetOnline --- protocol/communities/manager_archive.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 6312a27d498..ec724ad07f1 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -107,22 +107,28 @@ func (m *ArchiveManager) GetCodexClient() CodexClientInterface { } func (m *ArchiveManager) SetOnline(online bool) { + m.logger.Info("[CODEX][set_online] testing online status:", zap.Bool("online", online)) if online { + m.logger.Info("[CODEX][set_online] Online: checking if torrent/codex clients need to be started...") m.codexClientMu.RLock() codexStarted := m.isCodexClientStarted m.codexClientMu.RUnlock() + m.logger.Info("[CODEX][set_online] Online. 
codexStarted:", zap.Bool("codexStarted", codexStarted)) + if m.torrentConfig != nil && m.torrentConfig.Enabled && !m.torrentClientStarted() { + m.logger.Info("[CODEX][set_online] Starting torrent client...") err := m.StartTorrentClient() if err != nil { - m.logger.Error("couldn't start torrent client", zap.Error(err)) + m.logger.Error("[CODEX][set_online] couldn't start torrent client", zap.Error(err)) } } if m.codexConfig != nil && m.codexConfig.Enabled && !codexStarted { + m.logger.Info("[CODEX][set_online] Starting codex client...") err := m.StartCodexClient() if err != nil { - m.logger.Error("[CODEX] couldn't start codex client", zap.Error(err)) + m.logger.Error("[CODEX][set_online] couldn't start codex client", zap.Error(err)) } } } @@ -300,6 +306,8 @@ func (m *ArchiveManager) StartCodexClient() error { // return err // } + m.logger.Info("[CODEX][start_codex_config] Using the following CodexNodeConfig", zap.Any("config", cfgCopy.CodexNodeConfig)) + client, err := NewCodexClient(cfgCopy) if err != nil { return err From 6a65ed54ab09701cd6002d4617dddb483c2f5659 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 14 Nov 2025 11:09:07 +0100 Subject: [PATCH 71/75] remove ensureCodexDiscoveryPort --- protocol/communities/manager_archive.go | 44 ------------------------- 1 file changed, 44 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index ec724ad07f1..e5457d7acb1 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -209,45 +209,6 @@ func (m *ArchiveManager) getFreeUDPPort() (int, error) { return udpListener.LocalAddr().(*net.UDPAddr).Port, nil } -func (m *ArchiveManager) ensureCodexDiscoveryPort(config *params.CodexConfig) error { - checkPortAvailable := func(port int) (bool, error) { - addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort("localhost", fmt.Sprintf("%d", port))) - if err != nil { - return false, err - } - // Attempt to listen on the port; if it succeeds, it's available. 
- conn, err := net.ListenUDP("udp", addr) - if err != nil { - return false, nil - } - _ = conn.Close() - return true, nil - } - - port := config.CodexNodeConfig.DiscoveryPort - if port != 0 { - available, err := checkPortAvailable(port) - if err != nil { - return err - } - if available { - return nil - } - m.logger.Warn("[CODEX] discovery port already in use, selecting a free one", zap.Int("port", port)) - } - - for range 10 { - freePort, err := m.getFreeUDPPort() - if err != nil { - continue - } - config.CodexNodeConfig.DiscoveryPort = freePort - return nil - } - - return fmt.Errorf("no free discovery port found for codex") -} - func (m *ArchiveManager) StartTorrentClient() error { if m.torrentConfig == nil { return fmt.Errorf("can't start torrent client: missing torrentConfig") @@ -302,10 +263,6 @@ func (m *ArchiveManager) StartCodexClient() error { cfgCopy := *m.codexConfig cfgCopy.CodexNodeConfig = m.codexConfig.CodexNodeConfig - // if err := m.ensureCodexDiscoveryPort(&cfgCopy); err != nil { - // return err - // } - m.logger.Info("[CODEX][start_codex_config] Using the following CodexNodeConfig", zap.Any("config", cfgCopy.CodexNodeConfig)) client, err := NewCodexClient(cfgCopy) @@ -314,7 +271,6 @@ func (m *ArchiveManager) StartCodexClient() error { } m.codexClient = client m.ArchiveFileManager.codexClient = client - // m.codexConfig.CodexNodeConfig.DiscoveryPort = cfgCopy.CodexNodeConfig.DiscoveryPort if err := m.codexClient.Start(); err != nil { m.isCodexClientStarted = false From 5ae72d38cce5e6c4c4667dd08aca4be4ca906a4b Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 14 Nov 2025 16:11:53 +0100 Subject: [PATCH 72/75] passing CodexNode config via EnableCodexCommunityHistoryArchiveProtocol --- protocol/communities/manager_archive.go | 2 +- ...nities_messenger_token_permissions_test.go | 4 +- protocol/messenger_communities.go | 266 ++++++++++++++++-- services/ext/api.go | 4 + tests-functional/clients/services/wakuext.py | 13 + .../tests/test_wakuext_community_archives.py | 33 ++- 6 files changed, 283 insertions(+), 39 deletions(-) diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index e5457d7acb1..5fe33f43adc 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -263,7 +263,7 @@ func (m *ArchiveManager) StartCodexClient() error { cfgCopy := *m.codexConfig cfgCopy.CodexNodeConfig = m.codexConfig.CodexNodeConfig - m.logger.Info("[CODEX][start_codex_config] Using the following CodexNodeConfig", zap.Any("config", cfgCopy.CodexNodeConfig)) + m.logger.Info("[CODEX][start_codex_client] Using the following CodexNodeConfig", zap.Any("config", cfgCopy.CodexNodeConfig)) client, err := NewCodexClient(cfgCopy) if err != nil { diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index c0bc131ba2e..6b2c06804e1 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -3164,8 +3164,8 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Assert().False(ownerNodeCfgFromDB.CodexConfig.Enabled) s.Assert().False(bobNodeCfgFromDB.CodexConfig.Enabled) - s.Require().NoError(s.owner.EnableCommunityHistoryArchiveProtocol()) - s.Require().NoError(s.bob.EnableCommunityHistoryArchiveProtocol()) + s.Require().NoError(s.owner.EnableCodexCommunityHistoryArchiveProtocol(nil)) + 
s.Require().NoError(s.bob.EnableCodexCommunityHistoryArchiveProtocol(nil)) ownerNodeCfgFromDB2, err := s.owner.settings.GetNodeConfig() s.Require().NoError(err) diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index 764a07328a8..bb030946ccf 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -7,7 +7,9 @@ import ( "encoding/json" "errors" "fmt" + "reflect" "slices" + "strconv" "strings" "sync" "time" @@ -4268,58 +4270,125 @@ func (m *Messenger) dispatchIndexCidMessage(communityID string) error { } func (m *Messenger) EnableCommunityHistoryArchiveProtocol() error { + m.logger.Info("enabling community history archive protocol") + m.logger.Info("checking archive distribution preference") + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() + if err != nil { + return err + } + + if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] archive distribution preference is codex, skipping enabling Torrent distribution") + return nil + } nodeConfig, err := m.settings.GetNodeConfig() if err != nil { return err } - if nodeConfig.TorrentConfig.Enabled || nodeConfig.CodexConfig.Enabled { + if nodeConfig.CodexConfig.Enabled { + m.logger.Info("Codex archive distribution is enabled") + return fmt.Errorf("cannot enable Torrent archive distribution when Codex archive distribution is already enabled") + } + + if nodeConfig.TorrentConfig.Enabled { + m.logger.Info("Torrent archive distribution is already enabled") return nil } + m.logger.Info("enabling torrent archive distribution") + nodeConfig.TorrentConfig.Enabled = true + err = m.settings.SaveSetting("node-config", nodeConfig) + if err != nil { + return err + } + + m.config.torrentConfig = &nodeConfig.TorrentConfig + m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) + err = m.archiveManager.StartTorrentClient() + if err != nil { + return err + } + + controlledCommunities, err := m.communitiesManager.Controlled() + if err != nil { + return err + } + + if len(controlledCommunities) > 0 { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] initializing history archive tasks for controlled communities", zap.Int("count", len(controlledCommunities))) + go m.InitHistoryArchiveTasks(controlledCommunities) + } + if m.config.messengerSignalsHandler != nil { + m.config.messengerSignalsHandler.HistoryArchivesProtocolEnabled() + } + return nil +} + +func (m *Messenger) EnableCodexCommunityHistoryArchiveProtocol(overrides map[string]string) error { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] enabling community history archive protocol") + m.logger.Info("[CODEX][enable_community_history_archive_protocol] checking archive distribution preference") + archiveDistributionPreference, err := m.GetArchiveDistributionPreference() if err != nil { return err } if archiveDistributionPreference == communities.ArchiveDistributionMethodTorrent { - nodeConfig.TorrentConfig.Enabled = true - err = m.settings.SaveSetting("node-config", nodeConfig) - if err != nil { - return err - } + m.logger.Info("[CODEX][enable_community_history_archive_protocol] archive distribution preference is torrent, skipping Codex enabling") + return nil + } - m.config.torrentConfig = &nodeConfig.TorrentConfig - m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) - err = m.archiveManager.StartTorrentClient() - if err != nil { - return err - } + nodeConfig, err := 
m.settings.GetNodeConfig() + if err != nil { + return err } - if archiveDistributionPreference == communities.ArchiveDistributionMethodCodex { - nodeConfig.CodexConfig.Enabled = true + m.logger.Info("[CODEX][enable_community_history_archive_protocol] current CodexConfig for history archive protocol", zap.Any("CodexConfig", nodeConfig.CodexConfig)) - err = m.settings.SaveSetting("node-config", nodeConfig) - if err != nil { - return err - } + if nodeConfig.TorrentConfig.Enabled { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] torrent archive distribution is enabled") + return fmt.Errorf("cannot enable Codex archive distribution when Torrent archive distribution is already enabled") + } - m.config.codexConfig = &nodeConfig.CodexConfig - m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) + if nodeConfig.CodexConfig.Enabled { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] codex archive distribution is already enabled") + return nil + } - err = m.archiveManager.StartCodexClient() - if err != nil { + if len(overrides) > 0 { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] applying CodexConfig overrides", zap.Any("overrides", overrides)) + if err := applyCodexConfigOverrides(&nodeConfig.CodexConfig, overrides); err != nil { return err } } + m.logger.Info("[CODEX][enable_community_history_archive_protocol] enabling codex archive distribution") + nodeConfig.CodexConfig.Enabled = true + + err = m.settings.SaveSetting("node-config", nodeConfig) + if err != nil { + return err + } + + m.logger.Info("[CODEX][enable_community_history_archive_protocol] CodexConfig (with potential overrides)", zap.Any("CodexConfig", nodeConfig.CodexConfig)) + + m.config.codexConfig = &nodeConfig.CodexConfig + m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) + + m.logger.Info("[CODEX][enable_community_history_archive_protocol] starting codex client") + err = m.archiveManager.StartCodexClient() + if err != nil { + return err + } + controlledCommunities, err := m.communitiesManager.Controlled() if err != nil { return err } if len(controlledCommunities) > 0 { + m.logger.Info("[CODEX][enable_community_history_archive_protocol] initializing history archive tasks for controlled communities", zap.Int("count", len(controlledCommunities))) go m.InitHistoryArchiveTasks(controlledCommunities) } if m.config.messengerSignalsHandler != nil { @@ -4328,13 +4397,139 @@ func (m *Messenger) EnableCommunityHistoryArchiveProtocol() error { return nil } +func applyCodexConfigOverrides(cfg *params.CodexConfig, overrides map[string]string) error { + if cfg == nil || len(overrides) == 0 { + return nil + } + + for key, raw := range overrides { + key = strings.TrimSpace(key) + if key == "" { + continue + } + if err := setStructFieldValue(reflect.ValueOf(cfg), key, raw); err != nil { + return fmt.Errorf("failed to apply CodexConfig override %q: %w", key, err) + } + } + + return nil +} + +func setStructFieldValue(target reflect.Value, path, raw string) error { + if target.Kind() != reflect.Pointer { + return fmt.Errorf("target must be a pointer, got %s", target.Kind()) + } + + current := target.Elem() + if !current.IsValid() { + return fmt.Errorf("invalid target for path %q", path) + } + + parts := strings.Split(path, ".") + for idx, part := range parts { + if part == "" { + return fmt.Errorf("invalid empty segment in path %q", path) + } + field := current.FieldByName(part) + if !field.IsValid() { + return fmt.Errorf("unknown field %q in path %q", part, path) + } + + if idx == 
len(parts)-1 { + if !field.CanSet() { + return fmt.Errorf("cannot set field %q in path %q", part, path) + } + return assignValue(field, raw) + } + + switch field.Kind() { + case reflect.Struct: + current = field + case reflect.Pointer: + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + current = field.Elem() + default: + return fmt.Errorf("field %q in path %q is not addressable struct or pointer", part, path) + } + } + + return nil +} + +func assignValue(field reflect.Value, raw string) error { + switch field.Kind() { + case reflect.String: + field.SetString(raw) + case reflect.Bool: + parsed, err := strconv.ParseBool(raw) + if err != nil { + return err + } + field.SetBool(parsed) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + parsed, err := strconv.ParseInt(raw, 10, field.Type().Bits()) + if err != nil { + return err + } + field.SetInt(parsed) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + parsed, err := strconv.ParseUint(raw, 10, field.Type().Bits()) + if err != nil { + return err + } + field.SetUint(parsed) + case reflect.Float32, reflect.Float64: + parsed, err := strconv.ParseFloat(raw, field.Type().Bits()) + if err != nil { + return err + } + field.SetFloat(parsed) + case reflect.Slice: + return assignSlice(field, raw) + default: + return fmt.Errorf("unsupported field kind %s", field.Kind()) + } + + return nil +} + +func assignSlice(field reflect.Value, raw string) error { + elemKind := field.Type().Elem().Kind() + switch elemKind { + case reflect.String: + if raw == "" { + field.Set(reflect.Zero(field.Type())) + return nil + } + + var parsed []string + if err := json.Unmarshal([]byte(raw), &parsed); err != nil { + chunks := strings.SplitSeq(raw, ",") + for chunk := range chunks { + chunk = strings.TrimSpace(chunk) + if chunk == "" { + continue + } + parsed = append(parsed, chunk) + } + } + field.Set(reflect.ValueOf(parsed)) + default: + return fmt.Errorf("unsupported slice element kind %s", elemKind) + } + + return nil +} + func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { nodeConfig, err := m.settings.GetNodeConfig() if err != nil { return err } - if !nodeConfig.TorrentConfig.Enabled { + if !nodeConfig.TorrentConfig.Enabled && !nodeConfig.CodexConfig.Enabled { return nil } @@ -4343,13 +4538,26 @@ func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { m.logger.Error("failed to stop torrent manager", zap.Error(err)) } - nodeConfig.TorrentConfig.Enabled = false - err = m.settings.SaveSetting("node-config", nodeConfig) - m.config.torrentConfig = &nodeConfig.TorrentConfig - m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) - if err != nil { - return err + if nodeConfig.TorrentConfig.Enabled { + nodeConfig.TorrentConfig.Enabled = false + err = m.settings.SaveSetting("node-config", nodeConfig) + m.config.torrentConfig = &nodeConfig.TorrentConfig + m.archiveManager.SetTorrentConfig(&nodeConfig.TorrentConfig) + if err != nil { + return err + } } + + if nodeConfig.CodexConfig.Enabled { + nodeConfig.CodexConfig.Enabled = false + err = m.settings.SaveSetting("node-config", nodeConfig) + m.config.codexConfig = &nodeConfig.CodexConfig + m.archiveManager.SetCodexConfig(&nodeConfig.CodexConfig) + if err != nil { + return err + } + } + if m.config.messengerSignalsHandler != nil { m.config.messengerSignalsHandler.HistoryArchivesProtocolDisabled() } diff --git a/services/ext/api.go b/services/ext/api.go index eface5a6b75..6fcb39549fa 100644 --- 
a/services/ext/api.go +++ b/services/ext/api.go @@ -1183,6 +1183,10 @@ func (api *PublicAPI) GetCommunitiesSettings() ([]communities.CommunitySettings, return api.service.messenger.GetCommunitiesSettings() } +func (api *PublicAPI) EnableCodexCommunityHistoryArchiveProtocol(overrides map[string]string) error { + return api.service.messenger.EnableCodexCommunityHistoryArchiveProtocol(overrides) +} + func (api *PublicAPI) EnableCommunityHistoryArchiveProtocol() error { return api.service.messenger.EnableCommunityHistoryArchiveProtocol() } diff --git a/tests-functional/clients/services/wakuext.py b/tests-functional/clients/services/wakuext.py index 4067bec8a5d..1c700887f49 100644 --- a/tests-functional/clients/services/wakuext.py +++ b/tests-functional/clients/services/wakuext.py @@ -806,3 +806,16 @@ def update_message_archive_interval(self, duration_seconds: int): params = [duration_seconds] response = self.rpc_request("updateMessageArchiveInterval", params) return response + + def enable_community_history_archive_protocol(self): + return self.rpc_request("enableCommunityHistoryArchiveProtocol") + + def enable_codex_community_history_archive_protocol(self, codex_overrides=None): + params = [] + if codex_overrides: + params = [{k: str(v) for k, v in codex_overrides.items()}] + return self.rpc_request("enableCodexCommunityHistoryArchiveProtocol", params) + + def disable_community_history_archive_protocol(self): + response = self.rpc_request("disableCommunityHistoryArchiveProtocol") + return response diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 12ea64ee9fa..83847c0c833 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -14,23 +14,42 @@ def setup_backends(self, backend_new_profile): """Initialize three backends (creator, member and another_member) for each test function""" # Community owner - self.creator = backend_new_profile("creator", codex_config_enabled=True) + self.creator = backend_new_profile("creator") # Define codex as archive distribution preference self.creator.wakuext_service.set_archive_distribution_preference("codex") + # Enable community history archive protocol + self.creator.wakuext_service.enable_codex_community_history_archive_protocol( + { + "CodexNodeConfig.DiscoveryPort": 8091, + } + ) + + info = self.creator.wakuext_service.debug() # Create a first member that will join the community first - self.member = backend_new_profile("member", codex_config_enabled=True, import_initial_delay=5) + self.member = backend_new_profile("member", import_initial_delay=5) # Define codex as archive distribution preference self.member.wakuext_service.set_archive_distribution_preference("codex") + self.member.wakuext_service.enable_codex_community_history_archive_protocol( + { + "CodexNodeConfig.DiscoveryPort": 8092, + "CodexNodeConfig.BootstrapNodes": f'["{info["spr"]}"]', + } + ) # Create another member that will join the community later after the first message is sent - self.another_member = backend_new_profile("member", codex_config_enabled=True, import_initial_delay=5) + self.another_member = backend_new_profile("member", import_initial_delay=5) # Define codex as archive distribution preference self.another_member.wakuext_service.set_archive_distribution_preference("codex") - - # Connect members to community codex client - # In the real life, this would be done via discovery - info = 
self.creator.wakuext_service.debug() + self.another_member.wakuext_service.enable_codex_community_history_archive_protocol( + { + "CodexNodeConfig.DiscoveryPort": 8093, + "CodexNodeConfig.BootstrapNodes": f'["{info["spr"]}"]', + } + ) + + # Using bootstrap nodes does not seem to be working in our setup, + # thus we need to connect members manually. self.member.wakuext_service.connect(info["id"], info["addrs"]) self.another_member.wakuext_service.connect(info["id"], info["addrs"]) From 64f21b16b89f86237ce7f05ea0cbfcd6ffd15ee6 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Fri, 14 Nov 2025 17:42:25 +0100 Subject: [PATCH 73/75] extracts CodexConfig overrides to a separate module --- .../communities/codex_config_overrides.go | 137 +++++++++++ .../codex_config_overrides_test.go | 221 ++++++++++++++++++ protocol/messenger_communities.go | 130 +---------- 3 files changed, 359 insertions(+), 129 deletions(-) create mode 100644 protocol/communities/codex_config_overrides.go create mode 100644 protocol/communities/codex_config_overrides_test.go diff --git a/protocol/communities/codex_config_overrides.go b/protocol/communities/codex_config_overrides.go new file mode 100644 index 00000000000..0dbb354179b --- /dev/null +++ b/protocol/communities/codex_config_overrides.go @@ -0,0 +1,137 @@ +package communities + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/status-im/status-go/params" +) + +func ApplyCodexConfigOverrides(cfg *params.CodexConfig, overrides map[string]string) error { + if cfg == nil || len(overrides) == 0 { + return nil + } + + for key, raw := range overrides { + key = strings.TrimSpace(key) + if key == "" { + continue + } + if err := setStructFieldValue(reflect.ValueOf(cfg), key, raw); err != nil { + return fmt.Errorf("failed to apply CodexConfig override %q: %w", key, err) + } + } + + return nil +} + +func setStructFieldValue(target reflect.Value, path, raw string) error { + if target.Kind() != reflect.Pointer { + return fmt.Errorf("target must be a pointer, got %s", target.Kind()) + } + + current := target.Elem() + if !current.IsValid() { + return fmt.Errorf("invalid target for path %q", path) + } + + parts := strings.Split(path, ".") + for idx, part := range parts { + if part == "" { + return fmt.Errorf("invalid empty segment in path %q", path) + } + field := current.FieldByName(part) + if !field.IsValid() { + return fmt.Errorf("unknown field %q in path %q", part, path) + } + + if idx == len(parts)-1 { + if !field.CanSet() { + return fmt.Errorf("cannot set field %q in path %q", part, path) + } + return assignValue(field, raw) + } + + switch field.Kind() { + case reflect.Struct: + current = field + case reflect.Pointer: + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + current = field.Elem() + default: + return fmt.Errorf("field %q in path %q is not addressable struct or pointer", part, path) + } + } + + return nil +} + +func assignValue(field reflect.Value, raw string) error { + switch field.Kind() { + case reflect.String: + field.SetString(raw) + case reflect.Bool: + parsed, err := strconv.ParseBool(raw) + if err != nil { + return err + } + field.SetBool(parsed) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + parsed, err := strconv.ParseInt(raw, 10, field.Type().Bits()) + if err != nil { + return err + } + field.SetInt(parsed) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + parsed, err := strconv.ParseUint(raw, 10, field.Type().Bits()) + if err 
!= nil { + return err + } + field.SetUint(parsed) + case reflect.Float32, reflect.Float64: + parsed, err := strconv.ParseFloat(raw, field.Type().Bits()) + if err != nil { + return err + } + field.SetFloat(parsed) + case reflect.Slice: + return assignSlice(field, raw) + default: + return fmt.Errorf("unsupported field kind %s", field.Kind()) + } + + return nil +} + +func assignSlice(field reflect.Value, raw string) error { + elemKind := field.Type().Elem().Kind() + switch elemKind { + case reflect.String: + if raw == "" { + field.Set(reflect.Zero(field.Type())) + return nil + } + + var parsed []string + if err := json.Unmarshal([]byte(raw), &parsed); err != nil { + chunks := strings.SplitSeq(raw, ",") + for chunk := range chunks { + chunk = strings.TrimSpace(chunk) + if chunk == "" { + continue + } + parsed = append(parsed, chunk) + } + } + field.Set(reflect.ValueOf(parsed)) + default: + return fmt.Errorf("unsupported slice element kind %s", elemKind) + } + + return nil +} diff --git a/protocol/communities/codex_config_overrides_test.go b/protocol/communities/codex_config_overrides_test.go new file mode 100644 index 00000000000..c0f78e7a751 --- /dev/null +++ b/protocol/communities/codex_config_overrides_test.go @@ -0,0 +1,221 @@ +package communities + +import ( + "testing" + + "github.com/codex-storage/codex-go-bindings/codex" + "github.com/stretchr/testify/suite" + + "github.com/status-im/status-go/params" +) + +type CodexConfigOverridesTestSuite struct { + suite.Suite +} + +func TestCodexConfigOverridesTestSuite(t *testing.T) { + suite.Run(t, new(CodexConfigOverridesTestSuite)) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_AllFields() { + cfg := ¶ms.CodexConfig{} + + overrides := map[string]string{ + // params.CodexConfig fields + "Enabled": "true", + "HistoryArchiveDataDir": "/custom/archive/path", + + // codex.Config fields (nested under CodexNodeConfig) + "CodexNodeConfig.LogLevel": "DEBUG", + "CodexNodeConfig.LogFormat": "json", + "CodexNodeConfig.MetricsEnabled": "true", + "CodexNodeConfig.MetricsAddress": "0.0.0.0", + "CodexNodeConfig.MetricsPort": "9090", + "CodexNodeConfig.DataDir": "/custom/data/dir", + "CodexNodeConfig.ListenAddrs": `["/ip4/0.0.0.0/tcp/4001","/ip4/0.0.0.0/tcp/4002"]`, + "CodexNodeConfig.Nat": "none", + "CodexNodeConfig.DiscoveryPort": "8091", + "CodexNodeConfig.NetPrivKeyFile": "/path/to/key", + "CodexNodeConfig.BootstrapNodes": `["spr:CiUIAhIhA1..","spr:CiUIAhIhA2.."]`, + "CodexNodeConfig.MaxPeers": "200", + "CodexNodeConfig.NumThreads": "4", + "CodexNodeConfig.AgentString": "CustomCodex/1.0", + "CodexNodeConfig.RepoKind": "sqlite", + "CodexNodeConfig.StorageQuota": "50000000000", + "CodexNodeConfig.BlockTtl": "604800", + "CodexNodeConfig.BlockMaintenanceInterval": "300", + "CodexNodeConfig.BlockMaintenanceNumberOfBlocks": "2000", + "CodexNodeConfig.BlockRetries": "5000", + "CodexNodeConfig.CacheSize": "1024", + "CodexNodeConfig.LogFile": "/var/log/codex.log", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Require().NoError(err) + + // Verify params.CodexConfig fields + s.Equal(true, cfg.Enabled) + s.Equal("/custom/archive/path", cfg.HistoryArchiveDataDir) + + // Verify codex.Config fields + s.Equal("DEBUG", cfg.CodexNodeConfig.LogLevel) + s.Equal(codex.LogFormat("json"), cfg.CodexNodeConfig.LogFormat) + s.Equal(true, cfg.CodexNodeConfig.MetricsEnabled) + s.Equal("0.0.0.0", cfg.CodexNodeConfig.MetricsAddress) + s.Equal(9090, cfg.CodexNodeConfig.MetricsPort) + s.Equal("/custom/data/dir", cfg.CodexNodeConfig.DataDir) + 
s.Equal([]string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4002"}, cfg.CodexNodeConfig.ListenAddrs) + s.Equal("none", cfg.CodexNodeConfig.Nat) + s.Equal(8091, cfg.CodexNodeConfig.DiscoveryPort) + s.Equal("/path/to/key", cfg.CodexNodeConfig.NetPrivKeyFile) + s.Equal([]string{"spr:CiUIAhIhA1..", "spr:CiUIAhIhA2.."}, cfg.CodexNodeConfig.BootstrapNodes) + s.Equal(200, cfg.CodexNodeConfig.MaxPeers) + s.Equal(4, cfg.CodexNodeConfig.NumThreads) + s.Equal("CustomCodex/1.0", cfg.CodexNodeConfig.AgentString) + s.Equal(codex.RepoKind("sqlite"), cfg.CodexNodeConfig.RepoKind) + s.Equal(50000000000, cfg.CodexNodeConfig.StorageQuota) + s.Equal("604800", cfg.CodexNodeConfig.BlockTtl) + s.Equal("300", cfg.CodexNodeConfig.BlockMaintenanceInterval) + s.Equal(2000, cfg.CodexNodeConfig.BlockMaintenanceNumberOfBlocks) + s.Equal(5000, cfg.CodexNodeConfig.BlockRetries) + s.Equal(1024, cfg.CodexNodeConfig.CacheSize) + s.Equal("/var/log/codex.log", cfg.CodexNodeConfig.LogFile) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_NilConfig() { + err := ApplyCodexConfigOverrides(nil, map[string]string{"Enabled": "true"}) + s.NoError(err, "should handle nil config gracefully") +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_EmptyOverrides() { + cfg := ¶ms.CodexConfig{} + err := ApplyCodexConfigOverrides(cfg, map[string]string{}) + s.NoError(err, "should handle empty overrides gracefully") +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_InvalidFieldName() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "NonExistentField": "value", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Error(err) + s.Contains(err.Error(), "unknown field") +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_InvalidBoolValue() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "Enabled": "not-a-bool", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Error(err) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_InvalidIntValue() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig.MetricsPort": "not-a-number", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Error(err) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_EmptyKey() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "": "should-be-ignored", + "Enabled": "true", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + s.Equal(true, cfg.Enabled) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_WhitespaceKey() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + " ": "should-be-ignored", + "Enabled": "true", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + s.Equal(true, cfg.Enabled) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_StringSliceJSON() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig.ListenAddrs": `["/ip4/0.0.0.0/tcp/4001"]`, + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + s.Equal([]string{"/ip4/0.0.0.0/tcp/4001"}, cfg.CodexNodeConfig.ListenAddrs) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_StringSliceCommaSeparated() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig.BootstrapNodes": "node1,node2,node3", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + 
s.Equal([]string{"node1", "node2", "node3"}, cfg.CodexNodeConfig.BootstrapNodes) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_StringSliceEmpty() { + cfg := ¶ms.CodexConfig{ + CodexNodeConfig: codex.Config{ + ListenAddrs: []string{"existing"}, + }, + } + overrides := map[string]string{ + "CodexNodeConfig.ListenAddrs": "", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + s.Nil(cfg.CodexNodeConfig.ListenAddrs) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_NestedPath() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig.DataDir": "/test/path", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.NoError(err) + s.Equal("/test/path", cfg.CodexNodeConfig.DataDir) +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_InvalidNestedPath() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig.NonExistent.Field": "value", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Error(err) + s.Contains(err.Error(), "unknown field") + s.Regexp(`\bNonExistent\b`, err.Error()) + s.Contains(err.Error(), "CodexNodeConfig.NonExistent.Field") +} + +func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_EmptySegmentInPath() { + cfg := ¶ms.CodexConfig{} + overrides := map[string]string{ + "CodexNodeConfig..DataDir": "/test/path", + } + + err := ApplyCodexConfigOverrides(cfg, overrides) + s.Error(err) + s.Contains(err.Error(), "invalid empty segment") +} diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index bb030946ccf..f7b9cf57130 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -7,9 +7,7 @@ import ( "encoding/json" "errors" "fmt" - "reflect" "slices" - "strconv" "strings" "sync" "time" @@ -4358,7 +4356,7 @@ func (m *Messenger) EnableCodexCommunityHistoryArchiveProtocol(overrides map[str if len(overrides) > 0 { m.logger.Info("[CODEX][enable_community_history_archive_protocol] applying CodexConfig overrides", zap.Any("overrides", overrides)) - if err := applyCodexConfigOverrides(&nodeConfig.CodexConfig, overrides); err != nil { + if err := communities.ApplyCodexConfigOverrides(&nodeConfig.CodexConfig, overrides); err != nil { return err } } @@ -4397,132 +4395,6 @@ func (m *Messenger) EnableCodexCommunityHistoryArchiveProtocol(overrides map[str return nil } -func applyCodexConfigOverrides(cfg *params.CodexConfig, overrides map[string]string) error { - if cfg == nil || len(overrides) == 0 { - return nil - } - - for key, raw := range overrides { - key = strings.TrimSpace(key) - if key == "" { - continue - } - if err := setStructFieldValue(reflect.ValueOf(cfg), key, raw); err != nil { - return fmt.Errorf("failed to apply CodexConfig override %q: %w", key, err) - } - } - - return nil -} - -func setStructFieldValue(target reflect.Value, path, raw string) error { - if target.Kind() != reflect.Pointer { - return fmt.Errorf("target must be a pointer, got %s", target.Kind()) - } - - current := target.Elem() - if !current.IsValid() { - return fmt.Errorf("invalid target for path %q", path) - } - - parts := strings.Split(path, ".") - for idx, part := range parts { - if part == "" { - return fmt.Errorf("invalid empty segment in path %q", path) - } - field := current.FieldByName(part) - if !field.IsValid() { - return fmt.Errorf("unknown field %q in path %q", part, path) - } - - if idx == len(parts)-1 { - if !field.CanSet() { - return fmt.Errorf("cannot set 
field %q in path %q", part, path) - } - return assignValue(field, raw) - } - - switch field.Kind() { - case reflect.Struct: - current = field - case reflect.Pointer: - if field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - current = field.Elem() - default: - return fmt.Errorf("field %q in path %q is not addressable struct or pointer", part, path) - } - } - - return nil -} - -func assignValue(field reflect.Value, raw string) error { - switch field.Kind() { - case reflect.String: - field.SetString(raw) - case reflect.Bool: - parsed, err := strconv.ParseBool(raw) - if err != nil { - return err - } - field.SetBool(parsed) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - parsed, err := strconv.ParseInt(raw, 10, field.Type().Bits()) - if err != nil { - return err - } - field.SetInt(parsed) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - parsed, err := strconv.ParseUint(raw, 10, field.Type().Bits()) - if err != nil { - return err - } - field.SetUint(parsed) - case reflect.Float32, reflect.Float64: - parsed, err := strconv.ParseFloat(raw, field.Type().Bits()) - if err != nil { - return err - } - field.SetFloat(parsed) - case reflect.Slice: - return assignSlice(field, raw) - default: - return fmt.Errorf("unsupported field kind %s", field.Kind()) - } - - return nil -} - -func assignSlice(field reflect.Value, raw string) error { - elemKind := field.Type().Elem().Kind() - switch elemKind { - case reflect.String: - if raw == "" { - field.Set(reflect.Zero(field.Type())) - return nil - } - - var parsed []string - if err := json.Unmarshal([]byte(raw), &parsed); err != nil { - chunks := strings.SplitSeq(raw, ",") - for chunk := range chunks { - chunk = strings.TrimSpace(chunk) - if chunk == "" { - continue - } - parsed = append(parsed, chunk) - } - } - field.Set(reflect.ValueOf(parsed)) - default: - return fmt.Errorf("unsupported slice element kind %s", elemKind) - } - - return nil -} - func (m *Messenger) DisableCommunityHistoryArchiveProtocol() error { nodeConfig, err := m.settings.GetNodeConfig() From b2316d46dea42d946afef695cf809ff67a4bc452 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Wed, 12 Nov 2025 18:56:43 +0100 Subject: [PATCH 74/75] version without index and index cid files s Please enter the commit message for your changes. 
Lines starting --- api/defaults.go | 3 +- .../sql/1761913234_add_codex_config.up.sql | 2 - appdatabase/node_config_test.go | 3 +- nodecfg/node_config.go | 12 +- params/config.go | 11 +- params/config_test.go | 2 - .../communities/codex_archive_downloader.go | 6 +- .../codex_config_overrides_test.go | 4 +- .../communities/codex_index_downloader.go | 258 +----- ...codex_index_downloader_integration_test.go | 182 ----- .../codex_index_downloader_test.go | 749 ++++++++--------- ...codex_manager_archive_cancellation_test.go | 114 +-- .../communities/codex_manager_archive_test.go | 531 +----------- protocol/communities/codex_testutil_test.go | 3 +- protocol/communities/manager.go | 30 +- protocol/communities/manager_archive.go | 755 ++++++++++++------ protocol/communities/manager_archive_file.go | 406 ---------- protocol/communities/manager_test.go | 3 +- protocol/communities/persistence.go | 72 +- ...nities_messenger_token_permissions_test.go | 317 ++------ protocol/messenger.go | 9 +- protocol/messenger_communities.go | 81 +- .../messenger_communities_import_discord.go | 18 +- protocol/messenger_handler.go | 33 +- protocol/messenger_handler_test.go | 3 +- services/ext/api.go | 2 +- .../tests/test_wakuext_community_archives.py | 101 +-- 27 files changed, 1183 insertions(+), 2527 deletions(-) delete mode 100644 protocol/communities/codex_index_downloader_integration_test.go diff --git a/api/defaults.go b/api/defaults.go index d070d42cc96..d35f47ec325 100644 --- a/api/defaults.go +++ b/api/defaults.go @@ -367,8 +367,7 @@ func DefaultNodeConfig(installationID, keyUID string, request *requests.CreateAc } nodeConfig.CodexConfig = params.CodexConfig{ - Enabled: nodeConfig.CodexConfig.Enabled, - HistoryArchiveDataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "archivedata"), + Enabled: nodeConfig.CodexConfig.Enabled, CodexNodeConfig: codex.Config{ DataDir: filepath.Join(nodeConfig.RootDataDir, "codex", "codexdata"), BlockRetries: params.BlockRetries, diff --git a/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql index 2f43637ed0f..d20c0013981 100644 --- a/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql +++ b/appdatabase/migrations/sql/1761913234_add_codex_config.up.sql @@ -1,6 +1,5 @@ CREATE TABLE codex_config ( enabled BOOLEAN DEFAULT false, - history_archive_data_dir VARCHAR NOT NULL, log_level TEXT DEFAULT 'info', log_format TEXT DEFAULT 'auto', metrics_enabled BOOLEAN DEFAULT false, @@ -26,4 +25,3 @@ CREATE TABLE codex_config ( synthetic_id VARCHAR DEFAULT 'id' PRIMARY KEY ) WITHOUT ROWID; - diff --git a/appdatabase/node_config_test.go b/appdatabase/node_config_test.go index dc0803a162c..8ffb9d14ea7 100644 --- a/appdatabase/node_config_test.go +++ b/appdatabase/node_config_test.go @@ -58,8 +58,7 @@ func randomNodeConfig() *params.NodeConfig { LightClient: randomBool(), }, CodexConfig: params.CodexConfig{ - Enabled: randomBool(), - HistoryArchiveDataDir: randomString(), + Enabled: randomBool(), CodexNodeConfig: codex.Config{ DataDir: randomString(), DiscoveryPort: randomInt(65535), diff --git a/nodecfg/node_config.go b/nodecfg/node_config.go index ca5bde910b7..ce47cbcfa8c 100644 --- a/nodecfg/node_config.go +++ b/nodecfg/node_config.go @@ -168,13 +168,12 @@ func insertCodexConfig(tx *sql.Tx, c *params.NodeConfig) error { } _, err = tx.Exec(` INSERT OR REPLACE INTO codex_config ( - enabled, history_archive_data_dir, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, + enabled, 
log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, block_retries, cache_size, log_file, synthetic_id - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`, + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`, c.CodexConfig.Enabled, - c.CodexConfig.HistoryArchiveDataDir, c.CodexConfig.CodexNodeConfig.LogLevel, c.CodexConfig.CodexNodeConfig.LogFormat, c.CodexConfig.CodexNodeConfig.MetricsEnabled, @@ -356,9 +355,7 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) { if err != nil && err != sql.ErrNoRows { return nil, err } - if nodecfg.HistoryArchiveDistributionPreference == "" { - nodecfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference - } + if nodecfg.HistoryArchiveDistributionPreference == "" { nodecfg.HistoryArchiveDistributionPreference = params.DefaultHistoryArchiveDistributionPreference } @@ -366,14 +363,13 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) { // Load codex_config var listenAddrsStr, bootstrapNodesStr string err = tx.QueryRow(` - SELECT enabled, history_archive_data_dir, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, + SELECT enabled, log_level, log_format, metrics_enabled, metrics_address, metrics_port, data_dir, listen_addrs, nat, disc_port, net_privkey, bootstrap_nodes, max_peers, num_threads, agent_string, repo_kind, storage_quota, block_ttl, block_maintenance_interval, block_maintenance_number_of_blocks, block_retries, cache_size, log_file FROM codex_config WHERE synthetic_id = 'id' `).Scan( &nodecfg.CodexConfig.Enabled, - &nodecfg.CodexConfig.HistoryArchiveDataDir, &nodecfg.CodexConfig.CodexNodeConfig.LogLevel, &nodecfg.CodexConfig.CodexNodeConfig.LogFormat, &nodecfg.CodexConfig.CodexNodeConfig.MetricsEnabled, diff --git a/params/config.go b/params/config.go index 83b6eaed365..a5c531e8ed1 100644 --- a/params/config.go +++ b/params/config.go @@ -333,9 +333,8 @@ type TorrentConfig struct { } type CodexConfig struct { - Enabled bool - HistoryArchiveDataDir string - CodexNodeConfig codex.Config + Enabled bool + CodexNodeConfig codex.Config } const ( @@ -397,9 +396,6 @@ func (c *NodeConfig) UpdateWithDefaults() error { if c.HistoryArchiveDistributionPreference == ArchiveDistributionMethodCodex { if c.CodexConfig.Enabled { - if c.CodexConfig.HistoryArchiveDataDir == "" { - c.CodexConfig.HistoryArchiveDataDir = filepath.Join(c.RootDataDir, "codex", "archivedata") - } if c.CodexConfig.CodexNodeConfig.DataDir == "" { c.CodexConfig.CodexNodeConfig.DataDir = filepath.Join(c.RootDataDir, "codex", "codexdata") } @@ -441,8 +437,7 @@ func NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) { TorrentDir: dataDir + "/torrents", }, CodexConfig: CodexConfig{ - Enabled: false, - HistoryArchiveDataDir: filepath.Join(dataDir, "codex", "archivedata"), + Enabled: false, CodexNodeConfig: codex.Config{ BlockRetries: BlockRetries, DataDir: filepath.Join(dataDir, "codex", "codexdata"), diff --git a/params/config_test.go b/params/config_test.go index 06492300552..32fe1ea9174 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -32,7 +32,6 @@ func TestNewConfigFromJSON(t *testing.T) { }, "CodexConfig": { "Enabled": false, - "HistoryArchiveDataDir": "` + tmpDir + 
`/codex/archivedata", "CodexNodeConfig": { "data-dir": "` + tmpDir + `/codex/codexdata", "block-retries": 5 @@ -49,7 +48,6 @@ func TestNewConfigFromJSON(t *testing.T) { require.Equal(t, tmpDir+"/archivedata", c.TorrentConfig.DataDir) require.Equal(t, tmpDir+"/torrents", c.TorrentConfig.TorrentDir) require.Equal(t, "DEBUG", c.RuntimeLogLevel) - require.Equal(t, filepath.Join(tmpDir, "codex", "archivedata"), c.CodexConfig.HistoryArchiveDataDir) require.Equal(t, filepath.Join(tmpDir, "codex", "codexdata"), c.CodexConfig.CodexNodeConfig.DataDir) require.Equal(t, 5, c.CodexConfig.CodexNodeConfig.BlockRetries) } diff --git a/protocol/communities/codex_archive_downloader.go b/protocol/communities/codex_archive_downloader.go index 98b072fa277..443e55a1db7 100644 --- a/protocol/communities/codex_archive_downloader.go +++ b/protocol/communities/codex_archive_downloader.go @@ -266,12 +266,12 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { zap.String("cid", archiveCid), zap.String("hash", archiveHash), zap.Duration("timeout", d.pollingTimeout)) - return // Exit without success callback or count increment + return case <-archiveCancel: d.logger.Debug("[CODEX] download cancelled", zap.String("cid", archiveCid), zap.String("hash", archiveHash)) - return // Exit without success callback or count increment + return case <-ticker.C: hasCid, err := d.codexClient.HasCid(archiveCid) if err != nil { @@ -297,7 +297,7 @@ func (d *CodexArchiveDownloader) downloadAllArchives() { if d.onArchiveDownloaded != nil { d.onArchiveDownloaded(archiveHash, archiveFrom, archiveTo) } - return // Exit after successful completion + return } } } diff --git a/protocol/communities/codex_config_overrides_test.go b/protocol/communities/codex_config_overrides_test.go index c0f78e7a751..43803a866e1 100644 --- a/protocol/communities/codex_config_overrides_test.go +++ b/protocol/communities/codex_config_overrides_test.go @@ -22,8 +22,7 @@ func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_AllFields( overrides := map[string]string{ // params.CodexConfig fields - "Enabled": "true", - "HistoryArchiveDataDir": "/custom/archive/path", + "Enabled": "true", // codex.Config fields (nested under CodexNodeConfig) "CodexNodeConfig.LogLevel": "DEBUG", @@ -55,7 +54,6 @@ func (s *CodexConfigOverridesTestSuite) TestApplyCodexConfigOverrides_AllFields( // Verify params.CodexConfig fields s.Equal(true, cfg.Enabled) - s.Equal("/custom/archive/path", cfg.HistoryArchiveDataDir) // Verify codex.Config fields s.Equal("DEBUG", cfg.CodexNodeConfig.LogLevel) diff --git a/protocol/communities/codex_index_downloader.go b/protocol/communities/codex_index_downloader.go index 7f21b8cd91c..c14708e5930 100644 --- a/protocol/communities/codex_index_downloader.go +++ b/protocol/communities/codex_index_downloader.go @@ -2,269 +2,45 @@ package communities import ( "context" - "fmt" "io" - "os" - "path/filepath" - "sync" "github.com/status-im/status-go/common" "go.uber.org/zap" ) -// ManifestResponse represents the response from Codex manifest API -type ManifestResponse struct { - CID string `json:"cid"` - Manifest struct { - TreeCID string `json:"treeCid"` - DatasetSize int64 `json:"datasetSize"` - BlockSize int `json:"blockSize"` - Protected bool `json:"protected"` - Filename string `json:"filename"` - Mimetype string `json:"mimetype"` - } `json:"manifest"` -} - -// CodexIndexDownloader handles downloading index files from Codex storage type CodexIndexDownloader struct { - codexClient CodexClientInterface - indexCid string - filePath string 
- mu sync.RWMutex // protects all fields below - datasetSize int64 // stores the dataset size from the manifest - bytesCompleted int64 // tracks download progress - downloadComplete bool // true when file is fully downloaded and renamed - downloadError error // stores the last error that occurred during manifest fetch or download - cancelChan <-chan struct{} // for cancellation support - logger *zap.Logger + codexClient CodexClientInterface + logger *zap.Logger } -// NewCodexIndexDownloader creates a new index downloader -func NewCodexIndexDownloader(codexClient CodexClientInterface, indexCid string, filePath string, cancelChan <-chan struct{}, logger *zap.Logger) *CodexIndexDownloader { +func NewCodexIndexDownloader(codexClient CodexClientInterface, logger *zap.Logger) *CodexIndexDownloader { return &CodexIndexDownloader{ codexClient: codexClient, - indexCid: indexCid, - filePath: filePath, - cancelChan: cancelChan, logger: logger, } } -// GotManifest returns a channel that is closed when the Codex manifest file -// for the configured CID is successfully fetched. On error, the channel is not closed -// (allowing timeout to handle failures). Check GetDatasetSize() > 0 to verify success. -func (d *CodexIndexDownloader) GotManifest() <-chan struct{} { +func (d *CodexIndexDownloader) DownloadIndexFileFromLocalNode( + ctx context.Context, + indexCid string, + output io.Writer, +) error { defer common.LogOnPanic() - ch := make(chan struct{}) - - // Create cancellable context - ctx, cancel := context.WithCancel(context.Background()) - - // Monitor for cancellation in separate goroutine - go func() { - defer common.LogOnPanic() - select { - case <-d.cancelChan: - cancel() // Cancel fetch immediately - case <-ctx.Done(): - // Context already cancelled, nothing to do - } - }() - - go func() { - defer common.LogOnPanic() - defer cancel() // Ensure context is cancelled when fetch completes or fails - - // Reset datasetSize to 0 to indicate no successful fetch yet - d.mu.Lock() - d.datasetSize = 0 - d.downloadError = nil - d.mu.Unlock() - - // Fetch manifest from Codex - manifest, err := d.codexClient.FetchManifestWithContext(ctx, d.indexCid) - if err != nil { - d.mu.Lock() - d.downloadError = err - d.mu.Unlock() - d.logger.Debug("[CODEX] failed to fetch manifest", - zap.String("indexCid", d.indexCid), - zap.Error(err)) - // Don't close channel on error - let timeout handle it - return - } - - // Verify that the CID matches our configured indexCid - if manifest.Cid != d.indexCid { - d.mu.Lock() - d.downloadError = fmt.Errorf("manifest CID mismatch: expected %s, got %s", d.indexCid, manifest.Cid) - d.mu.Unlock() - d.logger.Debug("[CODEX] manifest CID mismatch", - zap.String("expected", d.indexCid), - zap.String("got", manifest.Cid)) - // Don't close channel on error - let timeout handle it - return - } - // Store the dataset size for later use - this indicates success - d.datasetSize = int64(manifest.DatasetSize) + d.logger.Debug("[CODEX][download_index_file_from_local_node] downloading codex index file from local node", zap.String("indexCid", indexCid)) - // Success! 
Close the channel to signal completion - close(ch) - }() - - return ch -} - -// GetDatasetSize returns the dataset size from the last successfully fetched manifest -func (d *CodexIndexDownloader) GetDatasetSize() int64 { - d.mu.RLock() - defer d.mu.RUnlock() - return d.datasetSize + return d.codexClient.LocalDownloadWithContext(ctx, indexCid, output) } -// DownloadIndexFile starts downloading the index file from Codex and writes it to the configured file path -func (d *CodexIndexDownloader) DownloadIndexFile() { +func (d *CodexIndexDownloader) DownloadIndexFileFromNetwork( + ctx context.Context, + indexCid string, + output io.Writer, +) error { defer common.LogOnPanic() - // Reset progress counter and completion flag - d.mu.Lock() - d.bytesCompleted = 0 - d.downloadComplete = false - d.downloadError = nil - d.mu.Unlock() - - // Create cancellable context - ctx, cancel := context.WithCancel(context.Background()) - - // Monitor for cancellation in separate goroutine - go func() { - defer common.LogOnPanic() - select { - case <-d.cancelChan: - cancel() // Cancel download immediately - case <-ctx.Done(): - // Context already cancelled, nothing to do - } - }() - - // Start download in separate goroutine - go func() { - defer common.LogOnPanic() - defer cancel() // Ensure context is cancelled when download completes or fails - - // Create a temporary file in the same directory as the target file - // This ensures atomic rename works (same filesystem) - tmpFile, err := os.CreateTemp(filepath.Dir(d.filePath), ".codex-download-*.tmp") - if err != nil { - d.mu.Lock() - d.downloadError = fmt.Errorf("failed to create temporary file: %w", err) - d.mu.Unlock() - d.logger.Debug("[CODEX] failed to create temporary file", - zap.String("filePath", d.filePath), - zap.Error(err)) - return - } - tmpPath := tmpFile.Name() - defer func() { - tmpFile.Close() - // Clean up temporary file if it still exists (i.e., download failed) - os.Remove(tmpPath) - }() - - // Create a progress tracking writer - progressWriter := &progressWriter{ - writer: tmpFile, - completed: &d.bytesCompleted, - mu: &d.mu, - } - // Use CodexClient to download and stream to temporary file with context for cancellation - err = d.codexClient.DownloadWithContext(ctx, d.indexCid, progressWriter) - if err != nil { - d.mu.Lock() - d.downloadError = fmt.Errorf("failed to download index file: %w", err) - d.mu.Unlock() - d.logger.Debug("[CODEX] failed to download index file", - zap.String("indexCid", d.indexCid), - zap.String("filePath", d.filePath), - zap.String("tmpPath", tmpPath), - zap.Error(err)) - return - } + d.logger.Debug("[CODEX][download_index_file_from_network] downloading codex index file from network", zap.String("indexCid", indexCid)) - // Close the temporary file before renaming - if err := tmpFile.Close(); err != nil { - d.mu.Lock() - d.downloadError = fmt.Errorf("failed to close temporary file: %w", err) - d.mu.Unlock() - d.logger.Debug("[CODEX] failed to close temporary file", - zap.String("tmpPath", tmpPath), - zap.Error(err)) - return - } - - // Atomically rename temporary file to final destination - // This ensures we only have a complete file at filePath - if err := os.Rename(tmpPath, d.filePath); err != nil { - d.mu.Lock() - d.downloadError = fmt.Errorf("failed to rename temporary file to final destination: %w", err) - d.mu.Unlock() - d.logger.Debug("[CODEX] failed to rename temporary file to final destination", - zap.String("tmpPath", tmpPath), - zap.String("filePath", d.filePath), - zap.Error(err)) - return - } - - // Mark 
download as complete only after successful rename - d.mu.Lock() - d.downloadComplete = true - d.mu.Unlock() - }() -} - -// BytesCompleted returns the number of bytes downloaded so far -func (d *CodexIndexDownloader) BytesCompleted() int64 { - d.mu.RLock() - defer d.mu.RUnlock() - return d.bytesCompleted -} - -// IsDownloadComplete returns true when the file has been fully downloaded and saved to disk -func (d *CodexIndexDownloader) IsDownloadComplete() bool { - d.mu.RLock() - defer d.mu.RUnlock() - return d.downloadComplete -} - -// GetError returns the last error that occurred during manifest fetch or download, or nil if no error -func (d *CodexIndexDownloader) GetError() error { - d.mu.RLock() - defer d.mu.RUnlock() - return d.downloadError -} - -// Length returns the total dataset size (equivalent to torrent file length) -func (d *CodexIndexDownloader) Length() int64 { - d.mu.RLock() - defer d.mu.RUnlock() - return d.datasetSize -} - -// progressWriter wraps an io.Writer to track bytes written -type progressWriter struct { - writer io.Writer - completed *int64 - mu *sync.RWMutex -} - -func (pw *progressWriter) Write(p []byte) (n int, err error) { - n, err = pw.writer.Write(p) - if n > 0 { - pw.mu.Lock() - *pw.completed += int64(n) - pw.mu.Unlock() - } - return n, err + return d.codexClient.DownloadWithContext(ctx, indexCid, output) } diff --git a/protocol/communities/codex_index_downloader_integration_test.go b/protocol/communities/codex_index_downloader_integration_test.go deleted file mode 100644 index ffaa0ed5108..00000000000 --- a/protocol/communities/codex_index_downloader_integration_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package communities_test - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.uber.org/zap" - - "github.com/status-im/status-go/protocol/communities" -) - -// CodexIndexDownloaderIntegrationTestSuite demonstrates testify's suite functionality for CodexIndexDownloader integration tests -// These tests exercise real network calls against a running Codex node. 
-// Required env vars (with defaults): -// - CODEX_HOST (default: localhost) -// - CODEX_API_PORT (default: 8001) -// - CODEX_TIMEOUT_MS (optional; default: 60000) -type CodexIndexDownloaderIntegrationTestSuite struct { - suite.Suite - testDir string - logger *zap.Logger -} - -// SetupSuite runs once before all tests in the suite -func (suite *CodexIndexDownloaderIntegrationTestSuite) SetupSuite() { - // Create logger - suite.logger, _ = zap.NewDevelopment() -} - -// SetupTest runs before each test -func (suite *CodexIndexDownloaderIntegrationTestSuite) SetupTest() { - // Create a temporary directory for test files - var err error - suite.testDir, err = os.MkdirTemp("", "codex-index-integration-*") - require.NoError(suite.T(), err) -} - -// TearDownTest runs after each test -func (suite *CodexIndexDownloaderIntegrationTestSuite) TearDownTest() { - // Clean up test directory - if suite.testDir != "" { - os.RemoveAll(suite.testDir) - } -} - -// TestCodexIndexDownloaderIntegrationTestSuite runs the integration test suite -func TestCodexIndexDownloaderIntegrationTestSuite(t *testing.T) { - suite.Run(t, new(CodexIndexDownloaderIntegrationTestSuite)) -} - -func (suite *CodexIndexDownloaderIntegrationTestSuite) TestIntegration_GotManifest() { - client := NewCodexClientTest(suite.T()) - - // Generate random payload to create a test file - payload := make([]byte, 2048) - _, err := rand.Read(payload) - require.NoError(suite.T(), err, "failed to generate random payload") - suite.T().Logf("Generated payload (first 32 bytes hex): %s", hex.EncodeToString(payload[:32])) - - // Upload the data to Codex - cid, err := client.Upload(bytes.NewReader(payload), "index-manifest-test.bin") - require.NoError(suite.T(), err, "upload failed") - suite.T().Logf("Upload successful, CID: %s", cid) - - // Clean up after test - defer func() { - if err := client.RemoveCid(cid); err != nil { - suite.T().Logf("Warning: Failed to remove CID %s: %v", cid, err) - } - }() - - // Create downloader with cancel channel - cancelChan := make(chan struct{}) - defer close(cancelChan) - - filePath := filepath.Join(suite.testDir, "test-index.bin") - downloader := communities.NewCodexIndexDownloader(client, cid, filePath, cancelChan, suite.logger) - - // Test GotManifest - manifestChan := downloader.GotManifest() - - // Wait for manifest to be fetched (with timeout) - select { - case <-manifestChan: - suite.T().Log("✅ Manifest fetched successfully") - case <-time.After(10 * time.Second): - suite.T().Fatal("Timeout waiting for manifest to be fetched") - } - - // Verify dataset size was recorded - datasetSize := downloader.GetDatasetSize() - assert.Greater(suite.T(), datasetSize, int64(0), "Dataset size should be greater than 0") - suite.T().Logf("Dataset size from manifest: %d bytes", datasetSize) - - // Verify Length returns the same value - assert.Equal(suite.T(), datasetSize, downloader.Length(), "Length() should return dataset size") - - // Verify no error occurred - assert.NoError(suite.T(), downloader.GetError(), "No error should occur during manifest fetch") - suite.T().Log("✅ No errors during manifest fetch") -} - -func (suite *CodexIndexDownloaderIntegrationTestSuite) TestIntegration_DownloadIndexFile() { - client := NewCodexClientTest(suite.T()) - - // Generate random payload - payload := make([]byte, 1024) - _, err := rand.Read(payload) - require.NoError(suite.T(), err, "failed to generate random payload") - suite.T().Logf("Generated payload (first 32 bytes hex): %s", hex.EncodeToString(payload[:32])) - - // Upload the data to 
Codex - cid, err := client.Upload(bytes.NewReader(payload), "index-download-test.bin") - require.NoError(suite.T(), err, "upload failed") - suite.T().Logf("Upload successful, CID: %s", cid) - - // Clean up after test - defer func() { - if err := client.RemoveCid(cid); err != nil { - suite.T().Logf("Warning: Failed to remove CID %s: %v", cid, err) - } - }() - - // Create downloader - cancelChan := make(chan struct{}) - defer close(cancelChan) - - filePath := filepath.Join(suite.testDir, "downloaded-index.bin") - downloader := communities.NewCodexIndexDownloader(client, cid, filePath, cancelChan, suite.logger) - - // First, get the manifest to know the expected size - manifestChan := downloader.GotManifest() - select { - case <-manifestChan: - suite.T().Log("Manifest fetched") - case <-time.After(10 * time.Second): - suite.T().Fatal("Timeout waiting for manifest") - } - - expectedSize := downloader.GetDatasetSize() - suite.T().Logf("Expected file size: %d bytes", expectedSize) - - // Verify no error from manifest fetch - assert.NoError(suite.T(), downloader.GetError(), "No error should occur during manifest fetch") - - // Start the download - downloader.DownloadIndexFile() - - // Wait for download to complete by monitoring progress - require.Eventually(suite.T(), func() bool { - return downloader.BytesCompleted() == expectedSize - }, 30*time.Second, 100*time.Millisecond, "Download should complete") - - suite.T().Logf("✅ Download completed: %d/%d bytes", downloader.BytesCompleted(), expectedSize) - - // Verify download is marked as complete - assert.True(suite.T(), downloader.IsDownloadComplete(), "Download should be marked as complete") - suite.T().Log("✅ Download marked as complete") - - // Verify no error occurred during download - assert.NoError(suite.T(), downloader.GetError(), "No error should occur during download") - suite.T().Log("✅ No errors during download") - - // Verify file exists and has correct size - stat, err := os.Stat(filePath) - require.NoError(suite.T(), err, "Downloaded file should exist") - assert.Equal(suite.T(), expectedSize, stat.Size(), "File size should match dataset size") - - // Verify file contents match original payload - downloadedData, err := os.ReadFile(filePath) - require.NoError(suite.T(), err, "Should be able to read downloaded file") - assert.Equal(suite.T(), payload, downloadedData, "Downloaded data should match original payload") - suite.T().Log("✅ Downloaded file contents verified") -} diff --git a/protocol/communities/codex_index_downloader_test.go b/protocol/communities/codex_index_downloader_test.go index d3e015220fb..18d3aca198b 100644 --- a/protocol/communities/codex_index_downloader_test.go +++ b/protocol/communities/codex_index_downloader_test.go @@ -1,15 +1,15 @@ package communities_test import ( + "bytes" "context" + "crypto/rand" + "encoding/hex" "errors" "io" - "os" - "path/filepath" "testing" "time" - "github.com/codex-storage/codex-go-bindings/codex" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -20,510 +20,451 @@ import ( mock_communities "github.com/status-im/status-go/protocol/communities/mock/communities" ) -// CodexIndexDownloaderTestSuite demonstrates testify's suite functionality for CodexIndexDownloader tests -type CodexIndexDownloaderTestSuite struct { +// ============================================================================ +// Suite 1: Real CodexClient Integration Tests +// ============================================================================ + 
+// CodexIndexDownloaderRealClientSuite tests successful index downloads +// using a real CodexClient instance against a running Codex node. +type CodexIndexDownloaderRealClientSuite struct { suite.Suite - ctrl *gomock.Controller - mockClient *mock_communities.MockCodexClientInterface - testDir string - cancelChan chan struct{} - logger *zap.Logger + client communities.CodexClientInterface + logger *zap.Logger + uploadedCIDs []string // Track uploaded CIDs for cleanup } -// SetupTest runs before each test method -func (suite *CodexIndexDownloaderTestSuite) SetupTest() { - suite.ctrl = gomock.NewController(suite.T()) - suite.mockClient = mock_communities.NewMockCodexClientInterface(suite.ctrl) - - // Create a temporary directory for test files - var err error - suite.testDir, err = os.MkdirTemp("", "codex-index-test-*") - require.NoError(suite.T(), err) - - // Create a fresh cancel channel for each test - suite.cancelChan = make(chan struct{}) - - // Use NOP logger for unit tests (no output noise) - suite.logger = zap.NewNop() +func (suite *CodexIndexDownloaderRealClientSuite) SetupSuite() { + suite.logger, _ = zap.NewDevelopment() } -// TearDownTest runs after each test method -func (suite *CodexIndexDownloaderTestSuite) TearDownTest() { - suite.ctrl.Finish() +func (suite *CodexIndexDownloaderRealClientSuite) SetupTest() { + suite.client = NewCodexClientTest(suite.T()) + suite.uploadedCIDs = []string{} +} - // Clean up cancel channel - check if it's still open before closing - if suite.cancelChan != nil { - select { - case <-suite.cancelChan: - // Already closed, do nothing - default: - // Still open, close it - close(suite.cancelChan) +func (suite *CodexIndexDownloaderRealClientSuite) TearDownTest() { + // Clean up all uploaded CIDs + for _, cid := range suite.uploadedCIDs { + if err := suite.client.RemoveCid(cid); err != nil { + suite.T().Logf("Warning: Failed to remove CID %s: %v", cid, err) } } - - // Clean up test directory - if suite.testDir != "" { - os.RemoveAll(suite.testDir) - } } -// TestCodexIndexDownloaderTestSuite runs the test suite -func TestCodexIndexDownloaderTestSuite(t *testing.T) { - suite.Run(t, new(CodexIndexDownloaderTestSuite)) +func TestCodexIndexDownloaderRealClientSuite(t *testing.T) { + suite.Run(t, new(CodexIndexDownloaderRealClientSuite)) } -// ==================== GotManifest Tests ==================== +// TestDownloadIndexFileFromLocalNode_Success tests successful download from local node +func (suite *CodexIndexDownloaderRealClientSuite) TestDownloadIndexFileFromLocalNode_Success() { + // Arrange: Upload test data to Codex + payload := make([]byte, 2048) + _, err := rand.Read(payload) + require.NoError(suite.T(), err, "failed to generate random payload") + suite.T().Logf("Generated payload (first 32 bytes hex): %s", hex.EncodeToString(payload[:32])) -func (suite *CodexIndexDownloaderTestSuite) TestGotManifest_SuccessClosesChannel() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "index.bin") + cid, err := suite.client.Upload(bytes.NewReader(payload), "index-test.bin") + require.NoError(suite.T(), err, "upload failed") + suite.T().Logf("Upload successful, CID: %s", cid) + suite.uploadedCIDs = append(suite.uploadedCIDs, cid) // Track for cleanup - // Setup mock to return a successful manifest - expectedManifest := codex.Manifest{ - Cid: testCid, - } - expectedManifest.DatasetSize = 1024 - expectedManifest.TreeCid = "zDvZRwzmTreeCID" - expectedManifest.BlockSize = 65536 + // Create downloader + downloader := 
communities.NewCodexIndexDownloader(suite.client, suite.logger) - suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). - Return(expectedManifest, nil) + // Act: Download the index file from local node + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + var output bytes.Buffer + err = downloader.DownloadIndexFileFromLocalNode(ctx, cid, &output) + + // Assert + require.NoError(suite.T(), err, "DownloadIndexFileFromLocalNode should succeed") + assert.Equal(suite.T(), payload, output.Bytes(), "Downloaded data should match uploaded data") + suite.T().Logf("✅ Successfully downloaded %d bytes from local node", output.Len()) +} + +// TestDownloadIndexFileFromNetwork_Success tests successful download from network +func (suite *CodexIndexDownloaderRealClientSuite) TestDownloadIndexFileFromNetwork_Success() { + // Arrange: Upload test data to Codex + payload := make([]byte, 1024) + _, err := rand.Read(payload) + require.NoError(suite.T(), err, "failed to generate random payload") + suite.T().Logf("Generated payload (first 32 bytes hex): %s", hex.EncodeToString(payload[:32])) + + cid, err := suite.client.Upload(bytes.NewReader(payload), "network-index-test.bin") + require.NoError(suite.T(), err, "upload failed") + suite.T().Logf("Upload successful, CID: %s", cid) + suite.uploadedCIDs = append(suite.uploadedCIDs, cid) // Track for cleanup // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Call GotManifest - manifestChan := downloader.GotManifest() - - // Wait for channel to close (with timeout) - select { - case <-manifestChan: - // Success - channel closed as expected - suite.T().Log("✅ GotManifest channel closed successfully") - case <-time.After(1 * time.Second): - suite.T().Fatal("Timeout waiting for GotManifest channel to close") - } + downloader := communities.NewCodexIndexDownloader(suite.client, suite.logger) - // Verify dataset size was recorded - assert.Equal(suite.T(), int64(1024), downloader.GetDatasetSize(), "Dataset size should be recorded") + // Act: Download the index file from network + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() - // Verify no error was recorded - assert.NoError(suite.T(), downloader.GetError(), "No error should be recorded on success") + var output bytes.Buffer + err = downloader.DownloadIndexFileFromNetwork(ctx, cid, &output) + + // Assert + require.NoError(suite.T(), err, "DownloadIndexFileFromNetwork should succeed") + assert.Equal(suite.T(), payload, output.Bytes(), "Downloaded data should match uploaded data") + suite.T().Logf("✅ Successfully downloaded %d bytes from network", output.Len()) } -func (suite *CodexIndexDownloaderTestSuite) TestGotManifest_ErrorDoesNotCloseChannel() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "index.bin") +// TestDownloadIndexFileFromLocalNode_LargeFile tests downloading a larger file +func (suite *CodexIndexDownloaderRealClientSuite) TestDownloadIndexFileFromLocalNode_LargeFile() { + // Arrange: Upload a larger file (1MB) + payload := make([]byte, 1024*1024) + _, err := rand.Read(payload) + require.NoError(suite.T(), err, "failed to generate random payload") + suite.T().Logf("Generated large payload: %d bytes", len(payload)) - // Setup mock to return an error - suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). 
- Return(codex.Manifest{}, errors.New("fetch error")) + cid, err := suite.client.Upload(bytes.NewReader(payload), "large-index.bin") + require.NoError(suite.T(), err, "upload failed") + suite.T().Logf("Upload successful, CID: %s", cid) + suite.uploadedCIDs = append(suite.uploadedCIDs, cid) // Track for cleanup // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Call GotManifest - manifestChan := downloader.GotManifest() - - // Channel should NOT close on error - select { - case <-manifestChan: - suite.T().Fatal("GotManifest channel should NOT close on error") - case <-time.After(200 * time.Millisecond): - // Expected - channel did not close - suite.T().Log("✅ GotManifest channel did not close on error (as expected)") - } + downloader := communities.NewCodexIndexDownloader(suite.client, suite.logger) - // Verify dataset size was NOT recorded (should be 0) - assert.Equal(suite.T(), int64(0), downloader.GetDatasetSize(), "Dataset size should be 0 on error") + // Act: Download the large file + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() - // Verify download is not complete - assert.False(suite.T(), downloader.IsDownloadComplete(), "Download should not be complete on error") + var output bytes.Buffer + err = downloader.DownloadIndexFileFromLocalNode(ctx, cid, &output) - // Verify error was recorded - assert.Error(suite.T(), downloader.GetError(), "Error should be recorded") - assert.Contains(suite.T(), downloader.GetError().Error(), "fetch error", "Error message should contain fetch error") - suite.T().Log("✅ Error was recorded correctly") + // Assert + require.NoError(suite.T(), err, "DownloadIndexFileFromLocalNode should succeed for large file") + assert.Equal(suite.T(), len(payload), output.Len(), "Downloaded size should match uploaded size") + assert.Equal(suite.T(), payload, output.Bytes(), "Downloaded data should match uploaded data") + suite.T().Logf("✅ Successfully downloaded large file: %d bytes", output.Len()) } -func (suite *CodexIndexDownloaderTestSuite) TestGotManifest_CidMismatchDoesNotCloseChannel() { - testCid := "zDvZRwzmTestCID123" - differentCid := "zDvZRwzmDifferentCID456" - filePath := filepath.Join(suite.testDir, "index.bin") - - // Setup mock to return a manifest with different CID - mismatchedManifest := codex.Manifest{ - Cid: differentCid, // Different CID! - } - mismatchedManifest.DatasetSize = 1024 +// ============================================================================ +// Suite 2: Mock CodexClient Tests (Errors and Cancellations) +// ============================================================================ - suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). - Return(mismatchedManifest, nil) +// CodexIndexDownloaderMockClientSuite tests error handling and cancellations +// using a mocked CodexClient interface. 
+type CodexIndexDownloaderMockClientSuite struct { + suite.Suite + ctrl *gomock.Controller + mockClient *mock_communities.MockCodexClientInterface + logger *zap.Logger +} - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Call GotManifest - manifestChan := downloader.GotManifest() - - // Channel should NOT close on CID mismatch - select { - case <-manifestChan: - suite.T().Fatal("GotManifest channel should NOT close on CID mismatch") - case <-time.After(200 * time.Millisecond): - // Expected - channel did not close - suite.T().Log("✅ GotManifest channel did not close on CID mismatch (as expected)") - } +func (suite *CodexIndexDownloaderMockClientSuite) SetupSuite() { + suite.logger = zap.NewNop() // Use NOP logger for unit tests +} - // Verify dataset size was NOT recorded (should be 0) - assert.Equal(suite.T(), int64(0), downloader.GetDatasetSize(), "Dataset size should be 0 on CID mismatch") +func (suite *CodexIndexDownloaderMockClientSuite) SetupTest() { + suite.ctrl = gomock.NewController(suite.T()) + suite.mockClient = mock_communities.NewMockCodexClientInterface(suite.ctrl) +} - // Verify download is not complete - assert.False(suite.T(), downloader.IsDownloadComplete(), "Download should not be complete on CID mismatch") +func (suite *CodexIndexDownloaderMockClientSuite) TearDownTest() { + suite.ctrl.Finish() +} - // Verify error was recorded - assert.Error(suite.T(), downloader.GetError(), "Error should be recorded for CID mismatch") - assert.Contains(suite.T(), downloader.GetError().Error(), "CID mismatch", "Error message should mention CID mismatch") - suite.T().Log("✅ Error was recorded for CID mismatch") +func TestCodexIndexDownloaderMockClientSuite(t *testing.T) { + suite.Run(t, new(CodexIndexDownloaderMockClientSuite)) } -func (suite *CodexIndexDownloaderTestSuite) TestGotManifest_Cancellation() { +// TestDownloadIndexFileFromLocalNode_ContextCancellation tests cancellation during local download +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromLocalNode_ContextCancellation() { + // Arrange testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "index.bin") + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Setup mock with DoAndReturn to simulate slow response and check for cancellation - fetchCalled := make(chan struct{}) + // Setup mock to simulate slow download that respects context cancellation + downloadStarted := make(chan struct{}) suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). - DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { - close(fetchCalled) // Signal that fetch was called - + LocalDownloadWithContext(gomock.Any(), testCid, gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + close(downloadStarted) // Wait for context cancellation <-ctx.Done() - return codex.Manifest{}, ctx.Err() + return ctx.Err() }) - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Call GotManifest - manifestChan := downloader.GotManifest() + // Act: Start download and cancel context + ctx, cancel := context.WithCancel(context.Background()) - // Wait for FetchManifestWithContext to be called - select { - case <-fetchCalled: - suite.T().Log("FetchManifestWithContext was called") - case <-time.After(1 * time.Second): - suite.T().Fatal("Timeout waiting for FetchManifestWithContext to be called") - } + var output bytes.Buffer + errChan := make(chan error, 1) + go func() { + errChan <- downloader.DownloadIndexFileFromLocalNode(ctx, testCid, &output) + }() - // Now trigger cancellation - close(suite.cancelChan) - - // Channel should NOT close on cancellation - select { - case <-manifestChan: - suite.T().Fatal("GotManifest channel should NOT close on cancellation") - case <-time.After(200 * time.Millisecond): - // Expected - channel did not close - suite.T().Log("✅ GotManifest was cancelled and channel did not close (as expected)") - } + // Wait for download to start + <-downloadStarted - // Verify dataset size was NOT recorded - assert.Equal(suite.T(), int64(0), downloader.GetDatasetSize(), "Dataset size should be 0 on cancellation") + // Cancel the context + cancel() - // Verify download is not complete - assert.False(suite.T(), downloader.IsDownloadComplete(), "Download should not be complete on cancellation") + // Wait for download to complete + err := <-errChan - // Verify error was recorded (context cancellation) - assert.Error(suite.T(), downloader.GetError(), "Error should be recorded for cancellation") - assert.ErrorIs(suite.T(), downloader.GetError(), context.Canceled, "Error should be context.Canceled") - suite.T().Log("✅ Cancellation error was recorded correctly") + // Assert + require.Error(suite.T(), err, "Should return error on cancellation") + assert.ErrorIs(suite.T(), err, context.Canceled, "Error should be context.Canceled") + suite.T().Log("✅ Correctly handled context cancellation") } -func (suite *CodexIndexDownloaderTestSuite) TestGotManifest_RecordsDatasetSize() { +// TestDownloadIndexFileFromLocalNode_DownloadError tests error during local download +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromLocalNode_DownloadError() { + // Arrange testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "index.bin") - expectedSize := 2048 - - // Setup mock to return a manifest with specific dataset size - expectedManifest := codex.Manifest{ - Cid: testCid, - } - expectedManifest.DatasetSize = expectedSize - expectedManifest.TreeCid = "zDvZRwzmTreeCID" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) + expectedError := errors.New("local download failed: network error") suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). 
- Return(expectedManifest, nil) - - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Initially, dataset size should be 0 - assert.Equal(suite.T(), int64(0), downloader.GetDatasetSize(), "Initial dataset size should be 0") - - // Call GotManifest - manifestChan := downloader.GotManifest() - - // Wait for channel to close - select { - case <-manifestChan: - suite.T().Log("GotManifest completed successfully") - case <-time.After(1 * time.Second): - suite.T().Fatal("Timeout waiting for GotManifest to complete") - } - - // Verify dataset size was recorded correctly - assert.Equal(suite.T(), int64(expectedSize), downloader.GetDatasetSize(), "Dataset size should match manifest") - suite.T().Logf("✅ Dataset size correctly recorded: %d", downloader.GetDatasetSize()) - - // Verify no error was recorded - assert.NoError(suite.T(), downloader.GetError(), "No error should be recorded on success") + LocalDownloadWithContext(gomock.Any(), testCid, gomock.Any()). + Return(expectedError) + + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromLocalNode(ctx, testCid, &output) + + // Assert + require.Error(suite.T(), err, "Should return error on download failure") + assert.Equal(suite.T(), expectedError, err, "Error should match expected error") + assert.Zero(suite.T(), output.Len(), "Output should be empty on error") + suite.T().Log("✅ Correctly propagated download error") } -// ==================== DownloadIndexFile Tests ==================== +// TestDownloadIndexFileFromNetwork_ContextCancellation tests cancellation during network download +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_ContextCancellation() { + // Arrange + testCid := "zDvZRwzmTestCID456" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) -func (suite *CodexIndexDownloaderTestSuite) TestDownloadIndexFile_StoresFileCorrectly() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "downloaded-index.bin") - testData := []byte("test index file content with some data") - - // Setup mock to write test data to the provided writer + downloadStarted := make(chan struct{}) suite.mockClient.EXPECT(). DownloadWithContext(gomock.Any(), testCid, gomock.Any()). 
- DoAndReturn(func(ctx context.Context, cid string, w io.Writer) error { - _, err := w.Write(testData) - return err + DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + close(downloadStarted) + <-ctx.Done() + return ctx.Err() }) - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) + // Act + ctx, cancel := context.WithCancel(context.Background()) - // Start download - downloader.DownloadIndexFile() + var output bytes.Buffer + errChan := make(chan error, 1) + go func() { + errChan <- downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) + }() - // Wait for download to complete (check bytes completed first, then file existence) - require.Eventually(suite.T(), func() bool { - // First check: all bytes downloaded - if downloader.BytesCompleted() != int64(len(testData)) { - return false - } - // Second check: download marked as complete (file renamed) - if !downloader.IsDownloadComplete() { - return false - } - // Third check: file actually exists with correct size - stat, err := os.Stat(filePath) - if err != nil { - return false - } - return stat.Size() == int64(len(testData)) - }, 2*time.Second, 50*time.Millisecond, "File should be fully downloaded and saved") + <-downloadStarted + cancel() + err := <-errChan - // Verify file contents - actualData, err := os.ReadFile(filePath) - require.NoError(suite.T(), err, "Should be able to read downloaded file") - assert.Equal(suite.T(), testData, actualData, "File contents should match") - suite.T().Logf("✅ File downloaded successfully to: %s", filePath) + // Assert + require.Error(suite.T(), err, "Should return error on cancellation") + assert.ErrorIs(suite.T(), err, context.Canceled, "Error should be context.Canceled") + suite.T().Log("✅ Correctly handled network download cancellation") +} - // Verify download is complete - assert.True(suite.T(), downloader.IsDownloadComplete(), "Download should be complete") +// TestDownloadIndexFileFromNetwork_NetworkError tests network error handling +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_NetworkError() { + // Arrange + testCid := "zDvZRwzmTestCID789" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) + expectedError := errors.New("network download failed: connection timeout") - // Verify no error was recorded - assert.NoError(suite.T(), downloader.GetError(), "No error should be recorded on successful download") + suite.mockClient.EXPECT(). + DownloadWithContext(gomock.Any(), testCid, gomock.Any()). 
+ Return(expectedError) + + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) + + // Assert + require.Error(suite.T(), err, "Should return error on network failure") + assert.Equal(suite.T(), expectedError, err, "Error should match expected error") + assert.Zero(suite.T(), output.Len(), "Output should be empty on error") + suite.T().Log("✅ Correctly propagated network error") } -func (suite *CodexIndexDownloaderTestSuite) TestDownloadIndexFile_TracksProgress() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "progress-test.bin") - testData := []byte("0123456789") // 10 bytes +// TestDownloadIndexFileFromNetwork_TimeoutError tests timeout handling +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_TimeoutError() { + // Arrange + testCid := "zDvZRwzmTestCIDTimeout" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Setup mock to write test data in chunks suite.mockClient.EXPECT(). DownloadWithContext(gomock.Any(), testCid, gomock.Any()). - DoAndReturn(func(ctx context.Context, cid string, w io.Writer) error { - // Write in 2-byte chunks to simulate streaming - for i := 0; i < len(testData); i += 2 { - end := i + 2 - if end > len(testData) { - end = len(testData) - } - _, err := w.Write(testData[i:end]) - if err != nil { - return err - } - time.Sleep(10 * time.Millisecond) // Small delay to allow progress tracking + DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + // Simulate slow download that exceeds timeout + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(10 * time.Second): + return nil } - return nil }) - // Create downloader and set dataset size - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Start download - downloader.DownloadIndexFile() + // Act: Use a very short timeout + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() - // Initially bytes completed should be 0 - assert.Equal(suite.T(), int64(0), downloader.BytesCompleted(), "Initial bytes completed should be 0") - - // Wait for some progress - require.Eventually(suite.T(), func() bool { - return downloader.BytesCompleted() > 0 - }, 1*time.Second, 20*time.Millisecond, "Progress should increase") - - suite.T().Logf("Progress observed: %d bytes", downloader.BytesCompleted()) - - // Wait for download to complete - require.Eventually(suite.T(), func() bool { - return downloader.BytesCompleted() == int64(len(testData)) - }, 2*time.Second, 50*time.Millisecond, "Should download all bytes") + var output bytes.Buffer + err := downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) - assert.Equal(suite.T(), int64(len(testData)), downloader.BytesCompleted(), "All bytes should be downloaded") - suite.T().Logf("✅ Download progress tracked correctly: %d/%d bytes", downloader.BytesCompleted(), len(testData)) - - // Verify download is complete - assert.True(suite.T(), downloader.IsDownloadComplete(), "Download should be complete") - - // Verify no error was recorded - assert.NoError(suite.T(), downloader.GetError(), "No error should be recorded on successful download") + // Assert + require.Error(suite.T(), err, "Should return error on timeout") + assert.ErrorIs(suite.T(), err, context.DeadlineExceeded, "Error should be context.DeadlineExceeded") + suite.T().Log("✅ Correctly handled 
timeout") } -func (suite *CodexIndexDownloaderTestSuite) TestDownloadIndexFile_Cancellation() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "cancel-test.bin") +// TestDownloadIndexFileFromNetwork_PartialWrite tests handling of partial write errors +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_PartialWrite() { + // Arrange + testCid := "zDvZRwzmPartialWrite" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Setup mock with DoAndReturn to simulate slow download and check for cancellation - downloadStarted := make(chan struct{}) suite.mockClient.EXPECT(). DownloadWithContext(gomock.Any(), testCid, gomock.Any()). - DoAndReturn(func(ctx context.Context, cid string, w io.Writer) error { - close(downloadStarted) // Signal that download started - - // Simulate slow download with cancellation check - for range 100 { - select { - case <-ctx.Done(): - return ctx.Err() // Return cancellation error - default: - _, err := w.Write([]byte("x")) - if err != nil { - return err - } - time.Sleep(10 * time.Millisecond) - } + DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + // Write some data first + _, err := output.Write([]byte("partial data")) + if err != nil { + return err } - return nil + // Then return an error + return errors.New("write interrupted") }) - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Start download - downloader.DownloadIndexFile() - - // Wait for download to start - select { - case <-downloadStarted: - suite.T().Log("Download started") - case <-time.After(1 * time.Second): - suite.T().Fatal("Timeout waiting for download to start") - } - - // Trigger cancellation - close(suite.cancelChan) - suite.T().Log("Cancellation triggered") - - // Wait a bit for cancellation to take effect - time.Sleep(200 * time.Millisecond) + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) + + // Assert + require.Error(suite.T(), err, "Should return error on partial write") + assert.Contains(suite.T(), err.Error(), "write interrupted", "Error should indicate write interruption") + // Verify partial data was written + assert.Equal(suite.T(), "partial data", output.String(), "Partial data should be in output buffer") + suite.T().Log("✅ Correctly handled partial write error") +} - // Verify that download was stopped (bytes completed should be small) - bytesCompleted := downloader.BytesCompleted() - suite.T().Logf("✅ Download cancelled after %d bytes (should be < 100)", bytesCompleted) - assert.Less(suite.T(), bytesCompleted, int64(100), "Download should be cancelled before completing all 100 bytes") +// TestDownloadIndexFileFromLocalNode_EmptyOutput tests downloading empty content +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromLocalNode_EmptyOutput() { + // Arrange + testCid := "zDvZRwzmEmptyCID" + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Verify download is not complete - assert.False(suite.T(), downloader.IsDownloadComplete(), "Download should not be complete on cancellation") + // Mock returns success but writes nothing (empty file) + suite.mockClient.EXPECT(). + LocalDownloadWithContext(gomock.Any(), testCid, gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + // Write nothing, just return success + return nil + }) - // Verify error was recorded (context cancellation) - assert.Error(suite.T(), downloader.GetError(), "Error should be recorded for cancellation") - assert.ErrorIs(suite.T(), downloader.GetError(), context.Canceled, "Error should be context.Canceled") - suite.T().Log("✅ Cancellation error was recorded correctly") + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromLocalNode(ctx, testCid, &output) - // Verify that the target file does NOT exist (atomic write should clean up temp file on cancellation) - _, err := os.Stat(filePath) - assert.True(suite.T(), os.IsNotExist(err), "Target file should not exist after cancellation") - suite.T().Log("✅ Target file does not exist after cancellation (temp file cleaned up)") - assert.False(suite.T(), downloader.IsDownloadComplete(), "✅ IsDownloadComplete should be false after cancellation") + // Assert + require.NoError(suite.T(), err, "Should succeed even with empty output") + assert.Zero(suite.T(), output.Len(), "Output should be empty") + suite.T().Log("✅ Correctly handled empty download") } -func (suite *CodexIndexDownloaderTestSuite) TestDownloadIndexFile_ErrorHandling() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "error-test.bin") +// TestDownloadIndexFileFromNetwork_SuccessWithData tests successful network download with data +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_SuccessWithData() { + // Arrange + testCid := "zDvZRwzmSuccessCID" + testData := []byte("test index file content") + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Setup mock to return an error during download suite.mockClient.EXPECT(). DownloadWithContext(gomock.Any(), testCid, gomock.Any()). - Return(errors.New("download failed")) - - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) + DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + _, err := output.Write(testData) + return err + }) - // Start download - downloader.DownloadIndexFile() + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) - // Wait a bit for the goroutine to run - time.Sleep(200 * time.Millisecond) + // Assert + require.NoError(suite.T(), err, "Should succeed") + assert.Equal(suite.T(), testData, output.Bytes(), "Output should match test data") + suite.T().Log("✅ Successfully downloaded data from network") +} - // Verify that no bytes were recorded on error - assert.Equal(suite.T(), int64(0), downloader.BytesCompleted(), "No bytes should be recorded on error") - suite.T().Log("✅ Error handling: no bytes recorded on download failure") +// TestDownloadIndexFileFromLocalNode_SuccessWithData tests successful local download with data +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromLocalNode_SuccessWithData() { + // Arrange + testCid := "zDvZRwzmLocalSuccessCID" + testData := []byte("local index file content") + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) - // Verify download is not complete - assert.False(suite.T(), downloader.IsDownloadComplete(), "Download should not be complete on error") + suite.mockClient.EXPECT(). 
+ LocalDownloadWithContext(gomock.Any(), testCid, gomock.Any()). + DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + _, err := output.Write(testData) + return err + }) - // Verify error was recorded - assert.Error(suite.T(), downloader.GetError(), "Error should be recorded") - assert.Contains(suite.T(), downloader.GetError().Error(), "download failed", "Error message should contain download failed") - suite.T().Log("✅ Error was recorded correctly") + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromLocalNode(ctx, testCid, &output) - // Verify that the target file does NOT exist (atomic write should clean up temp file) - _, err := os.Stat(filePath) - assert.True(suite.T(), os.IsNotExist(err), "Target file should not exist on download error") - suite.T().Log("✅ Target file does not exist after download error (temp file cleaned up)") - assert.False(suite.T(), downloader.IsDownloadComplete(), "✅ IsDownloadComplete should be false after cancellation") + // Assert + require.NoError(suite.T(), err, "Should succeed") + assert.Equal(suite.T(), testData, output.Bytes(), "Output should match test data") + suite.T().Log("✅ Successfully downloaded data from local node") } -func (suite *CodexIndexDownloaderTestSuite) TestLength_ReturnsDatasetSize() { - testCid := "zDvZRwzmTestCID123" - filePath := filepath.Join(suite.testDir, "index.bin") - expectedSize := 4096 +// TestDownloadIndexFileFromNetwork_MultipleChunks tests downloading data written in chunks +func (suite *CodexIndexDownloaderMockClientSuite) TestDownloadIndexFileFromNetwork_MultipleChunks() { + // Arrange + testCid := "zDvZRwzmChunkedCID" + chunk1 := []byte("chunk1") + chunk2 := []byte("chunk2") + chunk3 := []byte("chunk3") + expectedData := append(append(chunk1, chunk2...), chunk3...) - // Setup mock to return a manifest - expectedManifest := codex.Manifest{ - Cid: testCid, - } - expectedManifest.DatasetSize = expectedSize + downloader := communities.NewCodexIndexDownloader(suite.mockClient, suite.logger) suite.mockClient.EXPECT(). - FetchManifestWithContext(gomock.Any(), testCid). - Return(expectedManifest, nil) - - // Create downloader - downloader := communities.NewCodexIndexDownloader(suite.mockClient, testCid, filePath, suite.cancelChan, suite.logger) - - // Initially, Length should return 0 - assert.Equal(suite.T(), int64(0), downloader.Length(), "Initial length should be 0") + DownloadWithContext(gomock.Any(), testCid, gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, cid string, output io.Writer) error { + // Write in multiple chunks + if _, err := output.Write(chunk1); err != nil { + return err + } + if _, err := output.Write(chunk2); err != nil { + return err + } + if _, err := output.Write(chunk3); err != nil { + return err + } + return nil + }) - // Fetch manifest - manifestChan := downloader.GotManifest() - <-manifestChan + // Act + ctx := context.Background() + var output bytes.Buffer + err := downloader.DownloadIndexFileFromNetwork(ctx, testCid, &output) - // Now Length should return the dataset size - assert.Equal(suite.T(), int64(expectedSize), downloader.Length(), "Length should return dataset size") - suite.T().Logf("✅ Length() correctly returns dataset size: %d", downloader.Length()) + // Assert + require.NoError(suite.T(), err, "Should succeed with chunked writes") + assert.Equal(suite.T(), expectedData, output.Bytes(), "Output should contain all chunks") + suite.T().Log("✅ Successfully downloaded chunked data") } diff --git a/protocol/communities/codex_manager_archive_cancellation_test.go b/protocol/communities/codex_manager_archive_cancellation_test.go index bdabf2b123e..983c25298e9 100644 --- a/protocol/communities/codex_manager_archive_cancellation_test.go +++ b/protocol/communities/codex_manager_archive_cancellation_test.go @@ -4,7 +4,6 @@ import ( "context" "crypto/rand" "io" - "path/filepath" "testing" "time" @@ -50,10 +49,8 @@ func (s *MockCodexArchiveManagerSuite) buildManagers() (*communities.Manager, *c s.Require().NoError(err) s.Require().NoError(m.Start()) - rootDir := s.T().TempDir() codexConfig := ¶ms.CodexConfig{ - Enabled: true, - HistoryArchiveDataDir: filepath.Join(rootDir, "codex", "archivedata"), + Enabled: true, } amc := &communities.ArchiveManagerConfig{ @@ -88,9 +85,8 @@ func (s *MockCodexArchiveManagerSuite) TearDownTest() { s.Require().NoError(s.manager.Stop()) } -// TestMockDownloadCancellationBeforeManifestFetch tests cancellation before manifest is fetched -// This test is 100% deterministic - we control exactly when operations complete -func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationBeforeManifestFetch() { +// TestMockDownloadCancellationBeforeIndexIsDownloaded tests cancellation before index is downloaded +func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationBeforeIndexIsDownloaded() { // Subscribe to signals subscription := s.manager.Subscribe() @@ -98,26 +94,26 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationBeforeManifes communityID := types.HexBytes("mock-cancel-test-1") cancelChan := make(chan struct{}) - // Mock expectations: FetchManifestWithContext will be called but should be cancelled + // Mock expectations: DownloadWithContext may be called but should be cancelled immediately s.mockCodex.EXPECT(). - FetchManifestWithContext(gomock.Any(), indexCid). - DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { + DownloadWithContext(gomock.Any(), indexCid, gomock.Any()). + DoAndReturn(func(ctx context.Context, cid string, output any) error { // Block until context is cancelled <-ctx.Done() - return codex.Manifest{}, ctx.Err() + return ctx.Err() }). 
MaxTimes(1) // May or may not be called depending on timing // Track signals - manifestFetchedReceived := false + indexDownloadCompletedReceived := false signalDone := make(chan struct{}) go func() { timeout := time.After(5 * time.Second) for { select { case event := <-subscription: - if event.ManifestFetchedSignal != nil { - manifestFetchedReceived = true + if event.IndexDownloadCompletedSignal != nil { + indexDownloadCompletedReceived = true } case <-timeout: close(signalDone) @@ -144,12 +140,12 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationBeforeManifes close(signalDone) time.Sleep(50 * time.Millisecond) - s.Require().False(manifestFetchedReceived, "ManifestFetchedSignal should not be received when cancelled early") + s.Require().False(indexDownloadCompletedReceived, "IndexDownloadCompletedSignal should not be received when cancelled early") s.T().Logf("✓ Mock test: Early cancellation verified with zero CodexClient calls") } // TestMockDownloadCancellationDuringIndexDownload tests cancellation during index download -// Uses mock to control exact timing of manifest fetch completion +// Uses mock to control exact timing of index download completion func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDownload() { subscription := s.manager.Subscribe() @@ -157,44 +153,31 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDo _, err := rand.Read(archiveData) s.Require().NoError(err) - archiveCid := "test-archive-cid-def456" + // archiveCid := "test-archive-cid-def456" indexCid := "test-index-cid-uvw123" - index := &protobuf.CodexWakuMessageArchiveIndex{ - Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ - "test-hash-large": { - Cid: archiveCid, - Metadata: &protobuf.WakuMessageArchiveMetadata{ - From: 1000, - To: 2000, - }, - }, - }, - } + // index := &protobuf.CodexWakuMessageArchiveIndex{ + // Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ + // "test-hash-large": { + // Cid: archiveCid, + // Metadata: &protobuf.WakuMessageArchiveMetadata{ + // From: 1000, + // To: 2000, + // }, + // }, + // }, + // } - codexIndexBytes, err := proto.Marshal(index) - s.Require().NoError(err) + // _ = index // Index created but not used in this test (would be marshaled on successful download) communityID := types.HexBytes("mock-cancel-test-2") cancelChan := make(chan struct{}) - // Mock expectations: Manifest fetch succeeds, but index download never completes - manifest := codex.Manifest{ - Cid: indexCid, - DatasetSize: len(codexIndexBytes), - } - - // FetchManifestWithContext will succeed - s.mockCodex.EXPECT(). - FetchManifestWithContext(gomock.Any(), indexCid). - Return(manifest, nil). - Times(1) - - // DownloadWithContext will be called but blocked until cancelled + // Mock expectations: Index download never completes due to cancellation downloadStarted := make(chan struct{}) s.mockCodex.EXPECT(). DownloadWithContext(gomock.Any(), indexCid, gomock.Any()). 
- DoAndReturn(func(ctx context.Context, cid string, output interface{}) error { + DoAndReturn(func(ctx context.Context, cid string, output any) error { close(downloadStarted) // Block until context is cancelled <-ctx.Done() @@ -203,7 +186,6 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDo Times(1) // Track signals - manifestFetchedReceived := false indexDownloadCompletedReceived := false signalDone := make(chan struct{}) @@ -212,13 +194,8 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDo for { select { case event := <-subscription: - if event.ManifestFetchedSignal != nil { - manifestFetchedReceived = true - s.T().Logf("Received ManifestFetchedSignal - waiting for download to start before cancelling") - // Wait for download to actually start - <-downloadStarted - s.T().Logf("Download started, now cancelling") - close(cancelChan) + if event == nil { + continue } if event.IndexDownloadCompletedSignal != nil { indexDownloadCompletedReceived = true @@ -232,6 +209,13 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDo } }() + // Wait for download to start, then cancel + go func() { + <-downloadStarted + s.T().Logf("Download started, now cancelling") + close(cancelChan) + }() + // Set short timeout for test s.archiveManager.SetDownloadTimeout(1 * time.Second) @@ -245,14 +229,12 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringIndexDo time.Sleep(50 * time.Millisecond) // Verify signals - s.Require().True(manifestFetchedReceived, "Should have received ManifestFetchedSignal") - s.Require().False(indexDownloadCompletedReceived, "Should NOT have received IndexDownloadCompletedSignal") + s.Require().False(indexDownloadCompletedReceived, "Should NOT have received IndexDownloadCompletedSignal when download is cancelled") s.T().Logf("✓ Mock test: Index download cancellation verified with controlled timing") } // TestMockDownloadCancellationDuringArchiveDownload tests cancellation during archive downloads -// Mock allows us to control exactly when first archive completes func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchiveDownload() { subscription := s.manager.Subscribe() @@ -296,21 +278,10 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchive cancelChan := make(chan struct{}) // Mock expectations - manifest := codex.Manifest{ - Cid: indexCid, - DatasetSize: len(codexIndexBytes), - } - - // Manifest fetch succeeds - s.mockCodex.EXPECT(). - FetchManifestWithContext(gomock.Any(), indexCid). - Return(manifest, nil). - Times(1) - // Index download succeeds s.mockCodex.EXPECT(). DownloadWithContext(gomock.Any(), indexCid, gomock.Any()). - DoAndReturn(func(ctx context.Context, cid string, output interface{}) error { + DoAndReturn(func(ctx context.Context, cid string, output any) error { // Write the index bytes to whatever writer we receive if w, ok := output.(io.Writer); ok { _, _ = w.Write(codexIndexBytes) @@ -319,12 +290,10 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchive }). Times(1) - // First archive download succeeds, triggers cancellation - // firstArchiveDownloaded := make(chan struct{}) + // First archive download succeeds s.mockCodex.EXPECT(). TriggerDownloadWithContext(gomock.Any(), archives[0].cid). 
DoAndReturn(func(ctx context.Context, cid string) (codex.Manifest, error) { - // close(firstArchiveDownloaded) return codex.Manifest{Cid: cid, DatasetSize: len(archives[0].data)}, nil }). Times(1) @@ -372,6 +341,7 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchive indexDownloadCompletedReceived = true } if event.HistoryArchiveDownloadedSignal != nil { + s.T().Logf("Received HistoryArchiveDownloadedSignal for archive CID") archivesDownloaded++ if archivesDownloaded == 1 { // We received the signal, which means HasCid returned true and count was incremented. @@ -388,8 +358,8 @@ func (s *MockCodexArchiveManagerSuite) TestMockDownloadCancellationDuringArchive } }() - // Set short timeout for test - s.archiveManager.SetDownloadTimeout(1 * time.Second) + // Set longer timeout for test to avoid timeout issues + s.archiveManager.SetDownloadTimeout(5 * time.Second) // Start download taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) diff --git a/protocol/communities/codex_manager_archive_test.go b/protocol/communities/codex_manager_archive_test.go index 7bf8cc315a6..dbd6e14f130 100644 --- a/protocol/communities/codex_manager_archive_test.go +++ b/protocol/communities/codex_manager_archive_test.go @@ -2,6 +2,7 @@ package communities_test import ( "bytes" + "context" "crypto/ecdsa" "crypto/rand" "encoding/hex" @@ -10,7 +11,7 @@ import ( "time" "github.com/codex-storage/codex-go-bindings/codex" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "github.com/status-im/status-go/appdatabase" "github.com/status-im/status-go/crypto" @@ -39,8 +40,7 @@ type CodexArchiveManagerSuite struct { func buildCodexConfig(t *testing.T) *params.CodexConfig { rootDir := t.TempDir() return ¶ms.CodexConfig{ - Enabled: true, - HistoryArchiveDataDir: filepath.Join(rootDir, "codex", "archivedata"), + Enabled: true, CodexNodeConfig: codex.Config{ DataDir: filepath.Join(rootDir, "codex", "codexdata"), BlockRetries: 5, @@ -198,7 +198,6 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { receivedSignals := struct { downloadingStarted bool archiveDownloaded map[string]bool // hash -> received - seedingSignal bool }{ archiveDownloaded: make(map[string]bool), } @@ -228,13 +227,6 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { } } } - if event.HistoryArchivesSeedingSignal != nil { - receivedSignals.seedingSignal = true - s.T().Logf("Received HistoryArchivesSeedingSignal for community: %s, MagnetLink: %v, IndexCid: %v", - event.HistoryArchivesSeedingSignal.CommunityID, - event.HistoryArchivesSeedingSignal.MagnetLink, - event.HistoryArchivesSeedingSignal.IndexCid) - } case <-timeout: close(done) return @@ -268,7 +260,6 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { // Verify that all expected signals were received s.Require().True(receivedSignals.downloadingStarted, "Should have received DownloadingHistoryArchivesStartedSignal") - s.Require().True(receivedSignals.seedingSignal, "Should have received HistoryArchivesSeedingSignal") // Verify that we received download signals for all archives for _, archive := range archives { @@ -279,7 +270,9 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { s.T().Logf("All signals verified successfully!") // Verify that the index file exists and has correct content - loadedIndex, err := s.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.identity, communityID) + ctx, cancel := 
context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + loadedIndex, err := s.archiveManager.CodexLoadHistoryArchiveIndex(ctx, s.identity, communityID, cid, true) s.Require().NoError(err, "Failed to load index file from disk") s.Require().NotNil(loadedIndex, "Loaded index should not be nil") s.Require().Equal(len(archives), len(loadedIndex.Archives), "Loaded index should contain all archives") @@ -295,518 +288,6 @@ func (s *CodexArchiveManagerSuite) TestDownloadingArchivesFromCodex() { } s.T().Logf("Index file content verified successfully!") - - // Verify that the CID file exists and contains the correct CID - storedCid, err := s.archiveManager.GetHistoryArchiveIndexCid(communityID) - s.Require().NoError(err, "Failed to read CID file") - s.Require().Equal(cid, storedCid, "Stored CID should match the uploaded index CID") - - s.T().Logf("CID file content verified successfully! CID: %s", storedCid) -} - -func (s *CodexArchiveManagerSuite) TestDownloadCancellationBeforeManifestFetch() { - // Subscribe to signals - subscription := s.manager.Subscribe() - - // Create a single test archive - archiveData := make([]byte, 256) - _, err := rand.Read(archiveData) - s.Require().NoError(err, "Failed to generate random data") - - // Upload archive to Codex - archiveCid, err := s.codexClient.Upload(bytes.NewReader(archiveData), "test-archive.bin") - s.Require().NoError(err, "Failed to upload archive") - s.uploadedCIDs = append(s.uploadedCIDs, archiveCid) - - // Create and upload index - index := &protobuf.CodexWakuMessageArchiveIndex{ - Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ - "test-hash": { - Cid: archiveCid, - Metadata: &protobuf.WakuMessageArchiveMetadata{ - From: 1000, - To: 2000, - }, - }, - }, - } - - codexIndexBytes, err := proto.Marshal(index) - s.Require().NoError(err, "Failed to marshal index") - - indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) - s.Require().NoError(err, "Failed to upload index") - s.uploadedCIDs = append(s.uploadedCIDs, indexCid) - - communityID := types.HexBytes("cancel-test-community-1") - cancelChan := make(chan struct{}) - - // Track signals - downloadStartedReceived := false - manifestFetchedReceived := false - signalDone := make(chan struct{}) - doneMarker := make(chan struct{}) - go func() { - timeout := time.After(10 * time.Second) - for { - select { - case event := <-subscription: - if event.DownloadingHistoryArchivesStartedSignal != nil { - downloadStartedReceived = true - } - if event.ManifestFetchedSignal != nil { - manifestFetchedReceived = true - } - case <-timeout: - close(signalDone) - return - case <-signalDone: - doneMarker <- struct{}{} - return - } - } - }() - - // Cancel immediately before the download starts - close(cancelChan) - - taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) - s.Require().NoError(err, "Download should return without error on cancellation") - s.Require().NotNil(taskInfo, "Task info should not be nil") - s.Require().True(taskInfo.Cancelled, "Download should be marked as cancelled") - s.Require().Equal(0, taskInfo.TotalDownloadedArchivesCount, "No archives should be downloaded") - - close(signalDone) - - // Wait for signal goroutine to finish with timeout - select { - case <-doneMarker: - // Goroutine finished successfully - case <-time.After(200 * time.Millisecond): - s.T().Fatal("Timeout waiting for signal goroutine to finish") - } - - // Verify that neither signal was received - 
s.Require().False(downloadStartedReceived, "DownloadingHistoryArchivesStartedSignal should not be received when cancelled early") - s.Require().False(manifestFetchedReceived, "ManifestFetchedSignal should not be received when cancelled early") - - s.T().Logf("Early cancellation test passed successfully!") -} - -func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringIndexDownload() { - // Subscribe to signals - subscription := s.manager.Subscribe() - - // Create a test archive - archiveData := make([]byte, 1024*10) // 10KB - _, err := rand.Read(archiveData) - s.Require().NoError(err, "Failed to generate random data") - - // Upload archive to Codex - archiveCid, err := s.codexClient.Upload(bytes.NewReader(archiveData), "test-archive-large.bin") - s.Require().NoError(err, "Failed to upload archive") - s.uploadedCIDs = append(s.uploadedCIDs, archiveCid) - - // Create and upload index - index := &protobuf.CodexWakuMessageArchiveIndex{ - Archives: map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata{ - "test-hash-large": { - Cid: archiveCid, - Metadata: &protobuf.WakuMessageArchiveMetadata{ - From: 1000, - To: 2000, - }, - }, - }, - } - - codexIndexBytes, err := proto.Marshal(index) - s.Require().NoError(err, "Failed to marshal index") - - indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) - s.Require().NoError(err, "Failed to upload index") - s.uploadedCIDs = append(s.uploadedCIDs, indexCid) - - communityID := types.HexBytes("cancel-test-community-2") - cancelChan := make(chan struct{}) - - // Track signals - manifestFetchedReceived := false - indexDownloadCompletedReceived := false - downloadStartedReceived := false - signalDone := make(chan struct{}) - doneMarker := make(chan struct{}) - - go func() { - timeout := time.After(10 * time.Second) - for { - select { - case event := <-subscription: - if event.ManifestFetchedSignal != nil { - manifestFetchedReceived = true - s.T().Logf("Received ManifestFetchedSignal - now cancelling during index download") - // Cancel as soon as we get the manifest (before index download completes) - close(cancelChan) - } - if event.IndexDownloadCompletedSignal != nil { - indexDownloadCompletedReceived = true - } - if event.DownloadingHistoryArchivesStartedSignal != nil { - downloadStartedReceived = true - } - case <-timeout: - close(signalDone) - return - case <-signalDone: - doneMarker <- struct{}{} - return - } - } - }() - - // Start download in goroutine - resultChan := make(chan struct { - taskInfo *communities.HistoryArchiveDownloadTaskInfo - err error - }, 1) - - go func() { - taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) - resultChan <- struct { - taskInfo *communities.HistoryArchiveDownloadTaskInfo - err error - }{taskInfo, err} - }() - - result := <-resultChan - s.Require().NoError(result.err, "Download should return without error on cancellation") - s.Require().NotNil(result.taskInfo, "Task info should not be nil") - s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") - - close(signalDone) - - // Wait for signal goroutine to finish with timeout - select { - case <-doneMarker: - // Goroutine finished successfully - case <-time.After(200 * time.Millisecond): - s.T().Fatal("Timeout waiting for signal goroutine to finish") - } - - // Verify signals - s.Require().True(manifestFetchedReceived, "Should have received ManifestFetchedSignal") - s.Require().False(indexDownloadCompletedReceived, "Should NOT have received IndexDownloadCompletedSignal 
(cancelled before completion)") - s.Require().False(downloadStartedReceived, "Should NOT have received DownloadingHistoryArchivesStartedSignal (cancelled before archives start)") - - s.T().Logf("Index download cancellation test passed! Cancelled deterministically after manifest fetch.") -} - -func (s *CodexArchiveManagerSuite) TestDownloadCancellationDuringArchiveDownload() { - // Subscribe to signals - subscription := s.manager.Subscribe() - - // Create multiple test archives - archives := []struct { - hash string - from uint64 - to uint64 - data []byte - }{ - {"cancel-archive-1", 1000, 2000, make([]byte, 1024*5)}, // 5KB - {"cancel-archive-2", 2000, 3000, make([]byte, 1024*5)}, - {"cancel-archive-3", 3000, 4000, make([]byte, 1024*5)}, - } - - // Generate and upload archives - archiveCIDs := make(map[string]string) - for i := range archives { - _, err := rand.Read(archives[i].data) - s.Require().NoError(err, "Failed to generate random data") - - cid, err := s.codexClient.Upload(bytes.NewReader(archives[i].data), archives[i].hash+".bin") - s.Require().NoError(err, "Failed to upload archive") - archiveCIDs[archives[i].hash] = cid - s.uploadedCIDs = append(s.uploadedCIDs, cid) - } - - // Create and upload index - index := &protobuf.CodexWakuMessageArchiveIndex{ - Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), - } - - for _, archive := range archives { - index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ - Cid: archiveCIDs[archive.hash], - Metadata: &protobuf.WakuMessageArchiveMetadata{ - From: archive.from, - To: archive.to, - }, - } - } - - codexIndexBytes, err := proto.Marshal(index) - s.Require().NoError(err, "Failed to marshal index") - - indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) - s.Require().NoError(err, "Failed to upload index") - s.uploadedCIDs = append(s.uploadedCIDs, indexCid) - - communityID := types.HexBytes("cancel-test-community-3") - cancelChan := make(chan struct{}) - - // Track signals - downloadStartedReceived := false - indexDownloadCompletedReceived := false - archivesDownloaded := 0 - signalDone := make(chan struct{}) - doneMarker := make(chan struct{}) - - go func() { - timeout := time.After(15 * time.Second) - for { - select { - case event := <-subscription: - if event.DownloadingHistoryArchivesStartedSignal != nil { - downloadStartedReceived = true - s.T().Logf("Received DownloadingHistoryArchivesStartedSignal") - } - if event.IndexDownloadCompletedSignal != nil { - indexDownloadCompletedReceived = true - s.T().Logf("Received IndexDownloadCompletedSignal - waiting for first archive download before cancelling") - } - if event.HistoryArchiveDownloadedSignal != nil { - archivesDownloaded++ - s.T().Logf("Received HistoryArchiveDownloadedSignal (%d archives downloaded so far)", archivesDownloaded) - // Cancel after the first archive is downloaded - if archivesDownloaded == 1 { - s.T().Logf("Cancelling after first archive download") - close(cancelChan) - } - } - case <-timeout: - close(signalDone) - return - case <-signalDone: - doneMarker <- struct{}{} - return - } - } - }() - - // Start download in goroutine - resultChan := make(chan struct { - taskInfo *communities.HistoryArchiveDownloadTaskInfo - err error - }, 1) - - go func() { - taskInfo, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) - resultChan <- struct { - taskInfo *communities.HistoryArchiveDownloadTaskInfo - err error - }{taskInfo, err} - }() - - result := <-resultChan - 
s.Require().NoError(result.err, "Download should return without error on cancellation") - s.Require().NotNil(result.taskInfo, "Task info should not be nil") - s.Require().True(result.taskInfo.Cancelled, "Download should be marked as cancelled") - - close(signalDone) - - // Wait for signal goroutine to finish with timeout - select { - case <-doneMarker: - // Goroutine finished successfully - case <-time.After(200 * time.Millisecond): - s.T().Fatal("Timeout waiting for signal goroutine to finish") - } - - // Verify signals - s.Require().True(downloadStartedReceived, "Should have received DownloadingHistoryArchivesStartedSignal") - s.Require().True(indexDownloadCompletedReceived, "Should have received IndexDownloadCompletedSignal") - s.Require().GreaterOrEqual(archivesDownloaded, 1, "Should have downloaded at least 1 archive before cancellation (via signals)") - - s.T().Logf("Archive download cancellation test passed! Cancelled deterministically after downloading %d archive(s)", archivesDownloaded) - s.T().Logf("Task info: TotalArchivesCount=%d, TotalDownloadedArchivesCount=%d, Cancelled=%v", - result.taskInfo.TotalArchivesCount, - result.taskInfo.TotalDownloadedArchivesCount, - result.taskInfo.Cancelled) - - // Note: Due to parallel downloads, the TotalDownloadedArchivesCount in taskInfo might not match - // the number of signals received because cancellation can happen while downloads are in-flight. - // The important thing is that we successfully cancelled based on a signal and the Cancelled flag is set. - s.T().Logf("Signals received: %d archives downloaded, TaskInfo reports: %d archives", - archivesDownloaded, result.taskInfo.TotalDownloadedArchivesCount) -} - -func (s *CodexArchiveManagerSuite) TestHistoryArchivesSeedingSignalWhenAllArchivesExist() { - // Subscribe to signals before starting the test - subscription := s.manager.Subscribe() - - // Create test archive data and upload archives to Codex - archives := []struct { - hash string - from uint64 - to uint64 - data []byte - }{ - {"existing-archive-1", 1000, 2000, make([]byte, 512)}, - {"existing-archive-2", 2000, 3000, make([]byte, 768)}, - } - - // Generate random data for each archive - archiveCIDs := make(map[string]string) - for i := range archives { - if _, err := rand.Read(archives[i].data); err != nil { - s.T().Fatalf("Failed to generate random data for %s: %v", archives[i].hash, err) - } - s.T().Logf("Generated %s data (first 16 bytes hex): %s", - archives[i].hash, hex.EncodeToString(archives[i].data[:16])) - } - - // Upload all archives to Codex - for _, archive := range archives { - cid, err := s.codexClient.Upload(bytes.NewReader(archive.data), archive.hash+".bin") - require.NoError(s.T(), err, "Failed to upload %s", archive.hash) - - archiveCIDs[archive.hash] = cid - s.uploadedCIDs = append(s.uploadedCIDs, cid) - s.T().Logf("Uploaded %s to CID: %s", archive.hash, cid) - - // Verify upload succeeded - exists, err := s.codexClient.HasCid(cid) - require.NoError(s.T(), err, "Failed to check CID existence for %s", archive.hash) - require.True(s.T(), exists, "CID %s should exist after upload", cid) - } - - // Create archive index - index := &protobuf.CodexWakuMessageArchiveIndex{ - Archives: make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata), - } - - for _, archive := range archives { - cid := archiveCIDs[archive.hash] - index.Archives[archive.hash] = &protobuf.CodexWakuMessageArchiveIndexMetadata{ - Cid: cid, - Metadata: &protobuf.WakuMessageArchiveMetadata{ - From: archive.from, - To: archive.to, - }, - } - } - 
- // Upload archive index to codex - codexIndexBytes, err := proto.Marshal(index) - s.Require().NoError(err, "Failed to marshal index") - - indexCid, err := s.codexClient.UploadArchive(codexIndexBytes) - s.Require().NoError(err, "Failed to upload archive index to Codex") - s.Require().NotEmpty(indexCid, "Uploaded index CID should not be empty") - - s.T().Logf("Uploaded archive index to CID: %s", indexCid) - - communityID := types.HexBytes("existing-archives-test") - cancelChan := make(chan struct{}) - - // FIRST RUN: Download all archives normally - s.T().Logf("=== FIRST RUN: Downloading archives for the first time ===") - - taskInfo1, err := s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan) - s.Require().NoError(err, "Failed to download archives on first run") - s.Require().NotNil(taskInfo1, "Download task info should not be nil") - s.Require().Equal(len(archives), taskInfo1.TotalArchivesCount, "Should report all archives") - s.Require().Equal(len(archives), taskInfo1.TotalDownloadedArchivesCount, "Should have downloaded all archives") - s.Require().False(taskInfo1.Cancelled, "Download should not be cancelled") - - // Verify that archives are stored in persistence - for _, archive := range archives { - exists, err := s.manager.GetPersistence().HasMessageArchiveID(communityID, archive.hash) - s.Require().NoError(err, "Failed to check archive ID %s in persistence", archive.hash) - s.Require().True(exists, "Archive hash %s should be stored in persistence", archive.hash) - } - - s.T().Logf("First run completed successfully, all archives downloaded and stored") - - // SECOND RUN: Download again with same index - should abort early and emit seeding signal - s.T().Logf("=== SECOND RUN: Re-downloading with same index (all archives exist) ===") - - // Create a fresh subscription for the second run to avoid picking up signals from the first run - subscription = s.manager.Subscribe() - - // Track received signals for second run - receivedSeedingSignal := false - receivedDownloadingStarted := false - receivedArchiveDownloaded := false - signalDone := make(chan struct{}) - doneMarker := make(chan struct{}) - - go func() { - timeout := time.After(10 * time.Second) - for { - select { - case event := <-subscription: - if event.HistoryArchivesSeedingSignal != nil { - s.T().Logf("Received HistoryArchivesSeedingSignal for community: %s, MagnetLink: %v, IndexCid: %v", - event.HistoryArchivesSeedingSignal.CommunityID, - event.HistoryArchivesSeedingSignal.MagnetLink, - event.HistoryArchivesSeedingSignal.IndexCid) - receivedSeedingSignal = true - - // Verify the signal has correct values - s.Require().Equal(communityID.String(), event.HistoryArchivesSeedingSignal.CommunityID, - "CommunityID should match") - s.Require().False(event.HistoryArchivesSeedingSignal.MagnetLink, - "MagnetLink should be false") - s.Require().True(event.HistoryArchivesSeedingSignal.IndexCid, - "IndexCid should be true") - } - if event.DownloadingHistoryArchivesStartedSignal != nil { - s.T().Logf("WARNING: Received unexpected DownloadingHistoryArchivesStartedSignal") - receivedDownloadingStarted = true - } - if event.HistoryArchiveDownloadedSignal != nil { - s.T().Logf("WARNING: Received unexpected HistoryArchiveDownloadedSignal") - receivedArchiveDownloaded = true - } - case <-timeout: - close(signalDone) - return - case <-signalDone: - doneMarker <- struct{}{} - return - } - } - }() - - // Create a new cancel channel for the second run - cancelChan2 := make(chan struct{}) - taskInfo2, err := 
s.archiveManager.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancelChan2) - s.Require().NoError(err, "Second download should succeed without error") - s.Require().NotNil(taskInfo2, "Task info should not be nil") - - // Stop signal collection - close(signalDone) - - // Wait for signal goroutine to finish with timeout - select { - case <-doneMarker: - // Goroutine finished successfully - case <-time.After(200 * time.Millisecond): - s.T().Fatal("Timeout waiting for signal goroutine to finish") - } - - // Verify task info for second run - s.Require().Equal(len(archives), taskInfo2.TotalArchivesCount, "Should report all archives") - s.Require().Equal(len(archives), taskInfo2.TotalDownloadedArchivesCount, "Should report all archives as already downloaded") - s.Require().False(taskInfo2.Cancelled, "Download should not be marked as cancelled") - - // Verify that the seeding signal was received and no download signals were sent - s.Require().True(receivedSeedingSignal, "Should have received HistoryArchivesSeedingSignal when all archives exist") - s.Require().False(receivedDownloadingStarted, "Should NOT have received DownloadingHistoryArchivesStartedSignal when aborting") - s.Require().False(receivedArchiveDownloaded, "Should NOT have received HistoryArchiveDownloadedSignal when aborting") - - s.T().Logf("Second run completed successfully - seeding signal emitted correctly when all archives exist!") } // Run the integration test suite diff --git a/protocol/communities/codex_testutil_test.go b/protocol/communities/codex_testutil_test.go index 03db7613b83..413abbd350d 100644 --- a/protocol/communities/codex_testutil_test.go +++ b/protocol/communities/codex_testutil_test.go @@ -12,8 +12,7 @@ import ( func NewCodexClientTest(t *testing.T) communities.CodexClientInterface { client, err := communities.NewCodexClient(params.CodexConfig{ - Enabled: true, - HistoryArchiveDataDir: filepath.Join(t.TempDir(), "codex", "archivedata"), + Enabled: true, CodexNodeConfig: codex.Config{ DataDir: filepath.Join(t.TempDir(), "codex", "codexdata"), LogFormat: codex.LogFormatNoColors, diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go index 2334b1faaa2..3138c4ae45f 100644 --- a/protocol/communities/manager.go +++ b/protocol/communities/manager.go @@ -192,17 +192,12 @@ type HistoryArchiveDownloadTaskInfo struct { type ArchiveFileService interface { CreateHistoryArchiveTorrentFromMessages(communityID types.HexBytes, messages []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) CreateHistoryArchiveTorrentFromDB(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) - CreateHistoryArchiveCodexFromMessages(communityID types.HexBytes, messages []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) - CreateHistoryArchiveCodexFromDB(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) SaveMessageArchiveID(communityID types.HexBytes, hash string) error GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) SetMessageArchiveIDImported(communityID types.HexBytes, hash string, imported bool) error 
ExtractMessagesFromHistoryArchive(communityID types.HexBytes, archiveID string) ([]*protobuf.WakuMessage, error) - ExtractMessagesFromCodexHistoryArchive(communityID types.HexBytes, archiveID string) ([]*protobuf.WakuMessage, error) GetHistoryArchiveMagnetlink(communityID types.HexBytes) (string, error) - GetHistoryArchiveIndexCid(communityID types.HexBytes) (string, error) LoadHistoryArchiveIndexFromFile(myKey *ecdsa.PrivateKey, communityID types.HexBytes) (*protobuf.WakuMessageArchiveIndex, error) - CodexLoadHistoryArchiveIndexFromFile(myKey *ecdsa.PrivateKey, communityID types.HexBytes) (*protobuf.CodexWakuMessageArchiveIndex, error) } type ArchiveService interface { @@ -210,15 +205,10 @@ type ArchiveService interface { SetOnline(bool) SetTorrentConfig(*params.TorrentConfig) - SetCodexConfig(*params.CodexConfig) StartTorrentClient() error - StartCodexClient() error - SetCodexClient(client CodexClientInterface) - GetCodexClient() CodexClientInterface Stop() error IsReady() bool IsTorrentReady() bool - IsCodexReady() bool GetCommunityChatsFilters(communityID types.HexBytes) (messagingtypes.ChatFilters, error) GetCommunityChatsTopics(communityID types.HexBytes) ([]messagingtypes.ContentTopic, error) GetHistoryArchivePartitionStartTimestamp(communityID types.HexBytes) (uint64, error) @@ -226,18 +216,28 @@ type ArchiveService interface { StartHistoryArchiveTasksInterval(community *Community, interval time.Duration) StopHistoryArchiveTasksInterval(communityID types.HexBytes) SeedHistoryArchiveTorrent(communityID types.HexBytes) error - SeedHistoryArchiveIndexCid(communityID types.HexBytes) error UnseedHistoryArchiveTorrent(communityID types.HexBytes) - UnseedHistoryArchiveIndexCid(communityID types.HexBytes) IsSeedingHistoryArchiveTorrent(communityID types.HexBytes) bool - IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool GetHistoryArchiveDownloadTask(communityID string) *HistoryArchiveDownloadTask AddHistoryArchiveDownloadTask(communityID string, task *HistoryArchiveDownloadTask) DownloadHistoryArchivesByMagnetlink(communityID types.HexBytes, magnetlink string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) - DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) + PublishHistoryArchivesSeedingSignal(communityID types.HexBytes, magnetLink bool, indexCid bool) TorrentFileExists(communityID string) bool - CodexIndexCidFileExists(communityID types.HexBytes) bool GetDownloadedMessageArchiveIDs(communityID types.HexBytes) ([]string, error) + + SetCodexConfig(*params.CodexConfig) + SetCodexClient(client CodexClientInterface) + StartCodexClient() error + GetCodexClient() CodexClientInterface + IsCodexReady() bool + SeedHistoryArchiveIndexCid(communityID types.HexBytes, indexCid string) error + UnseedHistoryArchiveIndexCid(communityID types.HexBytes, indexCid string) + IsSeedingHistoryArchiveCodex(communityID types.HexBytes, indexCid string) bool + DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) + CreateHistoryArchiveCodexFromMessages(communityID types.HexBytes, messages []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) + CreateHistoryArchiveCodexFromDB(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, 
partition time.Duration, encrypt bool) ([]string, error) + ExtractMessagesFromCodexHistoryArchive(communityID types.HexBytes, archiveID string, codexIndex *protobuf.CodexWakuMessageArchiveIndex) ([]*protobuf.WakuMessage, error) + CodexLoadHistoryArchiveIndex(ctx context.Context, myKey *ecdsa.PrivateKey, communityID types.HexBytes, indexCid string, isLocal bool) (*protobuf.CodexWakuMessageArchiveIndex, error) } type ArchiveManagerConfig struct { diff --git a/protocol/communities/manager_archive.go b/protocol/communities/manager_archive.go index 5fe33f43adc..1d611d23602 100644 --- a/protocol/communities/manager_archive.go +++ b/protocol/communities/manager_archive.go @@ -10,9 +10,12 @@ package communities import ( + "bytes" + "context" "crypto/ecdsa" "errors" "fmt" + "maps" "net" "os" "path" @@ -20,11 +23,15 @@ import ( "sync" "time" + "github.com/golang/protobuf/proto" + "github.com/status-im/status-go/common" + "github.com/status-im/status-go/crypto" "github.com/status-im/status-go/crypto/types" "github.com/status-im/status-go/messaging" messagingtypes "github.com/status-im/status-go/messaging/types" "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol/protobuf" "github.com/status-im/status-go/signal" "github.com/anacrolix/torrent" @@ -194,21 +201,6 @@ func (m *ArchiveManager) getTCPandUDPport(portNumber int) (int, error) { return 0, fmt.Errorf("no free port found") } -func (m *ArchiveManager) getFreeUDPPort() (int, error) { - udpAddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort("localhost", "0")) - if err != nil { - return 0, err - } - - udpListener, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return 0, err - } - defer udpListener.Close() - - return udpListener.LocalAddr().(*net.UDPAddr).Port, nil -} - func (m *ArchiveManager) StartTorrentClient() error { if m.torrentConfig == nil { return fmt.Errorf("can't start torrent client: missing torrentConfig") @@ -511,19 +503,27 @@ func (m *ArchiveManager) CreateAndSeedHistoryArchive(communityID types.HexBytes, } if distributionPreference == params.ArchiveDistributionMethodCodex { + lastIndexCid, err := m.persistence.GetLastSeenIndexCid(communityID) + if err != nil { + m.UnseedHistoryArchiveIndexCid(communityID, lastIndexCid) + } else { + if err != nil { + m.logger.Debug("[CODEX][CreateAndSeedHistoryArchive] failed to get last seen index cid - proceeding without un-seeding", zap.Error(err)) + } + } archiveCodexCreatedSuccessfully = true - m.UnseedHistoryArchiveIndexCid(communityID) - codexArchiveIDs, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) + // codexArchiveIDs, errCodex := m.ArchiveFileManager.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) + codexArchiveIDs, errCodex := m.CreateHistoryArchiveCodexFromDB(communityID, topics, startDate, endDate, partition, encrypt) if errCodex != nil { archiveCodexCreatedSuccessfully = false m.logger.Error("[CODEX][CreateAndSeedHistoryArchive] failed to create history archive codex", zap.Error(errCodex)) } else { if len(codexArchiveIDs) == 0 { // no new codex archives were created - no need to distribute new index cid - // but we need to (re)start seeding what we stopped above + // but we need to (re)start seeding that we stopped above archiveCodexCreatedSuccessfully = false m.logger.Debug("[CODEX][CreateAndSeedHistoryArchive] no new codex archive ids were created - re-seeding existing index cid") - if err = 
m.SeedHistoryArchiveIndexCid(communityID); err != nil { + if err = m.SeedHistoryArchiveIndexCid(communityID, lastIndexCid); err != nil { m.logger.Error("[CODEX][CreateAndSeedHistoryArchive] failed to seed existing history archive codex index cid", zap.Error(err)) } } @@ -606,7 +606,14 @@ func (m *ArchiveManager) StartHistoryArchiveTasksInterval(community *Community, } case <-cancel: m.UnseedHistoryArchiveTorrent(community.ID()) - m.UnseedHistoryArchiveIndexCid(community.ID()) + lastIndexCid, err := m.persistence.GetLastSeenIndexCid(community.ID()) + if err != nil { + m.UnseedHistoryArchiveIndexCid(community.ID(), lastIndexCid) + } else { + if err != nil { + m.logger.Debug("[CODEX][start_history_archive_tasks_interval] failed to get last seen index cid - proceeding without un-seeding", zap.Error(err)) + } + } m.historyArchiveTasks.Delete(id) m.historyArchiveTasksWaitGroup.Done() return @@ -686,63 +693,42 @@ func (m *ArchiveManager) UnseedHistoryArchiveTorrent(communityID types.HexBytes) } } -func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes) error { +func (m *ArchiveManager) SeedHistoryArchiveIndexCid(communityID types.HexBytes, indexCid string) error { + if indexCid == "" { + return nil + } if !m.IsCodexReady() { return nil } // do not seed if already seeding - if m.IsSeedingHistoryArchiveCodex(communityID) { + if m.IsSeedingHistoryArchiveCodex(communityID, indexCid) { return nil } - exists, err := m.codexIndexFileExists(communityID) + + // for the purpose of seeding, we just need to make sure that the index cid + // is fetched to the codex node - codex will seed it by advertising it on DHT + _, err := m.codexClient.TriggerDownload(indexCid) if err != nil { return err } - if exists { - indexBytes, err := m.readCodexIndexFromFile(communityID) - if err != nil { - return err - } - cid, err := m.codexClient.UploadArchive(indexBytes) - if err != nil { - return err - } - err = m.writeCodexIndexCidToFile(communityID, cid) - var errs []error - if err != nil { - errs = append(errs, err) - err := m.codexClient.RemoveCid(cid) - if err != nil { - errs = append(errs, err) - } - return errors.Join(errs...) 
- } - } return nil } -func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes) { +func (m *ArchiveManager) UnseedHistoryArchiveIndexCid(communityID types.HexBytes, indexCid string) { + if indexCid == "" { + return + } if !m.IsCodexReady() { return } - if !m.IsSeedingHistoryArchiveCodex(communityID) { + if !m.IsSeedingHistoryArchiveCodex(communityID, indexCid) { return } - if m.CodexIndexCidFileExists(communityID) { - // get currently advertised index Cid - cid, err := m.GetHistoryArchiveIndexCid(communityID) + m.logger.Debug("[CODEX] Un-seeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", indexCid)) - if err != nil { - m.logger.Debug("[CODEX] failed to get history archive index CID", zap.Error(err)) - return - } - - m.logger.Debug("[CODEX] Unseeding index CID for community", zap.String("id", communityID.String()), zap.String("cid", cid)) - - err = m.codexClient.RemoveCid(cid) - if err != nil { - m.logger.Error("[CODEX] failed to remove CID from Codex", zap.Error(err)) - } + err := m.codexClient.RemoveCid(indexCid) + if err != nil { + m.logger.Error("[CODEX] failed to remove CID from Codex", zap.Error(err)) } } @@ -753,24 +739,19 @@ func (m *ArchiveManager) IsSeedingHistoryArchiveTorrent(communityID types.HexByt return ok && torrent.Seeding() } -func (m *ArchiveManager) IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool { +func (m *ArchiveManager) IsSeedingHistoryArchiveCodex(communityID types.HexBytes, indexCid string) bool { + if indexCid == "" { + return false + } if !m.IsCodexReady() { return false } - if m.CodexIndexCidFileExists(communityID) { - cid, err := m.GetHistoryArchiveIndexCid(communityID) - if err != nil { - m.logger.Debug("[CODEX] failed to read Codex index CID", zap.String("communityID", communityID.String()), zap.Error(err)) - return false - } - hasCid, err := m.codexClient.HasCid(cid) - if err != nil { - m.logger.Debug("[CODEX] failed to verify Codex CID availability", zap.String("communityID", communityID.String()), zap.String("cid", cid), zap.Error(err)) - return false - } - return hasCid + hasCid, err := m.codexClient.HasCid(indexCid) + if err != nil { + m.logger.Debug("[CODEX] failed to verify Codex CID availability", zap.String("communityID", communityID.String()), zap.String("cid", indexCid), zap.Error(err)) + return false } - return false + return hasCid } func (m *ArchiveManager) GetHistoryArchiveDownloadTask(communityID string) *HistoryArchiveDownloadTask { @@ -954,213 +935,498 @@ func (m *ArchiveManager) DownloadHistoryArchivesByMagnetlink(communityID types.H } } -func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) { +func (m *ArchiveManager) PublishHistoryArchivesSeedingSignal( + communityID types.HexBytes, + magnetLink bool, + indexCid bool, +) { + m.publisher.publish(&Subscription{ + HistoryArchivesSeedingSignal: &signal.HistoryArchivesSeedingSignal{ + CommunityID: communityID.String(), + MagnetLink: magnetLink, + IndexCid: indexCid, + }, + }) +} - id := communityID.String() +func (m *ArchiveManager) CreateHistoryArchiveCodexFromMessages(communityID types.HexBytes, messages []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { + return m.createHistoryArchiveCodex(communityID, messages, topics, startDate, endDate, partition, encrypt) +} - 
downloadTaskInfo := &HistoryArchiveDownloadTaskInfo{ - TotalDownloadedArchivesCount: 0, - TotalArchivesCount: 0, - Cancelled: false, +func (m *ArchiveManager) CreateHistoryArchiveCodexFromDB(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { + return m.createHistoryArchiveCodex(communityID, make([]*messagingtypes.ReceivedMessage, 0), topics, startDate, endDate, partition, encrypt) +} + +func (m *ArchiveManager) createHistoryArchiveCodex(communityID types.HexBytes, msgs []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { + + loadFromDB := len(msgs) == 0 + + from := startDate + to := from.Add(partition) + if to.After(endDate) { + to = endDate } - timeout := time.After(m.downloadTimeout) + codexWakuMessageArchiveIndexProto := &protobuf.CodexWakuMessageArchiveIndex{} + codexWakuMessageArchiveIndex := make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata) + codexArchiveIDs := make([]string, 0) - // Create separate cancel channel for the index downloader to avoid channel competition - indexDownloaderCancel := make(chan struct{}) + lastSeenIndexCid, err := m.persistence.GetLastSeenIndexCid(communityID) + if err != nil { + return codexArchiveIDs, err + } - if err := m.ensureCodexCommunityDir(communityID); err != nil { - m.logger.Error("[CODEX] failed to ensure Codex archive directory", zap.String("communityID", id), zap.Error(err)) - return nil, err + if m.IsSeedingHistoryArchiveCodex(communityID, lastSeenIndexCid) { + m.logger.Debug("[CODEX][createHistoryArchiveCodex] codex index file exists, loading from file") + ctx, cancel := context.WithTimeout(context.Background(), m.downloadTimeout) + defer cancel() + codexWakuMessageArchiveIndexProto, err = m.CodexLoadHistoryArchiveIndex(ctx, m.identity, communityID, lastSeenIndexCid, true) + if err != nil { + return codexArchiveIDs, err + } } - // Create index downloader with path to index file using helper function - indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - indexDownloader := NewCodexIndexDownloader(m.codexClient, indexCid, indexFilePath, indexDownloaderCancel, m.logger) + maps.Copy(codexWakuMessageArchiveIndex, codexWakuMessageArchiveIndexProto.Archives) - m.logger.Debug("[CODEX] fetching history index from Codex", zap.String("indexCid", indexCid)) - select { - case <-timeout: - return nil, ErrIndexCidTimedout - case <-cancelTask: - m.logger.Debug("[CODEX] cancelled fetching history index from Codex") - close(indexDownloaderCancel) // Forward cancellation to index downloader - downloadTaskInfo.Cancelled = true - return downloadTaskInfo, nil - case <-indexDownloader.GotManifest(): - // Check if manifest fetch was actually successful - err := indexDownloader.GetError() - if indexDownloader.GetDatasetSize() == 0 || err != nil { + topicsAsByteArrays := topicsAsByteArrays(topics) + + m.publisher.publish(&Subscription{CreatingHistoryArchivesSignal: &signal.CreatingHistoryArchivesSignal{ + CommunityID: communityID.String(), + }}) + + m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating archives", + zap.Any("startDate", startDate), + zap.Any("endDate", endDate), + zap.Duration("partition", partition), + ) + for { + if from.Equal(endDate) || from.After(endDate) { + break + } + m.logger.Debug("creating message archive", + zap.Any("from", from), + zap.Any("to", to), + ) + + var messages 
[]messagingtypes.ReceivedMessage + if loadFromDB { + messages, err = m.persistence.GetWakuMessagesByFilterTopic(topics, uint64(from.Unix()), uint64(to.Unix())) if err != nil { - m.logger.Error("[CODEX] failed to fetch Codex manifest", zap.Error(err)) - } else { - m.logger.Error("[CODEX] failed to fetch Codex manifest - dataset size is 0") + return codexArchiveIDs, err + } + } else { + for _, msg := range msgs { + if int64(msg.Timestamp) >= from.Unix() && int64(msg.Timestamp) < to.Unix() { + messages = append(messages, *msg) + } } - return nil, fmt.Errorf("failed to fetch Codex manifest for CID %s: %w", indexCid, err) } - m.logger.Debug("[CODEX] got manifest of the index file from Codex", zap.String("indexCid", indexCid), zap.Int64("datasetSize", indexDownloader.GetDatasetSize())) + if len(messages) == 0 { + // No need to create an archive with zero messages + m.logger.Debug("[CODEX] no messages in this partition") + from = to + to = to.Add(partition) + if to.After(endDate) { + to = endDate + } + continue + } + + m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating Codex archive with messages", zap.Int("messagesCount", len(messages))) + + // Not only do we partition messages, we also chunk them + // roughly by size, such that each chunk will not exceed a given + // size and archive data doesn't get too big + messageChunks := make([][]messagingtypes.ReceivedMessage, 0) + currentChunkSize := 0 + currentChunk := make([]messagingtypes.ReceivedMessage, 0) + + for _, msg := range messages { + msgSize := len(msg.Payload) + len(msg.Sig) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] message size", + zap.Int("messageSize", msgSize), + zap.String("contentTopic", string(msg.Topic[:])), + zap.ByteString("payload[0:31]", msg.Payload[:min(32, len(msg.Payload))]), + ) + if msgSize > maxArchiveSizeInBytes { + // we drop messages this big + m.logger.Debug("[CODEX][createHistoryArchiveCodex] dropping message due to size", zap.Int("messageSize", msgSize)) + continue + } + + if currentChunkSize+msgSize > maxArchiveSizeInBytes { + messageChunks = append(messageChunks, currentChunk) + currentChunk = make([]messagingtypes.ReceivedMessage, 0) + currentChunkSize = 0 + } + currentChunk = append(currentChunk, msg) + currentChunkSize = currentChunkSize + msgSize + } + messageChunks = append(messageChunks, currentChunk) + + for _, messages := range messageChunks { + wakuMessageArchive := m.createWakuMessageArchive(from, to, messages, topicsAsByteArrays) + encodedArchive, err := proto.Marshal(wakuMessageArchive) + if err != nil { + return codexArchiveIDs, err + } + + if encrypt { + encodedArchive, err = m.messaging.BuildHashRatchetMessage(communityID, encodedArchive) + if err != nil { + return codexArchiveIDs, err + } + } + + // upload archive to codex and get CID back + cid, err := m.codexClient.UploadArchive(encodedArchive) + if err != nil { + m.logger.Error("[CODEX] failed to upload to codex", zap.Error(err)) + return codexArchiveIDs, err + } + + m.logger.Debug("[CODEX][createHistoryArchiveCodex] archive uploaded to codex", zap.String("cid", cid)) + + codexWakuMessageArchiveIndexMetadata := &protobuf.CodexWakuMessageArchiveIndexMetadata{ + Metadata: wakuMessageArchive.Metadata, + Cid: cid, + } + + codexWakuMessageArchiveIndexMetadataBytes, err := proto.Marshal(codexWakuMessageArchiveIndexMetadata) + if err != nil { + return codexArchiveIDs, err + } + + codexArchiveID := crypto.Keccak256Hash(codexWakuMessageArchiveIndexMetadataBytes).String() + codexArchiveIDs = append(codexArchiveIDs, codexArchiveID) + 
codexWakuMessageArchiveIndex[codexArchiveID] = codexWakuMessageArchiveIndexMetadata + } + + from = to + to = to.Add(partition) + if to.After(endDate) { + to = endDate + } + } + + if len(codexArchiveIDs) > 0 { + codexWakuMessageArchiveIndexProto.Archives = codexWakuMessageArchiveIndex + codexIndexBytes, err := proto.Marshal(codexWakuMessageArchiveIndexProto) + if err != nil { + return codexArchiveIDs, err + } + + if encrypt { + codexIndexBytes, err = m.messaging.BuildHashRatchetMessage(communityID, codexIndexBytes) + if err != nil { + return codexArchiveIDs, err + } + } + + // upload index file to codex + cid, err := m.codexClient.UploadArchive(codexIndexBytes) + if err != nil { + m.logger.Error("[CODEX][createHistoryArchiveCodex] failed to upload to codex", zap.Error(err)) + return codexArchiveIDs, err + } + + m.logger.Debug("[CODEX][createHistoryArchiveCodex] index uploaded to codex", zap.String("cid", cid)) + m.logger.Debug("[CODEX][createHistoryArchiveCodex] archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", endDate.Unix())) + + m.logger.Debug("[CODEX][create_history_archive_codex] updating last seen index cid", zap.String("cid", cid)) + err = m.persistence.UpdateLastSeenIndexCid(communityID, cid) + if err != nil { + return codexArchiveIDs, err + } - // Publish manifest fetched signal m.publisher.publish(&Subscription{ - ManifestFetchedSignal: &signal.ManifestFetchedSignal{ + HistoryArchivesCreatedSignal: &signal.HistoryArchivesCreatedSignal{ CommunityID: communityID.String(), - IndexCid: indexCid, + From: int(startDate.Unix()), + To: int(endDate.Unix()), }, }) + } else { + m.logger.Debug("[CODEX][createHistoryArchiveCodex] no archives created") + m.publisher.publish(&Subscription{ + NoHistoryArchivesCreatedSignal: &signal.NoHistoryArchivesCreatedSignal{ + CommunityID: communityID.String(), + From: int(startDate.Unix()), + To: int(endDate.Unix()), + }, + }) + } - // Start downloading the index file - indexDownloader.DownloadIndexFile() + lastMessageArchiveEndDate, err := m.persistence.GetLastMessageArchiveEndDate(communityID) + if err != nil { + return codexArchiveIDs, err + } - m.logger.Debug("[CODEX] downloading history archive index with CID:", zap.String("indexCid", indexCid)) + m.logger.Debug("[CODEX][create_history_archive_codex] updating lastMessageArchiveEndDate", zap.Uint64("lastMessageArchiveEndDate", lastMessageArchiveEndDate)) + err = m.persistence.UpdateLastMessageArchiveEndDate(communityID, uint64(from.Unix())) + if err != nil { + return codexArchiveIDs, err + } + return codexArchiveIDs, nil +} - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() +func (m *ArchiveManager) ExtractMessagesFromCodexHistoryArchive(communityID types.HexBytes, archiveID string, codexIndex *protobuf.CodexWakuMessageArchiveIndex) ([]*protobuf.WakuMessage, error) { + metadata, ok := codexIndex.Archives[archiveID] + if !ok || metadata == nil { + return nil, fmt.Errorf("archive %s missing from codex index", archiveID) + } + cid := metadata.Cid - for { - select { - case <-cancelTask: - m.logger.Debug("[CODEX] cancelled downloading archive index") - close(indexDownloaderCancel) // Forward cancellation to index downloader - downloadTaskInfo.Cancelled = true - return downloadTaskInfo, nil - case <-ticker.C: - err := indexDownloader.GetError() - if err != nil { - m.logger.Error("[CODEX] error during index download", zap.Error(err)) - return nil, err - } + var buf bytes.Buffer + err := m.codexClient.LocalDownload(cid, &buf) + if err != nil { + 
m.logger.Error("[CODEX] failed to download archive from codex", zap.Error(err)) + return nil, err + } + data := buf.Bytes() - if indexDownloader.IsDownloadComplete() { - m.logger.Info("[CODEX] history archive index download completed", zap.String("indexCid", indexCid)) + m.logger.Debug("extracting messages from history archive", + zap.String("communityID", communityID.String()), + zap.String("archiveID", archiveID), + zap.String("cid", cid), + ) - err := m.writeCodexIndexCidToFile(communityID, indexCid) - if err != nil { - m.logger.Error("[CODEX] failed to write Codex index CID to file", zap.Error(err)) - return nil, err - } + archive := &protobuf.WakuMessageArchive{} - // Publish index download completed signal - m.publisher.publish(&Subscription{ - IndexDownloadCompletedSignal: &signal.IndexDownloadCompletedSignal{ - CommunityID: communityID.String(), - IndexCid: indexCid, - }, - }) + err = proto.Unmarshal(data, archive) + if err != nil { + pk, err := crypto.DecompressPubkey(communityID) + if err != nil { + m.logger.Error("failed to decompress community pubkey", zap.Error(err)) + return nil, err + } - index, err := m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) - if err != nil { - return nil, err - } + decryptedData, err := m.messaging.DecryptMessage(m.identity, pk, data) + if err != nil { + m.logger.Error("failed to decrypt message archive", zap.Error(err)) + return nil, err + } - existingArchiveIDs, err := m.persistence.GetDownloadedMessageArchiveIDs(communityID) - if err != nil { - return nil, err - } + err = proto.Unmarshal(decryptedData, archive) + if err != nil { + m.logger.Error("failed to unmarshal message archive", zap.Error(err)) + return nil, err + } + } + return archive.Messages, nil +} - downloadTaskInfo.TotalDownloadedArchivesCount = len(existingArchiveIDs) - downloadTaskInfo.TotalArchivesCount = len(index.Archives) +func (m *ArchiveManager) CodexLoadHistoryArchiveIndex(ctx context.Context, myKey *ecdsa.PrivateKey, communityID types.HexBytes, indexCid string, isLocal bool) (*protobuf.CodexWakuMessageArchiveIndex, error) { + codexWakuMessageArchiveIndexProto := &protobuf.CodexWakuMessageArchiveIndex{} - if len(existingArchiveIDs) == len(index.Archives) { - m.logger.Debug("[CODEX] aborting download, no new archives") - m.publisher.publish(&Subscription{ - HistoryArchivesSeedingSignal: &signal.HistoryArchivesSeedingSignal{ - CommunityID: communityID.String(), - MagnetLink: false, // Not downloaded via magnet link - IndexCid: true, // Downloaded via Codex CID - }, - }) - return downloadTaskInfo, nil - } + indexDownloader := NewCodexIndexDownloader(m.codexClient, m.logger) - // Create separate cancel channel for the archive downloader to avoid channel competition - archiveDownloaderCancel := make(chan struct{}) + var indexBuf bytes.Buffer + if isLocal { + if err := indexDownloader.DownloadIndexFileFromLocalNode(ctx, indexCid, &indexBuf); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrIndexCidTimedout + } + return nil, err + } + } else { + if err := indexDownloader.DownloadIndexFileFromNetwork(ctx, indexCid, &indexBuf); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrIndexCidTimedout + } + return nil, err + } + } + indexData := indexBuf.Bytes() - // Create the archive downloader using the protobuf index directly - archiveDownloader := NewCodexArchiveDownloader(m.codexClient, index, id, existingArchiveIDs, archiveDownloaderCancel, m.logger) + err := proto.Unmarshal(indexData, 
codexWakuMessageArchiveIndexProto) + if err != nil { + return nil, err + } - // Set up callback for when individual archives are downloaded - archiveDownloader.SetOnArchiveDownloaded(func(hash string, from, to uint64) { - err = m.persistence.SaveMessageArchiveID(communityID, hash) - if err != nil { - m.logger.Error("[CODEX] couldn't save message archive ID", zap.Error(err)) - } - m.publisher.publish(&Subscription{ - HistoryArchiveDownloadedSignal: &signal.HistoryArchiveDownloadedSignal{ - CommunityID: communityID.String(), - From: int(from), - To: int(to), - }, - }) + if len(codexWakuMessageArchiveIndexProto.Archives) == 0 && len(indexData) > 0 { + // This means we're dealing with an encrypted index file, so we have to decrypt it first + pk, err := crypto.DecompressPubkey(communityID) + if err != nil { + return nil, err + } - m.logger.Debug("[CODEX] archive downloaded successfully", - zap.String("hash", hash), - zap.Uint64("from", from), - zap.Uint64("to", to)) - }) + decryptedData, err := m.messaging.DecryptMessage(myKey, pk, indexData) + if err != nil { + m.logger.Error("failed to decrypt message archive", zap.Error(err)) + return nil, err + } + + err = proto.Unmarshal(decryptedData, codexWakuMessageArchiveIndexProto) + if err != nil { + return nil, err + } + } - m.logger.Debug("[CODEX] starting downloading individual archives from Codex") + return codexWakuMessageArchiveIndexProto, nil +} - archiveDownloader.StartDownload() +func (m *ArchiveManager) DownloadHistoryArchivesByIndexCid(communityID types.HexBytes, indexCid string, cancelTask chan struct{}) (*HistoryArchiveDownloadTaskInfo, error) { - m.publisher.publish(&Subscription{ - DownloadingHistoryArchivesStartedSignal: &signal.DownloadingHistoryArchivesStartedSignal{ - CommunityID: communityID.String(), - }, - }) + id := communityID.String() - // Monitor archive download progress - archiveTicker := time.NewTicker(1 * time.Second) - defer archiveTicker.Stop() - - for { - select { - case <-cancelTask: - m.logger.Debug("[CODEX] cancelled downloading individual archives") - close(archiveDownloaderCancel) - downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() - downloadTaskInfo.Cancelled = true - return downloadTaskInfo, nil - case <-archiveTicker.C: - // IsDownloadComplete == true also even when no single archive - // has been downloaded (e.g. because of error or because of - // cancellation). - // To further check for cancellation, call IsCancelled(). - // To see if any archive was actually downloaded, check - // GetTotalDownloadedArchivesCount(). - // Notice that GetTotalDownloadedArchivesCount represents - // all successfully downloaded archives so far, not only - // archives downloaded in this session. 
- if archiveDownloader.IsDownloadComplete() { - // Always update final progress - downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() - - m.logger.Info("[CODEX] downloading archives from Codex completed", - zap.Int("totalArchives", downloadTaskInfo.TotalArchivesCount), - zap.Int("downloadedArchives", downloadTaskInfo.TotalDownloadedArchivesCount)) - - m.publisher.publish(&Subscription{ - HistoryArchivesSeedingSignal: &signal.HistoryArchivesSeedingSignal{ - CommunityID: communityID.String(), - MagnetLink: false, // Not downloaded via magnet link - IndexCid: true, // Downloaded via Codex CID - }, - }) + downloadTaskInfo := &HistoryArchiveDownloadTaskInfo{ + TotalDownloadedArchivesCount: 0, + TotalArchivesCount: 0, + Cancelled: false, + } - return downloadTaskInfo, nil - } else { - // Update progress - downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount() - m.logger.Debug("[CODEX] downloading archives in progress", - zap.Int("completed", downloadTaskInfo.TotalDownloadedArchivesCount), - zap.Int("total", downloadTaskInfo.TotalArchivesCount), - zap.Int("inProgress in this session", archiveDownloader.GetPendingArchivesCount()), - zap.Int("total remaining archives to download", downloadTaskInfo.TotalArchivesCount-downloadTaskInfo.TotalDownloadedArchivesCount), - ) - } - } - } - } + indexCtx, cancel := context.WithTimeout(context.Background(), m.downloadTimeout) + defer cancel() + + done := make(chan struct{}) + + go func() { + defer common.LogOnPanic() + select { + case <-cancelTask: + m.logger.Debug("[CODEX] cancelling downloading index from Codex") + cancel() + case <-done: + } + }() + + index, err := m.CodexLoadHistoryArchiveIndex(indexCtx, + m.identity, communityID, indexCid, false) + close(done) + if err != nil { + // check if error is due to timeout + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrIndexCidTimedout + } + // check if error is due to cancellation + if errors.Is(err, context.Canceled) { + m.logger.Debug("[CODEX] cancelled downloading index from Codex") + downloadTaskInfo.Cancelled = true + return downloadTaskInfo, nil + } + return nil, err + } + + // Publish index download completed signal + m.publisher.publish(&Subscription{ + IndexDownloadCompletedSignal: &signal.IndexDownloadCompletedSignal{ + CommunityID: communityID.String(), + IndexCid: indexCid, + }, + }) + + existingArchiveIDs, err := m.persistence.GetDownloadedMessageArchiveIDs( + communityID) + if err != nil { + return nil, err + } + + downloadTaskInfo.TotalDownloadedArchivesCount = len(existingArchiveIDs) + downloadTaskInfo.TotalArchivesCount = len(index.Archives) + + if len(existingArchiveIDs) == len(index.Archives) { + m.logger.Debug("[CODEX] aborting download, no new archives") + return downloadTaskInfo, nil + } + + // Create separate cancel channel for the archive + // downloader to avoid channel competition + archiveDownloaderCancel := make(chan struct{}) + + // Create the archive downloader using the protobuf index directly + archiveDownloader := NewCodexArchiveDownloader( + m.codexClient, index, id, existingArchiveIDs, + archiveDownloaderCancel, m.logger) + + // Set up callback for when individual archives are downloaded + archiveDownloader.SetOnArchiveDownloaded(func(hash string, from, to uint64) { + err = m.persistence.SaveMessageArchiveID(communityID, hash) + if err != nil { + m.logger.Error("[CODEX] couldn't save message archive ID", zap.Error(err)) + } + m.publisher.publish(&Subscription{ + 
HistoryArchiveDownloadedSignal: &signal.HistoryArchiveDownloadedSignal{
+				CommunityID: communityID.String(),
+				From:        int(from),
+				To:          int(to),
+			},
+		})
+
+		m.logger.Debug("[CODEX] archive downloaded successfully",
+			zap.String("hash", hash),
+			zap.Uint64("from", from),
+			zap.Uint64("to", to))
+	})
+
+	m.logger.Debug("[CODEX] starting downloading individual archives from Codex")
+
+	archiveDownloader.StartDownload()
+
+	m.publisher.publish(&Subscription{
+		DownloadingHistoryArchivesStartedSignal: &signal.DownloadingHistoryArchivesStartedSignal{
+			CommunityID: communityID.String(),
+		},
+	})
+
+	timeout := time.After(m.downloadTimeout)
+
+	// Monitor archive download progress
+	archiveTicker := time.NewTicker(1 * time.Second)
+	defer archiveTicker.Stop()
+
+	for {
+		select {
+		case <-timeout:
+			return nil, ErrIndexCidTimedout
+		case <-cancelTask:
+			m.logger.Debug("[CODEX] cancelled downloading individual archives")
+			close(archiveDownloaderCancel)
+			downloadTaskInfo.TotalDownloadedArchivesCount = archiveDownloader.GetTotalDownloadedArchivesCount()
+			downloadTaskInfo.Cancelled = true
+			return downloadTaskInfo, nil
+		case <-archiveTicker.C:
+			// IsDownloadComplete() can be true even when not a single archive
+			// has been downloaded (e.g. because of an error or because of
+			// cancellation).
+			// To further check for cancellation, call IsCancelled().
+			// Checking IsCancelled() here would be redundant, though: on
+			// cancellation we have already returned above (<-cancelTask),
+			// which is where close(archiveDownloaderCancel) is called to stop
+			// the downloader.
+			// To see whether any archive was actually downloaded, check
+			// GetTotalDownloadedArchivesCount().
+			// Note that GetTotalDownloadedArchivesCount counts all
+			// successfully downloaded archives so far, not only those
+			// downloaded in this session.
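From the caller's perspective, the contract described in the comments above comes down to: pass a cancel channel and inspect the returned HistoryArchiveDownloadTaskInfo for Cancelled and the archive counts. A hedged caller-side sketch; the function and the userAbort channel are illustrative and not part of the patch:

```go
package communities_test

import (
	"log"

	"github.com/status-im/status-go/crypto/types"
	"github.com/status-im/status-go/protocol/communities"
)

// runIndexCidDownload is an illustrative caller, not part of the patch.
func runIndexCidDownload(svc communities.ArchiveService, communityID types.HexBytes, indexCid string, userAbort <-chan struct{}) error {
	cancel := make(chan struct{})
	go func() {
		// Forward an external abort to the download; both the index fetch and
		// the per-archive loop observe this channel. The goroutine exits only
		// if userAbort fires, which is fine for a sketch.
		<-userAbort
		close(cancel)
	}()

	info, err := svc.DownloadHistoryArchivesByIndexCid(communityID, indexCid, cancel)
	if err != nil {
		return err // e.g. ErrIndexCidTimedout when the index cannot be fetched in time
	}
	if info.Cancelled {
		log.Printf("cancelled after %d of %d archives", info.TotalDownloadedArchivesCount, info.TotalArchivesCount)
		return nil
	}
	log.Printf("downloaded %d of %d archives", info.TotalDownloadedArchivesCount, info.TotalArchivesCount)
	return nil
}
```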
+ if archiveDownloader.IsDownloadComplete() { + // Always update final progress + downloadTaskInfo.TotalDownloadedArchivesCount = + archiveDownloader.GetTotalDownloadedArchivesCount() + + m.logger.Info("[CODEX] downloading archives from Codex completed", + zap.Int("totalArchives", downloadTaskInfo.TotalArchivesCount), + zap.Int("downloadedArchives", downloadTaskInfo.TotalDownloadedArchivesCount)) + + return downloadTaskInfo, nil + } else { + // Update progress + downloadTaskInfo.TotalDownloadedArchivesCount = + archiveDownloader.GetTotalDownloadedArchivesCount() + m.logger.Debug("[CODEX] downloading archives in progress", + zap.Int("completed", downloadTaskInfo.TotalDownloadedArchivesCount), + zap.Int("total", downloadTaskInfo.TotalArchivesCount), + zap.Int( + "inProgress in this session", + archiveDownloader.GetPendingArchivesCount(), + ), + zap.Int( + "total remaining archives to download", + downloadTaskInfo.TotalArchivesCount- + downloadTaskInfo.TotalDownloadedArchivesCount, + ), + ) } } } @@ -1171,11 +1437,6 @@ func (m *ArchiveManager) TorrentFileExists(communityID string) bool { return err == nil } -func (m *ArchiveManager) CodexIndexCidFileExists(communityID types.HexBytes) bool { - _, err := os.Stat(m.codexHistoryArchiveIndexCidFilePath(communityID)) - return err == nil -} - func topicsAsByteArrays(topics []messagingtypes.ContentTopic) [][]byte { var topicsAsByteArrays [][]byte for _, t := range topics { diff --git a/protocol/communities/manager_archive_file.go b/protocol/communities/manager_archive_file.go index 5194a4db887..efa7dd695de 100644 --- a/protocol/communities/manager_archive_file.go +++ b/protocol/communities/manager_archive_file.go @@ -10,13 +10,9 @@ package communities import ( - "bytes" "crypto/ecdsa" - "errors" - "fmt" "os" "path" - "path/filepath" "time" "github.com/status-im/status-go/crypto" @@ -336,312 +332,10 @@ func (m *ArchiveFileManager) createHistoryArchiveTorrent(communityID types.HexBy return archiveIDs, nil } -func (m *ArchiveFileManager) createHistoryArchiveCodex(communityID types.HexBytes, msgs []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { - - loadFromDB := len(msgs) == 0 - - from := startDate - to := from.Add(partition) - if to.After(endDate) { - to = endDate - } - - codexArchiveDir := m.codexHistoryArchiveDataDirPath(communityID) - codexIndexPath := m.codexHistoryArchiveIndexFilePath(communityID) - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] codexArchiveDir", zap.String("codexArchiveDir", codexArchiveDir)) - - codexWakuMessageArchiveIndexProto := &protobuf.CodexWakuMessageArchiveIndex{} - codexWakuMessageArchiveIndex := make(map[string]*protobuf.CodexWakuMessageArchiveIndexMetadata) - codexArchiveIDs := make([]string, 0) - - if err := m.ensureCodexCommunityDir(communityID); err != nil { - return codexArchiveIDs, err - } - - _, err := os.Stat(codexIndexPath) - if err == nil { - m.logger.Debug("[CODEX][createHistoryArchiveCodex] codex index file exists, loading from file") - codexWakuMessageArchiveIndexProto, err = m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) - if err != nil { - return codexArchiveIDs, err - } - } - - for hash, metadata := range codexWakuMessageArchiveIndexProto.Archives { - codexWakuMessageArchiveIndex[hash] = metadata - } - - topicsAsByteArrays := topicsAsByteArrays(topics) - - m.publisher.publish(&Subscription{CreatingHistoryArchivesSignal: &signal.CreatingHistoryArchivesSignal{ - 
CommunityID: communityID.String(), - }}) - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating archives", - zap.Any("startDate", startDate), - zap.Any("endDate", endDate), - zap.Duration("partition", partition), - ) - for { - if from.Equal(endDate) || from.After(endDate) { - break - } - m.logger.Debug("creating message archive", - zap.Any("from", from), - zap.Any("to", to), - ) - - var messages []messagingtypes.ReceivedMessage - if loadFromDB { - messages, err = m.persistence.GetWakuMessagesByFilterTopic(topics, uint64(from.Unix()), uint64(to.Unix())) - if err != nil { - return codexArchiveIDs, err - } - } else { - for _, msg := range msgs { - if int64(msg.Timestamp) >= from.Unix() && int64(msg.Timestamp) < to.Unix() { - messages = append(messages, *msg) - } - } - } - - if len(messages) == 0 { - // No need to create an archive with zero messages - m.logger.Debug("[CODEX] no messages in this partition") - from = to - to = to.Add(partition) - if to.After(endDate) { - to = endDate - } - continue - } - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] creating Codex archive with messages", zap.Int("messagesCount", len(messages))) - - // Not only do we partition messages, we also chunk them - // roughly by size, such that each chunk will not exceed a given - // size and archive data doesn't get too big - messageChunks := make([][]messagingtypes.ReceivedMessage, 0) - currentChunkSize := 0 - currentChunk := make([]messagingtypes.ReceivedMessage, 0) - - for _, msg := range messages { - msgSize := len(msg.Payload) + len(msg.Sig) - m.logger.Debug("[CODEX][createHistoryArchiveCodex] message size", - zap.Int("messageSize", msgSize), - zap.String("contentTopic", string(msg.Topic[:])), - zap.ByteString("payload[0:31]", msg.Payload[:min(32, len(msg.Payload))]), - ) - if msgSize > maxArchiveSizeInBytes { - // we drop messages this big - m.logger.Debug("[CODEX][createHistoryArchiveCodex] dropping message due to size", zap.Int("messageSize", msgSize)) - continue - } - - if currentChunkSize+msgSize > maxArchiveSizeInBytes { - messageChunks = append(messageChunks, currentChunk) - currentChunk = make([]messagingtypes.ReceivedMessage, 0) - currentChunkSize = 0 - } - currentChunk = append(currentChunk, msg) - currentChunkSize = currentChunkSize + msgSize - } - messageChunks = append(messageChunks, currentChunk) - - for _, messages := range messageChunks { - wakuMessageArchive := m.createWakuMessageArchive(from, to, messages, topicsAsByteArrays) - encodedArchive, err := proto.Marshal(wakuMessageArchive) - if err != nil { - return codexArchiveIDs, err - } - - if encrypt { - encodedArchive, err = m.messaging.BuildHashRatchetMessage(communityID, encodedArchive) - if err != nil { - return codexArchiveIDs, err - } - } - - // upload archive to codex and get CID back - cid, err := m.codexClient.UploadArchive(encodedArchive) - if err != nil { - m.logger.Error("[CODEX] failed to upload to codex", zap.Error(err)) - return codexArchiveIDs, err - } - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] archive uploaded to codex", zap.String("cid", cid)) - - codexWakuMessageArchiveIndexMetadata := &protobuf.CodexWakuMessageArchiveIndexMetadata{ - Metadata: wakuMessageArchive.Metadata, - Cid: cid, - } - - codexWakuMessageArchiveIndexMetadataBytes, err := proto.Marshal(codexWakuMessageArchiveIndexMetadata) - if err != nil { - return codexArchiveIDs, err - } - - codexArchiveID := crypto.Keccak256Hash(codexWakuMessageArchiveIndexMetadataBytes).String() - codexArchiveIDs = append(codexArchiveIDs, codexArchiveID) - 
codexWakuMessageArchiveIndex[codexArchiveID] = codexWakuMessageArchiveIndexMetadata - } - - from = to - to = to.Add(partition) - if to.After(endDate) { - to = endDate - } - } - - if len(codexArchiveIDs) > 0 { - codexWakuMessageArchiveIndexProto.Archives = codexWakuMessageArchiveIndex - codexIndexBytes, err := proto.Marshal(codexWakuMessageArchiveIndexProto) - if err != nil { - return codexArchiveIDs, err - } - - if encrypt { - codexIndexBytes, err = m.messaging.BuildHashRatchetMessage(communityID, codexIndexBytes) - if err != nil { - return codexArchiveIDs, err - } - } - - // upload index file to codex - cid, err := m.codexClient.UploadArchive(codexIndexBytes) - if err != nil { - m.logger.Error("[CODEX][createHistoryArchiveCodex] failed to upload to codex", zap.Error(err)) - return codexArchiveIDs, err - } - - err = m.writeCodexIndexToFile(communityID, codexIndexBytes) - if err != nil { - return codexArchiveIDs, err - } - - err = m.writeCodexIndexCidToFile(communityID, cid) - if err != nil { - return codexArchiveIDs, err - } - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] index uploaded to codex", zap.String("cid", cid)) - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] archives uploaded to Codex", zap.Any("from", startDate.Unix()), zap.Any("to", endDate.Unix())) - - m.publisher.publish(&Subscription{ - HistoryArchivesCreatedSignal: &signal.HistoryArchivesCreatedSignal{ - CommunityID: communityID.String(), - From: int(startDate.Unix()), - To: int(endDate.Unix()), - }, - }) - } else { - m.logger.Debug("[CODEX][createHistoryArchiveCodex] no archives created") - m.publisher.publish(&Subscription{ - NoHistoryArchivesCreatedSignal: &signal.NoHistoryArchivesCreatedSignal{ - CommunityID: communityID.String(), - From: int(startDate.Unix()), - To: int(endDate.Unix()), - }, - }) - } - - lastMessageArchiveEndDate, err := m.persistence.GetLastMessageArchiveEndDate(communityID) - if err != nil { - return codexArchiveIDs, err - } - - m.logger.Debug("[CODEX][createHistoryArchiveCodex] updating/setting lastMessageArchiveEndDate", zap.Uint64("lastMessageArchiveEndDate", lastMessageArchiveEndDate)) - if lastMessageArchiveEndDate > 0 { - err = m.persistence.UpdateLastMessageArchiveEndDate(communityID, uint64(from.Unix())) - } else { - err = m.persistence.SaveLastMessageArchiveEndDate(communityID, uint64(from.Unix())) - } - if err != nil { - return codexArchiveIDs, err - } - return codexArchiveIDs, nil -} - func (m *ArchiveFileManager) archiveIndexFile(communityID string) string { return path.Join(m.torrentConfig.DataDir, communityID, "index") } -func (m *ArchiveFileManager) ensureCodexCommunityDir(communityID types.HexBytes) error { - if m.codexConfig == nil { - return fmt.Errorf("codex config not initialized") - } - - codexArchiveDir := m.codexHistoryArchiveDataDirPath(communityID) - if err := os.MkdirAll(codexArchiveDir, 0700); err != nil { - return fmt.Errorf("failed to create Codex archive directory %s: %w", codexArchiveDir, err) - } - return nil -} - -func (m *ArchiveFileManager) codexHistoryArchiveDataDirPath(communityID types.HexBytes) string { - return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String()) -} - -func (m *ArchiveFileManager) codexHistoryArchiveIndexFilePath(communityID types.HexBytes) string { - return filepath.Join(m.codexConfig.HistoryArchiveDataDir, communityID.String(), "index") -} - -func (m *ArchiveFileManager) codexHistoryArchiveIndexCidFilePath(communityID types.HexBytes) string { - return filepath.Join(m.codexConfig.HistoryArchiveDataDir, 
communityID.String(), "index-cid") -} - -func (m *ArchiveFileManager) writeCodexIndexToFile(communityID types.HexBytes, bytes []byte) error { - indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - return os.WriteFile(indexFilePath, bytes, 0644) // nolint: gosec -} - -func (m *ArchiveFileManager) readCodexIndexFromFile(communityID types.HexBytes) ([]byte, error) { - indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - return os.ReadFile(indexFilePath) -} - -func (m *ArchiveFileManager) codexIndexFileExists(communityID types.HexBytes) (bool, error) { - indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - _, err := os.Stat(indexFilePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return false, nil - } - return false, err - } - return true, nil -} - -func (m *ArchiveFileManager) removeCodexIndexFile(communityID types.HexBytes) error { - indexFilePath := m.codexHistoryArchiveIndexFilePath(communityID) - err := os.Remove(indexFilePath) - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - return nil -} - -func (m *ArchiveFileManager) removeCodexIndexCidFile(communityID types.HexBytes) error { - indexCidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) - err := os.Remove(indexCidFilePath) - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - return nil -} - -func (m *ArchiveFileManager) writeCodexIndexCidToFile(communityID types.HexBytes, cid string) error { - cidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) - return os.WriteFile(cidFilePath, []byte(cid), 0644) // nolint: gosec -} - -func (m *ArchiveFileManager) readCodexIndexCidFromFile(communityID types.HexBytes) ([]byte, error) { - cidFilePath := m.codexHistoryArchiveIndexCidFilePath(communityID) - return os.ReadFile(cidFilePath) -} - func (m *ArchiveFileManager) createWakuMessageArchive(from time.Time, to time.Time, messages []messagingtypes.ReceivedMessage, topics [][]byte) *protobuf.WakuMessageArchive { var wakuMessages []*protobuf.WakuMessage @@ -679,14 +373,6 @@ func (m *ArchiveFileManager) CreateHistoryArchiveTorrentFromDB(communityID types return m.createHistoryArchiveTorrent(communityID, make([]*messagingtypes.ReceivedMessage, 0), topics, startDate, endDate, partition, encrypt) } -func (m *ArchiveFileManager) CreateHistoryArchiveCodexFromMessages(communityID types.HexBytes, messages []*messagingtypes.ReceivedMessage, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { - return m.createHistoryArchiveCodex(communityID, messages, topics, startDate, endDate, partition, encrypt) -} - -func (m *ArchiveFileManager) CreateHistoryArchiveCodexFromDB(communityID types.HexBytes, topics []messagingtypes.ContentTopic, startDate time.Time, endDate time.Time, partition time.Duration, encrypt bool) ([]string, error) { - return m.createHistoryArchiveCodex(communityID, make([]*messagingtypes.ReceivedMessage, 0), topics, startDate, endDate, partition, encrypt) -} - func (m *ArchiveFileManager) GetMessageArchiveIDsToImport(communityID types.HexBytes) ([]string, error) { return m.persistence.GetMessageArchiveIDsToImport(communityID) } @@ -716,15 +402,6 @@ func (m *ArchiveFileManager) GetHistoryArchiveMagnetlink(communityID types.HexBy return metaInfo.Magnet(nil, &info).String(), nil } -func (m *ArchiveFileManager) GetHistoryArchiveIndexCid(communityID types.HexBytes) (string, error) { - cidData, err := m.readCodexIndexCidFromFile(communityID) - if err 
!= nil { - return "", err - } - - return string(cidData), nil -} - func (m *ArchiveFileManager) archiveDataFile(communityID string) string { return path.Join(m.torrentConfig.DataDir, communityID, "data") } @@ -787,54 +464,6 @@ func (m *ArchiveFileManager) ExtractMessagesFromHistoryArchive(communityID types return archive.Messages, nil } -func (m *ArchiveFileManager) ExtractMessagesFromCodexHistoryArchive(communityID types.HexBytes, archiveID string) ([]*protobuf.WakuMessage, error) { - index, err := m.CodexLoadHistoryArchiveIndexFromFile(m.identity, communityID) - if err != nil { - return nil, err - } - - metadata := index.Archives[archiveID] - cid := metadata.Cid - - var buf bytes.Buffer - err = m.codexClient.LocalDownload(cid, &buf) - if err != nil { - m.logger.Error("[CODEX] failed to download archive from codex", zap.Error(err)) - return nil, err - } - data := buf.Bytes() - - m.logger.Debug("extracting messages from history archive", - zap.String("communityID", communityID.String()), - zap.String("archiveID", archiveID), - zap.String("cid", cid), - ) - - archive := &protobuf.WakuMessageArchive{} - - err = proto.Unmarshal(data, archive) - if err != nil { - pk, err := crypto.DecompressPubkey(communityID) - if err != nil { - m.logger.Error("failed to decompress community pubkey", zap.Error(err)) - return nil, err - } - - decryptedData, err := m.messaging.DecryptMessage(m.identity, pk, data) - if err != nil { - m.logger.Error("failed to decrypt message archive", zap.Error(err)) - return nil, err - } - - err = proto.Unmarshal(decryptedData, archive) - if err != nil { - m.logger.Error("failed to unmarshal message archive", zap.Error(err)) - return nil, err - } - } - return archive.Messages, nil -} - func (m *ArchiveFileManager) LoadHistoryArchiveIndexFromFile(myKey *ecdsa.PrivateKey, communityID types.HexBytes) (*protobuf.WakuMessageArchiveIndex, error) { wakuMessageArchiveIndexProto := &protobuf.WakuMessageArchiveIndex{} @@ -870,38 +499,3 @@ func (m *ArchiveFileManager) LoadHistoryArchiveIndexFromFile(myKey *ecdsa.Privat return wakuMessageArchiveIndexProto, nil } - -func (m *ArchiveFileManager) CodexLoadHistoryArchiveIndexFromFile(myKey *ecdsa.PrivateKey, communityID types.HexBytes) (*protobuf.CodexWakuMessageArchiveIndex, error) { - codexWakuMessageArchiveIndexProto := &protobuf.CodexWakuMessageArchiveIndex{} - - indexData, err := m.readCodexIndexFromFile(communityID) - if err != nil { - return nil, err - } - - err = proto.Unmarshal(indexData, codexWakuMessageArchiveIndexProto) - if err != nil { - return nil, err - } - - if len(codexWakuMessageArchiveIndexProto.Archives) == 0 && len(indexData) > 0 { - // This means we're dealing with an encrypted index file, so we have to decrypt it first - pk, err := crypto.DecompressPubkey(communityID) - if err != nil { - return nil, err - } - - decryptedData, err := m.messaging.DecryptMessage(myKey, pk, indexData) - if err != nil { - m.logger.Error("failed to decrypt message archive", zap.Error(err)) - return nil, err - } - - err = proto.Unmarshal(decryptedData, codexWakuMessageArchiveIndexProto) - if err != nil { - return nil, err - } - } - - return codexWakuMessageArchiveIndexProto, nil -} diff --git a/protocol/communities/manager_test.go b/protocol/communities/manager_test.go index 8a9d0b2287d..ce83c34cbdd 100644 --- a/protocol/communities/manager_test.go +++ b/protocol/communities/manager_test.go @@ -1675,8 +1675,7 @@ func buildTorrentConfig() *params.TorrentConfig { func buildCodexConfig(t *testing.T) *params.CodexConfig { return ¶ms.CodexConfig{ - 
Enabled: true, - HistoryArchiveDataDir: filepath.Join(t.TempDir(), "codex", "archivedata"), + Enabled: true, CodexNodeConfig: codex.Config{ DataDir: filepath.Join(t.TempDir(), "codex", "codexdata"), BlockRetries: 5, diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go index 4c499fe2bd4..1e469f4bec3 100644 --- a/protocol/communities/persistence.go +++ b/protocol/communities/persistence.go @@ -1021,30 +1021,43 @@ func (p *Persistence) GetMagnetlinkMessageClock(communityID types.HexBytes) (uin } func (p *Persistence) SaveCommunityArchiveInfo(communityID types.HexBytes, magnetLinkClock uint64, lastArchiveEndDate uint64, indexCidClock uint64) error { - _, err := p.db.Exec(`INSERT INTO communities_archive_info (magnetlink_clock, last_message_archive_end_date, community_id, index_cid_clock) VALUES (?, ?, ?, ?)`, + _, err := p.db.Exec(` + INSERT INTO communities_archive_info ( + community_id, magnetlink_clock, last_message_archive_end_date, index_cid_clock + ) VALUES (?, ?, ?, ?) + ON CONFLICT(community_id) DO UPDATE SET + magnetlink_clock = excluded.magnetlink_clock, + last_message_archive_end_date = excluded.last_message_archive_end_date, + index_cid_clock = excluded.index_cid_clock`, + communityID.String(), magnetLinkClock, lastArchiveEndDate, - communityID.String(), indexCidClock, ) return err } func (p *Persistence) UpdateMagnetlinkMessageClock(communityID types.HexBytes, magnetLinkClock uint64) error { - _, err := p.db.Exec(`UPDATE communities_archive_info SET - magnetlink_clock = ? - WHERE community_id = ?`, + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, magnetlink_clock) + VALUES (?, ?) + ON CONFLICT(community_id) DO UPDATE SET + magnetlink_clock = excluded.magnetlink_clock`, + communityID.String(), magnetLinkClock, - communityID.String()) + ) return err } func (p *Persistence) UpdateLastSeenMagnetlink(communityID types.HexBytes, magnetlinkURI string) error { - _, err := p.db.Exec(`UPDATE communities_archive_info SET - last_magnetlink_uri = ? - WHERE community_id = ?`, + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, last_magnetlink_uri) + VALUES (?, ?) + ON CONFLICT(community_id) DO UPDATE SET + last_magnetlink_uri = excluded.last_magnetlink_uri`, + communityID.String(), magnetlinkURI, - communityID.String()) + ) return err } @@ -1067,30 +1080,49 @@ func (p *Persistence) GetIndexCidMessageClock(communityID types.HexBytes) (uint6 } func (p *Persistence) UpdateLastSeenIndexCid(communityID types.HexBytes, indexCid string) error { - _, err := p.db.Exec(`UPDATE communities_archive_info SET last_index_cid = ? WHERE community_id = ?`, - indexCid, communityID.String()) + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, last_index_cid) + VALUES (?, ?) + ON CONFLICT(community_id) DO UPDATE SET + last_index_cid = excluded.last_index_cid`, + communityID.String(), + indexCid, + ) return err } func (p *Persistence) UpdateIndexCidMessageClock(communityID types.HexBytes, clock uint64) error { - _, err := p.db.Exec(`UPDATE communities_archive_info SET index_cid_clock = ? WHERE community_id = ?`, - clock, communityID.String()) + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, index_cid_clock) + VALUES (?, ?) 
+ ON CONFLICT(community_id) DO UPDATE SET + index_cid_clock = excluded.index_cid_clock`, + communityID.String(), + clock, + ) return err } func (p *Persistence) SaveLastMessageArchiveEndDate(communityID types.HexBytes, endDate uint64) error { - _, err := p.db.Exec(`INSERT INTO communities_archive_info (last_message_archive_end_date, community_id) VALUES (?, ?)`, + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, last_message_archive_end_date) VALUES (?, ?) + ON CONFLICT(community_id) DO UPDATE SET + last_message_archive_end_date = excluded.last_message_archive_end_date`, + communityID.String(), endDate, - communityID.String()) + ) return err } func (p *Persistence) UpdateLastMessageArchiveEndDate(communityID types.HexBytes, endDate uint64) error { - _, err := p.db.Exec(`UPDATE communities_archive_info SET - last_message_archive_end_date = ? - WHERE community_id = ?`, + _, err := p.db.Exec(` + INSERT INTO communities_archive_info (community_id, last_message_archive_end_date) + VALUES (?, ?) + ON CONFLICT(community_id) DO UPDATE SET + last_message_archive_end_date = excluded.last_message_archive_end_date`, + communityID.String(), endDate, - communityID.String()) + ) return err } diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go index 6b2c06804e1..ac0c4364984 100644 --- a/protocol/communities_messenger_token_permissions_test.go +++ b/protocol/communities_messenger_token_permissions_test.go @@ -2322,7 +2322,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedArchiveMe close(s.bob.importDelayer.wait) }) cancel := make(chan struct{}) - err = s.bob.importHistoryArchives(community.ID(), cancel) + err = s.bob.importHistoryArchives(community.ID(), cancel, "") s.Require().NoError(err) // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel @@ -2381,16 +2381,14 @@ func PrintArchiveIndex(index *protobuf.CodexWakuMessageArchiveIndex) { } } -func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArchiveMessages() { +func (s *MessengerCommunitiesTokenPermissionsSuite) TestUploadDownloadCodexHistoryArchives_withSharedCodexClient() { dataDir := s.T().TempDir() - archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") codexDataDir := filepath.Join(dataDir, "codex", "codexdata") - log.Println("Data directory:", archiveDataDir) + log.Println("Codex data directory:", codexDataDir) codexConfig := params.CodexConfig{ - Enabled: false, - HistoryArchiveDataDir: archiveDataDir, + Enabled: false, CodexNodeConfig: codex.Config{ DataDir: codexDataDir, BlockRetries: 10, @@ -2400,8 +2398,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch }, } - // Share archive directory between all users - // so that bob can access owner's created archive + // Share CodexClient between owner and bob s.owner.archiveManager.SetCodexConfig(&codexConfig) s.bob.archiveManager.SetCodexConfig(&codexConfig) @@ -2545,252 +2542,17 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestImportDecryptedCodexArch // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 // Ensure owner has archive - archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) - s.Require().NoError(err) - s.Require().Len(archiveIndex.Archives, 1) - - PrintArchiveIndex(archiveIndex) - - // Ensure bob has archive 
(because they share same local directory) - archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.bob.identity, community.ID()) - s.Require().NoError(err) - s.Require().Len(archiveIndex.Archives, 1) - - PrintArchiveIndex(archiveIndex) - - archiveHash := maps.Keys(archiveIndex.Archives)[0] - - // Save message archive ID as in - // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4325-L4336 - err = s.bob.archiveManager.SaveMessageArchiveID(community.ID(), archiveHash) - s.Require().NoError(err) - - // Import archive - s.bob.importDelayer.once.Do(func() { - close(s.bob.importDelayer.wait) - }) - cancel := make(chan struct{}) - err = s.bob.importHistoryArchives(community.ID(), cancel) - s.Require().NoError(err) - - // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel - receivedMessage1, err := s.bob.MessageByID(message1.ID) - s.Require().Nil(receivedMessage1) - s.Require().Error(err) - - chatID := []byte(chat.ID) - hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) - s.Require().NoError(err) - s.Require().Equal(1, hashRatchetMessagesCount) - - // Make bob satisfy channel criteria - waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { - action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] - if !ok || action.ActionType != communities.EncryptionKeySendToMembers { - return false - } - _, ok = action.Members[crypto.PubkeyToHex(&s.bob.identity.PublicKey)] - return ok - }) - - s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, channelPermission.TokenCriteria[0]) - - // force owner to reevaluate channel members - // in production it will happen automatically, by periodic check - err = s.owner.communitiesManager.ForceMembersReevaluation(community.ID()) - s.Require().NoError(err) - - err = <-waitOnChannelKeyToBeDistributedToBob - s.Require().NoError(err) - - // Finally ensure that the message from archive was retrieved and decrypted - - // NOTE: In theory a single RetrieveAll call should be enough, - // because we immediately process all hash ratchet messages - response, err = s.bob.RetrieveAll() - s.Require().NoError(err) - s.Require().Len(response.Messages(), 1) - - receivedMessage1, ok := response.messages[message1.ID] - log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) - s.Require().True(ok) - s.Require().Equal(messageText1, receivedMessage1.Text) -} - -func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration() { - - dataDir := s.T().TempDir() - archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") - codexDataDir := filepath.Join(dataDir, "codex", "codexdata") - - log.Println("Data directory:", archiveDataDir) - - codexConfig := params.CodexConfig{ - Enabled: false, - HistoryArchiveDataDir: archiveDataDir, - CodexNodeConfig: codex.Config{ - DataDir: codexDataDir, - BlockRetries: 10, - LogLevel: "ERROR", - LogFormat: codex.LogFormatNoColors, - Nat: "none", - }, - } - - // Share archive directory between all users - // so that bob can access owner's created archive - s.owner.archiveManager.SetCodexConfig(&codexConfig) - s.bob.archiveManager.SetCodexConfig(&codexConfig) - - err := s.owner.archiveManager.StartCodexClient() - s.Require().NoError(err) - codexClient := s.owner.archiveManager.GetCodexClient() - s.Require().NotNil(codexClient) - // no need to stop codex client, as it will be stopped during 
messenger Stop - // defer codexClient.Stop() //nolint: errcheck - - s.bob.archiveManager.SetCodexClient(codexClient) - - // 1.1. Create community - community, chat := s.createCommunity() - - archiveDistributionPreferenceOwner, err := s.owner.GetArchiveDistributionPreference() - s.Require().NoError(err) - log.Println("Archive distribution preference for owner:", archiveDistributionPreferenceOwner) - s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceOwner) - - archiveDistributionPreferenceBob, err := s.bob.GetArchiveDistributionPreference() - s.Require().NoError(err) - log.Println("Archive distribution preference for bob:", archiveDistributionPreferenceBob) - s.Require().Equal(communities.ArchiveDistributionMethodCodex, archiveDistributionPreferenceBob) - - // 1.2. Setup permissions - communityPermission := &requests.CreateCommunityTokenPermission{ - CommunityID: community.ID(), - Type: protobuf.CommunityTokenPermission_BECOME_MEMBER, - TokenCriteria: []*protobuf.TokenCriteria{ - { - Type: protobuf.CommunityTokenType_ERC20, - ContractAddresses: map[uint64]string{testChainID1: "0x124"}, - Symbol: "TEST2", - AmountInWei: "100000000000000000000", - Decimals: uint64(18), - }, - }, - } - - channelPermission := &requests.CreateCommunityTokenPermission{ - CommunityID: community.ID(), - Type: protobuf.CommunityTokenPermission_CAN_VIEW_AND_POST_CHANNEL, - ChatIds: []string{chat.ID}, - TokenCriteria: []*protobuf.TokenCriteria{ - { - Type: protobuf.CommunityTokenType_ERC20, - ContractAddresses: map[uint64]string{testChainID1: "0x124"}, - Symbol: "TEST2", - AmountInWei: "200000000000000000000", - Decimals: uint64(18), - }, - }, - } - - waitOnChannelKeyAdded := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { - action, ok := sub.keyActions.ChannelKeysActions[chat.CommunityChatID()] - if !ok || action.ActionType != communities.EncryptionKeyAdd { - return false - } - _, ok = action.Members[crypto.PubkeyToHex(&s.owner.identity.PublicKey)] - return ok - }) - - waitOnCommunityPermissionCreated := waitOnCommunitiesEvent(s.owner, func(sub *communities.Subscription) bool { - return len(sub.Community.TokenPermissions()) == 2 - }) - - response, err := s.owner.CreateCommunityTokenPermission(communityPermission) - s.Require().NoError(err) - s.Require().NotNil(response) - s.Require().Len(response.Communities(), 1) - - response, err = s.owner.CreateCommunityTokenPermission(channelPermission) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + indexCid, err := s.owner.communitiesManager.GetLastSeenIndexCid(community.ID()) s.Require().NoError(err) - s.Require().NotNil(response) - s.Require().Len(response.Communities(), 1) - - community = response.Communities()[0] - s.Require().True(community.HasTokenPermissions()) - s.Require().Len(community.TokenPermissions(), 2) - - err = <-waitOnCommunityPermissionCreated - s.Require().NoError(err) - s.Require().True(community.Encrypted()) - - err = <-waitOnChannelKeyAdded - s.Require().NoError(err) - - // 2. Owner: Send a message A - messageText1 := RandomLettersString(10) - message1 := s.sendChatMessage(s.owner, chat.ID, messageText1) - - // 2.2. Retrieve own message (to make it stored in the archive later) - _, err = s.owner.RetrieveAll() - s.Require().NoError(err) - - log.Println("Message sent with ID:", message1.ID) - - // 3. 
Owner: Create community archive - const partition = 2 * time.Minute - messageDate := time.UnixMilli(int64(message1.Timestamp)) - startDate := messageDate.Add(-time.Minute) - endDate := messageDate.Add(time.Minute) - topic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(chat.ID)) - communityCommonTopic := messagingtypes.BytesToContentTopic(messaging.ToContentTopic(community.UniversalChatID())) - topics := []messagingtypes.ContentTopic{topic, communityCommonTopic} - - s.owner.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} - s.bob.config.messengerSignalsHandler = &MessengerSignalsHandlerMock{} - - archiveIDs, err := s.owner.archiveManager.CreateHistoryArchiveCodexFromDB(community.ID(), topics, startDate, endDate, partition, community.Encrypted()) - s.Require().NoError(err) - s.Require().Len(archiveIDs, 1) - - community, err = s.owner.GetCommunityByID(community.ID()) - s.Require().NoError(err) - - // 4. Bob: join community (satisfying membership, but not channel permissions) - s.makeAddressSatisfyTheCriteria(testChainID1, bobAddress, communityPermission.TokenCriteria[0]) - s.advertiseCommunityTo(community, s.bob) - - waitForKeysDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { - action := sub.keyActions.CommunityKeyAction - if action.ActionType != communities.EncryptionKeySendToMembers { - return false - } - _, ok := action.Members[s.bob.IdentityPublicKeyString()] - return ok - }) - - s.joinCommunity(community, s.bob) - - err = <-waitForKeysDistributedToBob - s.Require().NoError(err) - - // 5. Bob: Import community archive - // The archive is successfully decrypted, but the message inside is not. - // https://github.com/status-im/status-desktop/issues/13105 can be reproduced at this stage - // by forcing `encryption.ErrHashRatchetGroupIDNotFound` in `ExtractMessagesFromHistoryArchive` after decryption here: - // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 - - // Ensure owner has archive - archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + s.Require().NotEmpty(indexCid) + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndex(ctx, s.owner.identity, community.ID(), indexCid, true) s.Require().NoError(err) s.Require().Len(archiveIndex.Archives, 1) PrintArchiveIndex(archiveIndex) - indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) - s.Require().NoError(err) - // log s.T().Logf("Codex archive OWNER index CID: %s", indexCid) @@ -2802,7 +2564,6 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration() { }) s.bob.downloadAndImportCodexHistoryArchives(community.ID(), indexCid, cancelChan) - s.Require().NoError(err) // Ensure message1 wasn't imported, as it's encrypted, and we don't have access to the channel receivedMessage1, err := s.bob.MessageByID(message1.ID) @@ -2848,19 +2609,17 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration() { s.Require().Equal(messageText1, receivedMessage1.Text) } -func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration2() { +func (s *MessengerCommunitiesTokenPermissionsSuite) TestUploadDownloadCodexHistoryArchives_withSharedCodexClient_usingHandleCommunityMessageArchiveIndexCid() { // skip for now - WIP s.T().Skip("WIP") dataDir := s.T().TempDir() - archiveDataDir := filepath.Join(dataDir, "codex", "archivedata") codexDataDir := 
filepath.Join(dataDir, "codex", "codexdata") - log.Println("Data directory:", archiveDataDir) + log.Println("Codex data directory:", codexDataDir) codexConfig := params.CodexConfig{ - Enabled: false, - HistoryArchiveDataDir: archiveDataDir, + Enabled: false, CodexNodeConfig: codex.Config{ DataDir: codexDataDir, BlockRetries: 10, @@ -2870,8 +2629,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration2() }, } - // Share archive directory between all users - // so that bob can access owner's created archive + // Share CodexClient between owner and bob s.owner.archiveManager.SetCodexConfig(&codexConfig) s.bob.archiveManager.SetCodexConfig(&codexConfig) @@ -3015,13 +2773,18 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration2() // https://github.com/status-im/status-go/blob/6c82a6c2be7ebed93bcae3b9cf5053da3820de50/protocol/communities/manager.go#L4403 // Ensure owner has archive - archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + indexCid, err := s.owner.communitiesManager.GetLastSeenIndexCid(community.ID()) + s.Require().NoError(err) + s.Require().NotEmpty(indexCid) + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndex(ctx, s.owner.identity, community.ID(), indexCid, true) s.Require().NoError(err) s.Require().Len(archiveIndex.Archives, 1) PrintArchiveIndex(archiveIndex) - indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + indexCid, err = s.owner.communitiesManager.GetLastSeenIndexCid(community.ID()) s.Require().NoError(err) // log @@ -3137,7 +2900,7 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestFullCodexIntegration2() s.Require().Equal(messageText1, receivedMessage1.Text) } -func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabase() { +func (s *MessengerCommunitiesTokenPermissionsSuite) TestUploadDownloadCodexHistoryArchives() { // The messengers used in the tests in this suite use the helper newTestMessenger (protocol/messenger_builder_test.go). In the config setup (config.complete), tmc.nodeConfig defaults to an empty params.NodeConfig{} unless the test overrides it. The default params.NodeConfig zero-value has all nested configs (including CodexConfig.Enabled) set to false. // During newTestMessenger, the in-memory appDb is migrated and then sDB.CreateSettings(*config.appSettings, *config.nodeConfig) is called (messenger_builder_test.go (line 120)). If you don’t override config.nodeConfig beforehand, this writes CodexConfig.Enabled = false into the node-config tables—mirroring what a brand-new install would do. 
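A rough Go sketch of the override the comment above describes; tmc.nodeConfig, params.NodeConfig and the CodexConfig fields are taken from that comment and from buildCodexConfig earlier in this patch, and the exact field layout is an assumption, not verified against messenger_builder_test.go:

	// Hypothetical override: give the test a non-zero node config before
	// config.complete / CreateSettings run, so that CodexConfig.Enabled = true
	// is what gets written into the node-config tables.
	nodeConfig := params.NodeConfig{}
	nodeConfig.CodexConfig = params.CodexConfig{ // assumed to be a value field on params.NodeConfig
		Enabled: true,
		CodexNodeConfig: codex.Config{
			DataDir: filepath.Join(s.T().TempDir(), "codex", "codexdata"),
		},
	}
	tmc.nodeConfig = &nodeConfig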
@@ -3295,13 +3058,18 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Require().Len(archiveIDs, 1) // Ensure owner has archive - archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.owner.identity, community.ID()) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + indexCid, err := s.owner.communitiesManager.GetLastSeenIndexCid(community.ID()) + s.Require().NoError(err) + s.Require().NotEmpty(indexCid) + archiveIndex, err := s.owner.archiveManager.CodexLoadHistoryArchiveIndex(ctx, s.owner.identity, community.ID(), indexCid, true) s.Require().NoError(err) s.Require().Len(archiveIndex.Archives, 1) PrintArchiveIndex(archiveIndex) - indexCid, err := s.owner.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + indexCid, err = s.owner.communitiesManager.GetLastSeenIndexCid(community.ID()) s.Require().NoError(err) // log @@ -3341,19 +3109,22 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas close(s.bob.importDelayer.wait) }) + s.bob.ratchetNotFoundDelay = 1 * time.Second + s.bob.downloadAndImportCodexHistoryArchives(community.ID(), indexCid, cancelChan) - s.Require().NoError(err) - // Ensure owner has archive - archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndexFromFile(s.bob.identity, community.ID()) + // Ensure bob has archive + ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + indexCid, err = s.bob.communitiesManager.GetLastSeenIndexCid(community.ID()) + s.Require().NoError(err) + s.Require().NotEmpty(indexCid) + archiveIndex, err = s.bob.archiveManager.CodexLoadHistoryArchiveIndex(ctx, s.bob.identity, community.ID(), indexCid, true) s.Require().NoError(err) s.Require().Len(archiveIndex.Archives, 1) PrintArchiveIndex(archiveIndex) - indexCid, err = s.bob.archiveManager.GetHistoryArchiveIndexCid(community.ID()) - s.Require().NoError(err) - // log s.T().Logf("Codex archive BOB index CID: %s", indexCid) @@ -3363,9 +3134,14 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas s.Require().Error(err) chatID := []byte(chat.ID) - hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) - s.Require().NoError(err) - s.Require().Equal(1, hashRatchetMessagesCount) + s.Require().Eventually(func() bool { + count, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + s.Require().NoError(err) + return count == 1 + }, 30*time.Second, 500*time.Millisecond) + // hashRatchetMessagesCount, err := s.bob.persistence.GetHashRatchetMessagesCountForGroup(chatID) + // s.Require().NoError(err) + // s.Require().Equal(1, hashRatchetMessagesCount) // Make bob satisfy channel criteria waitOnChannelKeyToBeDistributedToBob := s.waitOnKeyDistribution(func(sub *CommunityAndKeyActions) bool { @@ -3399,7 +3175,6 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestLoadingConfigFromDatabas log.Printf("Received message: %+v, ok: %v", receivedMessage1, ok) s.Require().True(ok) s.Require().Equal(messageText1, receivedMessage1.Text) - } func (s *MessengerCommunitiesTokenPermissionsSuite) TestDeleteChannelWithTokenPermission() { diff --git a/protocol/messenger.go b/protocol/messenger.go index 1b8b011ec2a..fdef688f0ac 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" gocommon "github.com/status-im/status-go/common" - utils 
"github.com/status-im/status-go/common" "github.com/status-im/status-go/connection" "github.com/status-im/status-go/contracts" "github.com/status-im/status-go/crypto" @@ -141,6 +140,7 @@ type Messenger struct { wait chan struct{} once sync.Once } + ratchetNotFoundDelay time.Duration connectionState connection.State contractMaker *contracts.ContractMaker @@ -427,8 +427,9 @@ func NewMessenger( wait chan struct{} once sync.Once }{wait: make(chan struct{})}, - browserDatabase: c.browserDatabase, - httpServer: c.httpServer, + ratchetNotFoundDelay: 1 * time.Hour, + browserDatabase: c.browserDatabase, + httpServer: c.httpServer, shutdownTasks: []func() error{ pushNotificationClient.Stop, communitiesManager.Stop, @@ -661,7 +662,7 @@ func (m *Messenger) Start() (*MessengerResponse, error) { if err != nil { return nil, err } - if err := utils.ValidateDisplayName(&displayName); err != nil { + if err := gocommon.ValidateDisplayName(&displayName); err != nil { // Somehow a wrong display name was saved. We need to update it so that others accept our messages pubKey, err := m.settings.GetPublicKey() if err != nil { diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go index f7b9cf57130..4c042fbf133 100644 --- a/protocol/messenger_communities.go +++ b/protocol/messenger_communities.go @@ -2016,15 +2016,17 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ requestToJoinResponseProto.MagnetUri = magnetlink } - if m.archiveManager.IsCodexReady() && m.archiveManager.CodexIndexCidFileExists(community.ID()) { - m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] calling GetHistoryArchiveIndexCid", zap.String("communityID", community.IDString())) - cid, err := m.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + if m.archiveManager.IsCodexReady() { + m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] checking if currently seeding", zap.String("communityID", community.IDString())) + cid, err := m.communitiesManager.GetLastSeenIndexCid(community.ID()) if err != nil { m.logger.Warn("couldn't get codex index cid for community", zap.Error(err)) return nil, err } - m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] setting requestToJoinResponseProto.IndexCid", zap.String("communityID", community.IDString()), zap.String("cid", cid)) - requestToJoinResponseProto.IndexCid = cid + if m.archiveManager.IsSeedingHistoryArchiveCodex(community.ID(), cid) { + m.logger.Debug("[CODEX][acceptRequestToJoinCommunity] setting requestToJoinResponseProto.IndexCid", zap.String("communityID", community.IDString()), zap.String("cid", cid)) + requestToJoinResponseProto.IndexCid = cid + } } payload, err := proto.Marshal(requestToJoinResponseProto) @@ -2856,11 +2858,15 @@ func (m *Messenger) EditCommunity(request *requests.EditCommunity) (*MessengerRe } id := community.ID() + currentIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(id) + if err != nil { + return nil, err + } if m.archiveManager.IsReady() { if !communitySettings.HistoryArchiveSupportEnabled { m.archiveManager.StopHistoryArchiveTasksInterval(id) - } else if !m.archiveManager.IsSeedingHistoryArchiveTorrent(id) && !m.archiveManager.IsSeedingHistoryArchiveCodex(id) { + } else if !m.archiveManager.IsSeedingHistoryArchiveTorrent(id) && !m.archiveManager.IsSeedingHistoryArchiveCodex(id, currentIndexCid) { var communities []*communities.Community communities = append(communities, community) go m.InitHistoryArchiveTasks(communities) @@ -3846,9 +3852,15 @@ func (m *Messenger) 
InitHistoryArchiveTasks(communities []*communities.Community
 
 		if preference == params.ArchiveDistributionMethodCodex {
 			// Check if there's already a codex file for this community and seed it
-			err = m.archiveManager.SeedHistoryArchiveIndexCid(c.ID())
+			currentIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(c.ID())
 			if err != nil {
-				m.logger.Error("failed to seed history archive", zap.Error(err))
+				m.logger.Error("[CODEX][init_history_archive_tasks] failed to get last seen index cid", zap.Error(err))
+			}
+			if err == nil {
+				err = m.archiveManager.SeedHistoryArchiveIndexCid(c.ID(), currentIndexCid)
+				if err != nil {
+					m.logger.Error("[CODEX][init_history_archive_tasks] failed to seed history archive", zap.Error(err))
+				}
 			}
 		}
 
@@ -3929,9 +3941,14 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community
 			}
 		}
 		if preference == params.ArchiveDistributionMethodCodex {
-			err := m.archiveManager.SeedHistoryArchiveIndexCid(c.ID())
+			currentIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(c.ID())
 			if err != nil {
-				m.logger.Error("failed to seed history archive", zap.Error(err))
+				m.logger.Error("[CODEX][init_history_archive_tasks] failed to get last seen index cid", zap.Error(err))
+			} else {
+				err := m.archiveManager.SeedHistoryArchiveIndexCid(c.ID(), currentIndexCid)
+				if err != nil {
+					m.logger.Error("[CODEX][init_history_archive_tasks] failed to seed history archive", zap.Error(err))
+				}
 			}
 		}
 		// we do not have to explicitly seed to codex. If codex is enabled
@@ -4024,7 +4041,12 @@ func (m *Messenger) resumeHistoryArchivesImport(communityID types.HexBytes) erro
 	go func() {
 		defer gocommon.LogOnPanic()
 		defer task.Waiter.Done()
-		err := m.importHistoryArchives(communityID, task.CancelChan)
+		lastSeenIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(communityID)
+		if err != nil {
+			m.logger.Error("failed to get last seen index cid", zap.Error(err))
+			return
+		}
+		err = m.importHistoryArchives(communityID, task.CancelChan, lastSeenIndexCid)
 		if err != nil {
 			m.logger.Error("failed to import history archives", zap.Error(err))
 		}
@@ -4041,7 +4063,7 @@ func (m *Messenger) SlowdownArchivesImport() {
 	m.importRateLimiter.SetLimit(rate.Every(importSlowRate))
 }
 
-func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel chan struct{}) error {
+func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel chan struct{}, indexCid string) error {
 	importTicker := time.NewTicker(100 * time.Millisecond)
 	defer importTicker.Stop()
 
@@ -4052,6 +4074,20 @@ func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel cha
 		cancelFunc()
 	}()
 
+	preference, err := m.GetArchiveDistributionPreference()
+	if err != nil {
+		m.logger.Warn("[CODEX][importHistoryArchives] failed to get archive distribution preference, using codex", zap.Error(err))
+		preference = communities.ArchiveDistributionMethodCodex
+	}
+	var codexIndex *protobuf.CodexWakuMessageArchiveIndex
+	if preference == communities.ArchiveDistributionMethodCodex {
+		codexIndex, err = m.archiveManager.CodexLoadHistoryArchiveIndex(
+			ctx, m.identity, communityID, indexCid, true)
+		if err != nil {
+			return err
+		}
+	}
+
 	m.logger.Debug("[CODEX][importHistoryArchives] waiting to start importing history archive messages (importDelayer.wait)", zap.String("communityID", types.EncodeHex(communityID)))
 
 	// don't proceed until initial import delay has passed
@@ -4072,7 +4108,7 @@ importMessageArchivesLoop:
 		case <-ctx.Done():
 			m.logger.Debug("[CODEX][importHistoryArchives] interrupted
importing history archive messages") return nil - case <-time.After(1 * time.Hour): + case <-time.After(m.ratchetNotFoundDelay): delayImport = false } } @@ -4104,19 +4140,16 @@ importMessageArchivesLoop: downloadedArchiveID := archiveIDsToImport[0] var archiveMessages []*protobuf.WakuMessage - preference, err := m.GetArchiveDistributionPreference() - if err != nil { - m.logger.Warn("[CODEX][importHistoryArchives] failed to get archive distribution preference, using codex", zap.Error(err)) - preference = communities.ArchiveDistributionMethodCodex - } + if preference == communities.ArchiveDistributionMethodCodex { m.logger.Debug("[CODEX][importHistoryArchives] using codex to extract messages") - archiveMessages, err = m.archiveManager.ExtractMessagesFromCodexHistoryArchive(communityID, downloadedArchiveID) + archiveMessages, err = m.archiveManager.ExtractMessagesFromCodexHistoryArchive(communityID, downloadedArchiveID, codexIndex) } else { archiveMessages, err = m.archiveManager.ExtractMessagesFromHistoryArchive(communityID, downloadedArchiveID) } if err != nil { if errors.Is(err, messagingtypes.ErrHashRatchetGroupIDNotFound) { + m.logger.Error("[CODEX][importHistoryArchives] ErrHashRatchetGroupIDNotFound", zap.Error(err)) // In case we're missing hash ratchet keys, best we can do is // to wait for them to be received and try import again. delayImport = true @@ -4211,7 +4244,7 @@ func (m *Messenger) dispatchIndexCidMessage(communityID string) error { return err } - indexCid, err := m.archiveManager.GetHistoryArchiveIndexCid(community.ID()) + indexCid, err := m.communitiesManager.GetLastSeenIndexCid(community.ID()) if err != nil { return err } @@ -5291,8 +5324,12 @@ func (m *Messenger) GetArchiveDistributionPreference() (string, error) { return m.communitiesManager.GetArchiveDistributionPreference() } -func (m *Messenger) CodexIndexCidFileExists(communityID types.HexBytes) bool { - return m.archiveManager.CodexIndexCidFileExists(communityID) +func (m *Messenger) IsSeedingHistoryArchiveCodex(communityID types.HexBytes) bool { + currentIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(communityID) + if err != nil { + return false + } + return m.archiveManager.IsSeedingHistoryArchiveCodex(communityID, currentIndexCid) } func (m *Messenger) Connect(peerId string, addrs []string) error { diff --git a/protocol/messenger_communities_import_discord.go b/protocol/messenger_communities_import_discord.go index 030f32ebbfd..f5768aa46cc 100644 --- a/protocol/messenger_communities_import_discord.go +++ b/protocol/messenger_communities_import_discord.go @@ -1021,9 +1021,14 @@ func (m *Messenger) RequestImportDiscordChannel(request *requests.ImportDiscordC } if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { - err = m.archiveManager.SeedHistoryArchiveIndexCid(request.CommunityID) + lastSeenIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(request.CommunityID) if err != nil { - m.logger.Error("[CODEX][RequestImportDiscordChannel] failed to seed history archive index cid", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordChannel] failed to get last seen index cid", zap.Error(err)) + } else { + err = m.archiveManager.SeedHistoryArchiveIndexCid(request.CommunityID, lastSeenIndexCid) + if err != nil { + m.logger.Error("[CODEX][RequestImportDiscordChannel] failed to seed history archive index cid", zap.Error(err), zap.String("indexCid", lastSeenIndexCid)) + } } } @@ -1831,9 +1836,14 @@ func (m *Messenger) RequestImportDiscordCommunity(request 
*requests.ImportDiscor } if m.archiveManager.IsCodexReady() && communitySettings.HistoryArchiveSupportEnabled { - err = m.archiveManager.SeedHistoryArchiveIndexCid(discordCommunity.ID()) + lastSeenIndexCid, err := m.communitiesManager.GetLastSeenIndexCid(discordCommunity.ID()) if err != nil { - m.logger.Error("[CODEX][RequestImportDiscordCommunity] failed to seed history archive index cid", zap.Error(err)) + m.logger.Error("[CODEX][RequestImportDiscordCommunity] failed to get last seen index cid", zap.Error(err)) + } else { + err = m.archiveManager.SeedHistoryArchiveIndexCid(discordCommunity.ID(), lastSeenIndexCid) + if err != nil { + m.logger.Error("[CODEX][RequestImportDiscordCommunity] failed to seed history archive index cid", zap.Error(err), zap.String("indexCid", lastSeenIndexCid)) + } } } diff --git a/protocol/messenger_handler.go b/protocol/messenger_handler.go index 6a6ca53cacf..9e184e7b5e6 100644 --- a/protocol/messenger_handler.go +++ b/protocol/messenger_handler.go @@ -1363,7 +1363,7 @@ func (m *Messenger) HandleHistoryArchiveIndexCidMessage(state *ReceivedMessageSt // All checks passed - proceed with download m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Unseeding existing history archive index CID for community (if any)", zap.String("communityID", community.IDString())) - m.archiveManager.UnseedHistoryArchiveIndexCid(id) + m.archiveManager.UnseedHistoryArchiveIndexCid(id, lastSeenCid) currentTask := m.archiveManager.GetHistoryArchiveDownloadTask(id.String()) m.logger.Debug("[CODEX][HandleHistoryArchiveIndexCidMessage] Starting download and import of history archives", zap.String("cid", cid)) @@ -1439,7 +1439,7 @@ func (m *Messenger) downloadAndImportHistoryArchives(id types.HexBytes, magnetli return } - err = m.importHistoryArchives(id, cancel) + err = m.importHistoryArchives(id, cancel, "") if err != nil { m.logger.Error("failed to import history archives", zap.Error(err)) m.config.messengerSignalsHandler.DownloadingHistoryArchivesFinished(types.EncodeHex(id)) @@ -1452,33 +1452,32 @@ func (m *Messenger) downloadAndImportHistoryArchives(id types.HexBytes, magnetli func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, indexCid string, cancel chan struct{}) { downloadTaskInfo, err := m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) if err != nil { - logMsg := "[CODEX][downloadAndImportCodexHistoryArchives] failed to download history archive data" - if err == communities.ErrIndexCidTimedout { - m.logger.Debug("[CODEX][downloadAndImportCodexHistoryArchives] downloading indexCid has timed out, trying once more...") - downloadTaskInfo, err = m.archiveManager.DownloadHistoryArchivesByIndexCid(id, indexCid, cancel) - if err != nil { - m.logger.Error(logMsg, zap.Error(err)) - return - } - } else { - m.logger.Debug(logMsg, zap.Error(err)) - return - } + m.logger.Error( + "[CODEX][downloadAndImportCodexHistoryArchives] failed to download history archive data", + zap.Error(err), + ) + return } if downloadTaskInfo.Cancelled { if downloadTaskInfo.TotalDownloadedArchivesCount > 0 { m.logger.Debug(fmt.Sprintf("[CODEX][downloadAndImportCodexHistoryArchives] downloaded %d of %d archives so far", downloadTaskInfo.TotalDownloadedArchivesCount, downloadTaskInfo.TotalArchivesCount)) } - m.archiveManager.UnseedHistoryArchiveIndexCid(id) + m.archiveManager.UnseedHistoryArchiveIndexCid(id, indexCid) return } + m.logger.Debug("[CODEX][download_and_import_codex_history_archives] Updating last seen indexCid", + zap.String("indexCid", 
indexCid)) err = m.communitiesManager.UpdateLastSeenIndexCid(id, indexCid) if err != nil { - m.logger.Error("[CODEX][downloadAndImportCodexHistoryArchives] couldn't update last seen indexCid", zap.Error(err)) + m.logger.Error("[CODEX][download_and_import_codex_history_archives] couldn't update last seen indexCid", + zap.String("indexCid", indexCid), zap.Error(err)) + return } + m.archiveManager.PublishHistoryArchivesSeedingSignal(id, false, true) + err = m.checkIfIMemberOfCommunity(id) if err != nil { return @@ -1486,7 +1485,7 @@ func (m *Messenger) downloadAndImportCodexHistoryArchives(id types.HexBytes, ind m.logger.Debug("[CODEX][downloadAndImportCodexHistoryArchives] Importing history archives now") - err = m.importHistoryArchives(id, cancel) + err = m.importHistoryArchives(id, cancel, indexCid) if err != nil { m.logger.Error("[CODEX][downloadAndImportCodexHistoryArchives] failed to import history archives", zap.Error(err)) m.config.messengerSignalsHandler.DownloadingHistoryArchivesFinished(types.EncodeHex(id)) diff --git a/protocol/messenger_handler_test.go b/protocol/messenger_handler_test.go index c1b2936745d..5e6d6cbbdf8 100644 --- a/protocol/messenger_handler_test.go +++ b/protocol/messenger_handler_test.go @@ -213,8 +213,7 @@ func (s *EventToSystemMessageSuite) TestHandleHistoryArchiveIndexCidMessageWithC s.Require().NoError(err) s.m.archiveManager.SetCodexConfig(¶ms.CodexConfig{ - Enabled: true, - HistoryArchiveDataDir: filepath.Join(s.T().TempDir(), "codex", "archivedata"), + Enabled: true, CodexNodeConfig: codex.Config{ DataDir: filepath.Join(s.T().TempDir(), "codex", "codexdata"), LogFormat: codex.LogFormatNoColors, diff --git a/services/ext/api.go b/services/ext/api.go index 6fcb39549fa..452727b693b 100644 --- a/services/ext/api.go +++ b/services/ext/api.go @@ -1606,7 +1606,7 @@ func (api *PublicAPI) PeerID() string { } func (m *PublicAPI) HasCommunityArchive(communityID types.HexBytes) bool { - return m.service.messenger.CodexIndexCidFileExists(communityID) + return m.service.messenger.IsSeedingHistoryArchiveCodex(communityID) } func (m *PublicAPI) Connect(peerId string, addrs []string) error { diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py index 83847c0c833..8bd21db7055 100644 --- a/tests-functional/tests/test_wakuext_community_archives.py +++ b/tests-functional/tests/test_wakuext_community_archives.py @@ -110,9 +110,9 @@ def test_community_archive_index_exists(self): logging.info("Checking that community owner has local index CID file...") - # Ensure that the community archive index exists in the file system of the community owner. - # We test this by checking the corresponding archive index CID file exists. - # This index CID file contains the Codex CID of the archive index. + # Ensure that the community archive index is being seeded by the community owner. + # has_community_archive returns true if lastSeenIndexCid from DB is not empty + # and HasCid on the CodexClient returns true. 
has_archive_index = self.creator.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Creator should have community archive index after messages are sent" @@ -122,31 +122,26 @@ def test_community_archive_index_exists(self): # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds archive_timeout = 10 - logging.info("Waiting for community member to download manifest of the archive index...") - # Wait for the community member to download the archive index manifest - self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout) - logging.info("Success! Manifest of the archive index fetched!") - - # When wait for index download completed signal - at this stage the index and index CID files - # should both exist in the file system of the member. + # Wait for index download completed signal. This signal is emitted + # immediately after archive index is downloaded from Codex node. logging.info("Waiting for community member to download archive index...") self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) logging.info("Success! Archive index downloaded!") - # Ensure that the community archive index CID file exists in the file system for the member. - # After successfully downloading the archive index, its CID is stored in the the - # index CID file and the file is written immediately after the archive index has been downloaded. - # Notice that at this stage, the node still does not have any single archive downloaded. + # The HistoryArchivesSeedingSignal is emitted right after all archives + # are downloaded to the Codex node and the corresponding index CID is + # recorded in the database as "lastSeenIndexCid". + logging.info("Waiting for community member to download ALL history archives...") + self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) + logging.info("Success! Community member has downloaded ALL history archives!") + + # The archive index should be "seeding": index CID in the database and + # HasCid on the CodexClient returns true. logging.info("Verifying that community member has index CID file...") has_archive_index = self.member.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Member should have community archive index after messages are sent" logging.info("Success! Community member has index CID file!") - # Wait for the community archives to be downloaded for the first member. - logging.info("Waiting for community member to download ALL history archives...") - self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) - logging.info("Success! Community member has downloaded ALL history archives!") - # Once the historyArchivesSeeding signal is received, the database # should be already updated: archive ID (HASH) should be stored in the database. 
logging.info("Verifying that archive ID (HASH) is recorded in the database...") @@ -170,30 +165,23 @@ def test_community_archive_index_exists(self): assert message is None, "Another member should not have messages before archive is dispatched, downloaded and imported" logging.info("Verified that another member does not have the message before archive import.") - # Wait for another community member to download the archive index manifest - logging.info("Waiting for another member to download manifest of the archive index...") - self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout) - logging.info("Success! Manifest of the archive index fetched by another member!") - - # Then wait for index download completed signal - at this stage the index and index CID files - # should both exist in the file system of another member. + # Wait for index download completed signal - index should be now downloaded + # for Codex. logging.info("Waiting for another member to download archive index...") self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) logging.info("Success! Archive index downloaded by another member!") - # Ensure that the community archive index exists in the file system of another member - logging.info("Verifying that another member has index CID file...") - has_archive_index = self.another_member.wakuext_service.has_community_archive(community_id) - assert has_archive_index is True, "Another member should have community archive index after messages are sent" - logging.info("Success! Another member has index CID file.") - - # Wait for the community archives to be downloaded for another member. + # Wait for seeding signal - all archives should be now downloaded to Codex node + # and index should be seeding. logging.info("Waiting for another member to download ALL history archives...") self.another_member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) logging.info("Success! Another member has downloaded ALL history archives!") - # Wait for the archive to be downloaded by another member - # self.another_member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) + # IndexCid in the database and HasCid on the CodexClient returns true (seeding). + logging.info("Verifying that another member has index CID file...") + has_archive_index = self.another_member.wakuext_service.has_community_archive(community_id) + assert has_archive_index is True, "Another member should have community archive index after messages are sent" + logging.info("Success! Another member has index CID file.") # Ensure that another member has downloaded the community archive and stored its ID in database logging.info("Verifying that another member has archive ID (HASH) recorded in the database...") @@ -244,7 +232,7 @@ def test_community_archive_exists_for_default_chat(self): # Wait for the community archive to be created for the community owner self.creator.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_CREATED.value, timeout=message_archive_interval + 10) - # Ensure that the community archive exists in the file system for the community owner + # Ensure that the archive index is seeding. 
has_archive = self.creator.wakuext_service.has_community_archive(community_id) assert has_archive is True, "Creator should have community archive after messages are sent" @@ -258,7 +246,7 @@ def test_archive_is_not_created_without_messages(self): time.sleep(message_archive_interval + 10) - # Ensure that the community archive exists in the file system for the community owner + # Ensure that no archive index is seeding. has_archive = self.creator.wakuext_service.has_community_archive(community_id) assert has_archive is False, "Creator should not have community archive without message" @@ -286,31 +274,28 @@ def test_different_archives_are_created_with_multiple_messages(self): # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds archive_timeout = 10 - # When wait for index download completed signal - at this stage the index and index CID files - # should both exist in the file system of the member. + # Wait for the archive index to be downloaded from Codex node. logging.info("Waiting for community member to download archive index...") self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout) logging.info("Success! Archive index downloaded!") - # Ensure that the community archive index CID file exists in the file system for the member. - # After successfully downloading the archive index, its CID is stored in the the - # index CID file and the file is written immediately after the archive index has been downloaded. - # Notice that at this stage, the node still does not have any single archive downloaded. + # Wait for the seeding signal. + logging.info("Waiting for community member to download ALL history archives...") + self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) + logging.info("Success! Community member has downloaded ALL history archives!") + + # The archive index should be "seeding": index CID in the database and + # HasCid on the CodexClient returns true. logging.info("Verifying that community member has index CID file...") has_archive_index = self.member.wakuext_service.has_community_archive(community_id) assert has_archive_index is True, "Member should have community archive index after messages are sent" logging.info("Success! Community member has index CID file!") - # Wait for the community archives to be downloaded for the first member. - logging.info("Waiting for community member to download ALL history archives...") - self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout) - logging.info("Success! Community member has downloaded ALL history archives!") - # Once the historyArchivesSeeding signal is received, the database # should be already updated: archive ID (HASH) should be stored in the database. logging.info("Verifying that archive ID (HASH) is recorded in the database...") download_archive_ids = self.member.wakuext_service.get_downloaded_message_archive_ids(community_id) - assert len(download_archive_ids) == i + 1, "Member should have exactly 1 archive ID downloaded" + assert len(download_archive_ids) == i + 1, f"Member should have exactly {i+1} archive IDs downloaded" logging.info("Success! 
Archive ID (HASH) is recorded in the database!")
 
     def test_archive_is_downloaded_after_logout_login(self):
@@ -363,21 +348,17 @@ def test_archive_is_downloaded_after_logout_login(self):
         # We need to wait for the archive dispatch + download + import which should not take more than 10 seconds
         archive_timeout = 20
 
-        logging.info("Waiting for community member to download manifest of the archive index...")
-        # Wait for the community member to download the archive index manifest
-        self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_MANIFEST_FETCHED.value, timeout=archive_timeout)
-        logging.info("Success! Manifest of the archive index fetched!")
-
-        # When wait for index download completed signal - at this stage the index and index CID files
-        # should both exist in the file system of the member.
+        # When wait for index download completed signal.
         logging.info("Waiting for community member to download archive index...")
         self.member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout)
         logging.info("Success! Archive index downloaded!")
 
-        # Ensure that the community archive index CID file exists in the file system for the member.
-        # After successfully downloading the archive index, its CID is stored in the the
-        # index CID file and the file is written immediately after the archive index has been downloaded.
-        # Notice that at this stage, the node still does not have any single archive downloaded.
+        # Wait for the seeding signal.
+        logging.info("Waiting for community member to download ALL history archives...")
+        self.member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout)
+        logging.info("Success! Community member has downloaded ALL history archives!")
+
+        # Confirm that the archive index is seeding.
         logging.info("Verifying that community member has index CID file...")
         has_archive_index = self.member.wakuext_service.has_community_archive(community_id)
         assert has_archive_index is True, "Member should have community archive index after messages are sent"

From 41186b38b1a376803ca7d12a2ccbd53fe1b8cb30 Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Tue, 18 Nov 2025 19:22:33 +0100
Subject: [PATCH 75/75] Add more comments

---
 .../tests/test_wakuext_community_archives.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/tests-functional/tests/test_wakuext_community_archives.py b/tests-functional/tests/test_wakuext_community_archives.py
index 8bd21db7055..2b25dada340 100644
--- a/tests-functional/tests/test_wakuext_community_archives.py
+++ b/tests-functional/tests/test_wakuext_community_archives.py
@@ -69,6 +69,10 @@ def setup_backends(self, backend_new_profile):
         }
 
     def test_community_archive_index_exists(self):
+        # Set the message archive interval to 80 seconds, which is longer than the retention policy
+        # of the Waku node.
+        # So we expect to retrieve the archive from Codex even after the Waku node
+        # has already deleted the messages locally.
         message_archive_interval = 80
         self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
 
@@ -212,6 +216,8 @@ def test_community_archive_exists_for_default_chat(self):
         message_archive_interval = 10
         self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
 
@@ -237,6 +243,8 @@ def test_community_archive_exists_for_default_chat(self):
         assert has_archive is True, "Creator should have community archive after messages are sent"
 
     def test_archive_is_not_created_without_messages(self):
+        # Set the message archive interval to 10 seconds for a faster test;
+        # we only want to check that no archive is created when there are no messages.
         message_archive_interval = 10
         self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
 
@@ -251,6 +259,9 @@ def test_archive_is_not_created_without_messages(self):
         assert has_archive is False, "Creator should not have community archive without message"
 
     def test_different_archives_are_created_with_multiple_messages(self):
+        # Set the message archive interval to 10 seconds for a faster test.
+        # We want to check that different archives are created for multiple messages,
+        # so it does not matter if the Waku store node has the messages locally.
         message_archive_interval = 10
         self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
 
@@ -299,6 +310,10 @@ def test_different_archives_are_created_with_multiple_messages(self):
         logging.info("Success! Archive ID (HASH) is recorded in the database!")
 
     def test_archive_is_downloaded_after_logout_login(self):
+        # Set the message archive interval to 10 seconds for a faster test.
+        # We want to check that the archive is downloaded after logout/login,
+        # so it does not matter if the Waku store node has the messages locally.
+
         message_archive_interval = 10
         self.creator.wakuext_service.update_message_archive_interval(message_archive_interval)
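The member-side sequence these tests assert can be summarised in a short Python sketch; the helpers are the ones used above, while member and community_id are assumed to be set up as in the suite:

	archive_timeout = 10

	# The archive index is fetched from the Codex node first...
	member.wait_for_signal(SignalType.COMMUNITY_ARCHIVE_INDEX_DOWNLOAD_COMPLETED.value, timeout=archive_timeout)

	# ...then all archives are downloaded and the index CID is recorded as lastSeenIndexCid.
	member.wait_for_signal(SignalType.COMMUNITY_HISTORY_ARCHIVES_SEEDING.value, timeout=archive_timeout)

	# The index is now "seeding": has_community_archive checks lastSeenIndexCid and HasCid.
	assert member.wakuext_service.has_community_archive(community_id)
	assert len(member.wakuext_service.get_downloaded_message_archive_ids(community_id)) > 0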