
Commit 5336b7f

fs_store and InMemory/repo.rs had different visions of what compact should do. I believe this rectifies them.

1 parent 6c221cd commit 5336b7f

7 files changed: +28 -36 lines changed

src/fs_store.rs

+8-31
@@ -151,36 +151,30 @@ impl FsStore {
         Ok(())
     }

-    pub fn compact(&self, id: &DocumentId, full_doc: &[u8]) -> Result<(), Error> {
+    pub fn compact(
+        &self,
+        id: &DocumentId,
+        full_doc: &[u8],
+        new_heads: Vec<ChangeHash>,
+    ) -> Result<(), Error> {
         let paths = DocIdPaths::from(id);

         // Load all the data we have into a doc
         match Chunks::load(&self.root, id) {
             Ok(Some(chunks)) => {
-                let doc = chunks
-                    .to_doc()
-                    .map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?;
-
                 // Write the snapshot
-                let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads());
-                let chunk = doc.save();
-                write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?;
+                let output_chunk_name = SavedChunkName::new_snapshot(new_heads);
+                write_chunk(&self.root, &paths, full_doc, output_chunk_name.clone())?;

                 // Remove all the old data
                 for incremental in chunks.incrementals.keys() {
                     let path = paths.chunk_path(&self.root, incremental);
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
-                let just_wrote = paths.chunk_path(&self.root, &output_chunk_name);
                 for snapshot in chunks.snapshots.keys() {
                     let path = paths.chunk_path(&self.root, snapshot);

-                    if path == just_wrote {
-                        tracing::trace!("Somehow trying to delete the same path we just wrote to. Not today Satan");
-                        continue;
-                    }
-
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
@@ -441,21 +435,6 @@ impl Chunks {
             incrementals,
         }))
     }
-
-    fn to_doc(&self) -> Result<automerge::Automerge, automerge::AutomergeError> {
-        let mut bytes = Vec::new();
-        for chunk in self.snapshots.values() {
-            bytes.extend(chunk);
-        }
-        for chunk in self.incrementals.values() {
-            bytes.extend(chunk);
-        }
-
-        automerge::Automerge::load_with_options(
-            &bytes,
-            automerge::LoadOptions::new().on_partial_load(automerge::OnPartialLoad::Ignore),
-        )
-    }
 }

 mod error {
@@ -499,8 +478,6 @@ mod error {
         ErrReadingChunkFile(PathBuf, std::io::Error),
         #[error("error creating level 2 path {0}: {1}")]
         CreateLevel2Path(PathBuf, std::io::Error),
-        #[error("error loading doc to compact: {0}")]
-        LoadDocToCompact(automerge::AutomergeError),
         #[error("error creating temp file: {0}")]
         CreateTempFile(std::io::Error),
         #[error("error writing temp file {0}: {1}")]

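Note on the new calling convention: FsStore::compact no longer reconstructs the document from the chunks on disk; the caller supplies both the serialized document and the heads it was saved at. A minimal sketch, assuming an existing `store: FsStore`, a `doc_id: DocumentId`, and an automerge document `doc` (mirroring the updated test in tests/fs_storage/main.rs below):

    // The caller produces the snapshot bytes and the matching heads; FsStore
    // writes those bytes to a snapshot chunk named after the heads and then
    // deletes the superseded incremental and snapshot chunks.
    let full_doc = doc.save();
    let new_heads = doc.get_heads();
    store.compact(&doc_id, &full_doc, new_heads).unwrap();
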
src/interfaces.rs

+2
@@ -1,3 +1,4 @@
+use automerge::ChangeHash;
 use futures::future::BoxFuture;
 use serde::{Deserialize, Serialize};
 use std::fmt::{Display, Formatter};
@@ -113,5 +114,6 @@ pub trait Storage: Send {
         &self,
         _id: DocumentId,
         _full_doc: Vec<u8>,
+        _new_heads: Vec<ChangeHash>,
     ) -> BoxFuture<'static, Result<(), StorageError>>;
 }

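Implementors of the Storage trait now receive the heads alongside the snapshot bytes, so a backend can record which heads a snapshot corresponds to (as FsStore does with its snapshot chunk names). A hedged sketch of just the compact method for a hypothetical backend: the `HeadsTrackingStorage` type and its `docs` field are invented for illustration, the other required Storage methods are omitted, and `futures::FutureExt` is assumed to be in scope (as in test_utils/src/storage_utils.rs below).

    struct HeadsTrackingStorage {
        // Latest snapshot bytes plus the heads they were saved at, per document.
        docs: std::sync::Mutex<std::collections::HashMap<DocumentId, (Vec<u8>, Vec<ChangeHash>)>>,
    }

    // Inside `impl Storage for HeadsTrackingStorage` (remaining trait methods omitted):
    fn compact(
        &self,
        id: DocumentId,
        full_doc: Vec<u8>,
        new_heads: Vec<ChangeHash>,
    ) -> BoxFuture<'static, Result<(), StorageError>> {
        // Record the snapshot together with the heads it corresponds to.
        self.docs.lock().unwrap().insert(id, (full_doc, new_heads));
        futures::future::ready(Ok(())).boxed()
    }
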
src/repo.rs

+4-1
@@ -743,7 +743,10 @@ impl DocumentInfo {
                 (doc.automerge.save(), doc.automerge.get_heads())
             };
             self.patches_since_last_compact = 0;
-            (storage.compact(document_id.clone(), to_save), new_heads)
+            (
+                storage.compact(document_id.clone(), to_save, new_heads.clone()),
+                new_heads,
+            )
         } else {
             let (to_save, new_heads) = {
                 let doc = self.document.read();

src/tokio/fs_storage.rs

+7-1
@@ -78,11 +78,17 @@ impl Storage for FsStorage {
         &self,
         id: crate::DocumentId,
         full_doc: Vec<u8>,
+        new_heads: Vec<automerge::ChangeHash>,
     ) -> BoxFuture<'static, Result<(), StorageError>> {
         let inner = Arc::clone(&self.inner);
         let inner_id = id.clone();
         self.handle
-            .spawn_blocking(move || inner.lock().unwrap().compact(&inner_id, &full_doc))
+            .spawn_blocking(move || {
+                inner
+                    .lock()
+                    .unwrap()
+                    .compact(&inner_id, &full_doc, &new_heads)
+            })
             .map(handle_joinerror)
             .map_err(move |e| {
                 tracing::error!(err=?e, doc=?id, "error compacting chunk to filesystem");

test_utils/src/storage_utils.rs

+4
@@ -1,3 +1,4 @@
+use automerge::ChangeHash;
 use automerge_repo::{DocumentId, Storage, StorageError};
 use futures::future::{BoxFuture, TryFutureExt};
 use futures::FutureExt;
@@ -30,6 +31,7 @@ impl Storage for SimpleStorage {
         &self,
         _id: DocumentId,
         _chunk: Vec<u8>,
+        _new_heads: Vec<ChangeHash>,
     ) -> BoxFuture<'static, Result<(), StorageError>> {
         futures::future::ready(Ok(())).boxed()
     }
@@ -76,6 +78,7 @@ impl Storage for InMemoryStorage {
         &self,
         id: DocumentId,
         full_doc: Vec<u8>,
+        _new_heads: Vec<ChangeHash>,
     ) -> BoxFuture<'static, Result<(), StorageError>> {
         let mut documents = self.documents.lock();
         documents.insert(id, full_doc);
@@ -202,6 +205,7 @@ impl Storage for AsyncInMemoryStorage {
         &self,
         id: DocumentId,
         full_doc: Vec<u8>,
+        _new_heads: Vec<ChangeHash>,
     ) -> BoxFuture<'static, Result<(), StorageError>> {
         let (tx, rx) = oneshot();
         self.chan

tests/document_changed.rs

-1
@@ -87,7 +87,6 @@ fn test_document_changed_over_sync() {

     // Spawn a task that awaits the document change.
     let (done_sync_sender, mut done_sync_receiver) = channel(1);
-    let repo_id = repo_handle_1.get_repo_id().clone();
     rt.spawn(async move {
         loop {
             // Await changes until the edit comes through over sync.

tests/fs_storage/main.rs

+3-2
@@ -1,7 +1,6 @@
 use automerge::transaction::Transactable;
 use automerge_repo::fs_store;
 use itertools::Itertools;
-use uuid::Uuid;

 /// Asserts that the &[u8] in `data` is some permutation of the chunks of Vec<&[u8> in `expected`
 macro_rules! assert_permutation_of {
@@ -45,7 +44,9 @@ fn fs_store_crud() {
     assert_permutation_of!(result, vec![change1.bytes(), change2.bytes()]);

     // now compact
-    store.compact(&doc_id, &[]).unwrap();
+    store
+        .compact(&doc_id, &doc.save(), doc.get_heads())
+        .unwrap();
     let result = store.get(&doc_id).unwrap().unwrap();
     let expected = doc.save();
     assert_eq!(result, expected);
