From bab4d005d748568c1e6c8563ada0d56646925613 Mon Sep 17 00:00:00 2001
From: Alexander Medvedev <lilalexmed@proton.me>
Date: Mon, 17 Mar 2025 21:31:35 +0100
Subject: [PATCH 1/3] entity saving

---
 .../src/client/play/chunk_data.rs             |   2 +-
 pumpkin-world/src/block/state.rs              |   2 +-
 pumpkin-world/src/chunk/format/mod.rs         | 167 -----
 pumpkin-world/src/generation/generator.rs     |   2 +-
 .../src/generation/generic_generator.rs       |   3 +-
 .../src/generation/implementation/test.rs     |   3 +-
 pumpkin-world/src/level.rs                    |  43 +-
 pumpkin-world/src/lib.rs                      |   2 +-
 .../src/storage/format/anvil/chunk.rs         | 303 ++++++++
 .../src/storage/format/anvil/entity.rs        |  97 +++
 .../anvil.rs => storage/format/anvil/mod.rs}  | 650 +++++++-----------
 .../src/{chunk => storage}/format/linear.rs   |  27 +-
 pumpkin-world/src/storage/format/mod.rs       | 118 ++++
 .../io/chunk_file_manager.rs                  |   2 +-
 .../src/{chunk => storage}/io/mod.rs          |   2 +-
 pumpkin-world/src/{chunk => storage}/mod.rs   |   3 +-
 pumpkin-world/src/world_info/anvil.rs         |   2 +
 .../src/plugin/api/events/world/chunk_load.rs |   2 +-
 .../src/plugin/api/events/world/chunk_save.rs |   2 +-
 .../src/plugin/api/events/world/chunk_send.rs |   2 +-
 pumpkin/src/world/mod.rs                      |   2 +-
 21 files changed, 825 insertions(+), 611 deletions(-)
 delete mode 100644 pumpkin-world/src/chunk/format/mod.rs
 create mode 100644 pumpkin-world/src/storage/format/anvil/chunk.rs
 create mode 100644 pumpkin-world/src/storage/format/anvil/entity.rs
 rename pumpkin-world/src/{chunk/format/anvil.rs => storage/format/anvil/mod.rs} (81%)
 rename pumpkin-world/src/{chunk => storage}/format/linear.rs (95%)
 create mode 100644 pumpkin-world/src/storage/format/mod.rs
 rename pumpkin-world/src/{chunk => storage}/io/chunk_file_manager.rs (99%)
 rename pumpkin-world/src/{chunk => storage}/io/mod.rs (98%)
 rename pumpkin-world/src/{chunk => storage}/mod.rs (99%)

diff --git a/pumpkin-protocol/src/client/play/chunk_data.rs b/pumpkin-protocol/src/client/play/chunk_data.rs
index cb89f8c35..ca7057a68 100644
--- a/pumpkin-protocol/src/client/play/chunk_data.rs
+++ b/pumpkin-protocol/src/client/play/chunk_data.rs
@@ -5,7 +5,7 @@ use pumpkin_data::packet::clientbound::PLAY_LEVEL_CHUNK_WITH_LIGHT;
 use pumpkin_macros::packet;
 use pumpkin_world::{
     DIRECT_PALETTE_BITS,
-    chunk::{ChunkData, SUBCHUNKS_COUNT},
+    storage::{ChunkData, SUBCHUNKS_COUNT},
 };
 
 #[packet(PLAY_LEVEL_CHUNK_WITH_LIGHT)]
diff --git a/pumpkin-world/src/block/state.rs b/pumpkin-world/src/block/state.rs
index edb2f80df..13c344ca5 100644
--- a/pumpkin-world/src/block/state.rs
+++ b/pumpkin-world/src/block/state.rs
@@ -1,4 +1,4 @@
-use crate::chunk::format::PaletteEntry;
+use crate::storage::format::PaletteEntry;
 
 use super::registry::{get_block, get_state_by_state_id};
 
diff --git a/pumpkin-world/src/chunk/format/mod.rs b/pumpkin-world/src/chunk/format/mod.rs
deleted file mode 100644
index 69b5832d0..000000000
--- a/pumpkin-world/src/chunk/format/mod.rs
+++ /dev/null
@@ -1,167 +0,0 @@
-use std::collections::HashMap;
-
-use pumpkin_data::chunk::ChunkStatus;
-use pumpkin_nbt::{from_bytes, nbt_long_array};
-
-use pumpkin_util::math::{ceil_log2, vector2::Vector2};
-use serde::{Deserialize, Serialize};
-
-use crate::{
-    block::ChunkBlockState,
-    coordinates::{ChunkRelativeBlockCoordinates, Height},
-};
-
-use super::{
-    CHUNK_AREA, ChunkBlocks, ChunkData, ChunkHeightmaps, ChunkParsingError, SUBCHUNK_VOLUME,
-};
-
-pub mod anvil;
-pub mod linear;
-
-// I can't use an tag because it will break ChunkNBT, but status need to have a big S, so "Status"
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "PascalCase")]
-pub struct ChunkStatusWrapper {
-    status: ChunkStatus,
-}
-
-impl ChunkData {
-    pub fn from_bytes(
-        chunk_data: &[u8],
-        position: Vector2<i32>,
-    ) -> Result<Self, ChunkParsingError> {
-        // TODO: Implement chunk stages?
-        if from_bytes::<ChunkStatusWrapper>(chunk_data)
-            .map_err(ChunkParsingError::FailedReadStatus)?
-            .status
-            != ChunkStatus::Full
-        {
-            return Err(ChunkParsingError::ChunkNotGenerated);
-        }
-
-        let chunk_data = from_bytes::<ChunkNbt>(chunk_data)
-            .map_err(|e| ChunkParsingError::ErrorDeserializingChunk(e.to_string()))?;
-
-        if chunk_data.x_pos != position.x || chunk_data.z_pos != position.z {
-            return Err(ChunkParsingError::ErrorDeserializingChunk(format!(
-                "Expected data for chunk {},{} but got it for {},{}!",
-                position.x, position.z, chunk_data.x_pos, chunk_data.z_pos,
-            )));
-        }
-
-        // this needs to be boxed, otherwise it will cause a stack-overflow
-        let mut blocks = ChunkBlocks::Homogeneous(0);
-        let mut block_index = 0; // which block we're currently at
-
-        for section in chunk_data.sections.into_iter() {
-            let block_states = match section.block_states {
-                Some(states) => states,
-                None => continue, // TODO @lukas0008 this should instead fill all blocks with the only element of the palette
-            };
-
-            let palette = block_states
-                .palette
-                .iter()
-                .map(ChunkBlockState::from_palette)
-                .collect::<Vec<_>>();
-
-            let block_data = match block_states.data {
-                None => {
-                    // We skipped placing an empty subchunk.
-                    // We need to increase the y coordinate of the next subchunk being placed.
-                    block_index += SUBCHUNK_VOLUME;
-                    continue;
-                }
-                Some(d) => d,
-            };
-
-            // How many bits each block has in one of the palette u64s
-            let block_bit_size = if palette.len() < 16 {
-                4
-            } else {
-                ceil_log2(palette.len() as u32).max(4)
-            };
-            // How many blocks there are in one of the palettes u64s
-            let blocks_in_palette = 64 / block_bit_size;
-
-            let mask = (1 << block_bit_size) - 1;
-            'block_loop: for block in block_data.iter() {
-                for i in 0..blocks_in_palette {
-                    let index = (block >> (i * block_bit_size)) & mask;
-                    let block = &palette[index as usize];
-
-                    // TODO allow indexing blocks directly so we can just use block_index and save some time?
-                    // this is fine because we initialized the heightmap of `blocks`
-                    // from the cached value in the world file
-                    blocks.set_block_no_heightmap_update(
-                        ChunkRelativeBlockCoordinates {
-                            z: ((block_index % CHUNK_AREA) / 16).into(),
-                            y: Height::from_absolute((block_index / CHUNK_AREA) as u16),
-                            x: (block_index % 16).into(),
-                        },
-                        block.get_id(),
-                    );
-
-                    block_index += 1;
-
-                    // if `SUBCHUNK_VOLUME `is not divisible by `blocks_in_palette` the block_data
-                    // can sometimes spill into other subchunks. We avoid that by aborting early
-                    if (block_index % SUBCHUNK_VOLUME) == 0 {
-                        break 'block_loop;
-                    }
-                }
-            }
-        }
-
-        Ok(ChunkData {
-            blocks,
-            heightmap: chunk_data.heightmaps,
-            position,
-            // This chunk is read from disk, so it has not been modified
-            dirty: false,
-        })
-    }
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-#[serde(rename_all = "PascalCase")]
-pub struct PaletteEntry {
-    // block name
-    pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub properties: Option<HashMap<String, String>>,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-struct ChunkSection {
-    #[serde(rename = "Y")]
-    y: i8,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    block_states: Option<ChunkSectionBlockStates>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-struct ChunkSectionBlockStates {
-    #[serde(
-        serialize_with = "nbt_long_array",
-        skip_serializing_if = "Option::is_none"
-    )]
-    data: Option<Box<[i64]>>,
-    palette: Vec<PaletteEntry>,
-}
-
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "PascalCase")]
-struct ChunkNbt {
-    data_version: i32,
-    #[serde(rename = "xPos")]
-    x_pos: i32,
-    // #[serde(rename = "yPos")]
-    //y_pos: i32,
-    #[serde(rename = "zPos")]
-    z_pos: i32,
-    status: ChunkStatus,
-    #[serde(rename = "sections")]
-    sections: Vec<ChunkSection>,
-    heightmaps: ChunkHeightmaps,
-}
diff --git a/pumpkin-world/src/generation/generator.rs b/pumpkin-world/src/generation/generator.rs
index e83ab134b..630dfac92 100644
--- a/pumpkin-world/src/generation/generator.rs
+++ b/pumpkin-world/src/generation/generator.rs
@@ -4,9 +4,9 @@ use pumpkin_util::math::vector2::Vector2;
 use pumpkin_util::math::vector3::Vector3;
 
 use crate::block::state::ChunkBlockState;
-use crate::chunk::{ChunkBlocks, ChunkData};
 use crate::coordinates::{BlockCoordinates, ChunkRelativeBlockCoordinates, XZBlockCoordinates};
 use crate::generation::Seed;
+use crate::storage::{ChunkBlocks, ChunkData};
 
 pub trait GeneratorInit {
     fn new(seed: Seed) -> Self;
diff --git a/pumpkin-world/src/generation/generic_generator.rs b/pumpkin-world/src/generation/generic_generator.rs
index 5e268e7e7..3a74a6323 100644
--- a/pumpkin-world/src/generation/generic_generator.rs
+++ b/pumpkin-world/src/generation/generic_generator.rs
@@ -3,8 +3,8 @@ use pumpkin_util::math::vector2::Vector2;
 
 use crate::{
     WORLD_LOWEST_Y,
-    chunk::{ChunkBlocks, ChunkData},
     coordinates::{ChunkRelativeBlockCoordinates, ChunkRelativeXZBlockCoordinates},
+    storage::{ChunkBlocks, ChunkData},
 };
 
 use super::{
@@ -76,6 +76,7 @@ impl<B: BiomeGenerator, T: PerlinTerrainGenerator> WorldGenerator for GenericGen
             blocks,
             heightmap: Default::default(),
             position: at,
+            entities: vec![], // TODO: chunks can have initial entities
             // We just generated this chunk! Mark it as dirty
             dirty: true,
         }
diff --git a/pumpkin-world/src/generation/implementation/test.rs b/pumpkin-world/src/generation/implementation/test.rs
index 8b0d01683..4dfbc3cc5 100644
--- a/pumpkin-world/src/generation/implementation/test.rs
+++ b/pumpkin-world/src/generation/implementation/test.rs
@@ -2,13 +2,13 @@ use pumpkin_util::math::{vector2::Vector2, vector3::Vector3};
 
 use crate::{
     WORLD_LOWEST_Y, WORLD_MAX_Y,
-    chunk::{ChunkBlocks, ChunkData},
     coordinates::ChunkRelativeBlockCoordinates,
     generation::{
         GlobalRandomConfig, Seed, WorldGenerator, generator::GeneratorInit,
         noise_router::proto_noise_router::GlobalProtoNoiseRouter, proto_chunk::ProtoChunk,
     },
     noise_router::NOISE_ROUTER_ASTS,
+    storage::{ChunkBlocks, ChunkData},
 };
 
 pub struct TestGenerator {
@@ -57,6 +57,7 @@ impl WorldGenerator for TestGenerator {
             blocks,
             heightmap: Default::default(),
             position: at,
+            entities: vec![], // TODO: chunks can have initial entities
             // This chunk was just created! We want to say its been changed
             dirty: true,
         }
diff --git a/pumpkin-world/src/level.rs b/pumpkin-world/src/level.rs
index 7343661d1..66467be4f 100644
--- a/pumpkin-world/src/level.rs
+++ b/pumpkin-world/src/level.rs
@@ -11,13 +11,16 @@ use tokio::{
 };
 
 use crate::{
-    chunk::{
+    generation::{Seed, WorldGenerator, get_world_gen},
+    lock::{LevelLocker, anvil::AnvilLevelLocker},
+    storage::{
         ChunkData, ChunkParsingError, ChunkReadingError,
-        format::{anvil::AnvilChunkFile, linear::LinearFile},
+        format::{
+            anvil::{AnvilChunkFile, chunk::AnvilChunkFormat},
+            linear::LinearFile,
+        },
         io::{ChunkIO, LoadedData, chunk_file_manager::ChunkFileManager},
     },
-    generation::{Seed, WorldGenerator, get_world_gen},
-    lock::{LevelLocker, anvil::AnvilLevelLocker},
     world_info::{
         LevelData, WorldInfoError, WorldInfoReader, WorldInfoWriter,
         anvil::{AnvilLevelInfo, LEVEL_DAT_BACKUP_FILE_NAME, LEVEL_DAT_FILE_NAME},
@@ -49,7 +52,7 @@ pub struct Level {
     loaded_chunks: Arc<DashMap<Vector2<i32>, SyncChunk>>,
     chunk_watchers: Arc<DashMap<Vector2<i32>, usize>>,
 
-    chunk_saver: Arc<dyn ChunkIO<Data = SyncChunk>>,
+    chunk_io: Arc<dyn ChunkIO<Data = SyncChunk>>,
     world_gen: Arc<dyn WorldGenerator>,
     // Gets unlocked when dropped
     // TODO: Make this a trait
@@ -59,6 +62,7 @@ pub struct Level {
 #[derive(Clone)]
 pub struct LevelFolder {
     pub root_folder: PathBuf,
+    pub entities_folder: PathBuf,
     pub region_folder: PathBuf,
 }
 
@@ -67,10 +71,15 @@ impl Level {
         // If we are using an already existing world we want to read the seed from the level.dat, If not we want to check if there is a seed in the config, if not lets create a random one
         let region_folder = root_folder.join("region");
         if !region_folder.exists() {
-            std::fs::create_dir_all(&region_folder).expect("Failed to create Region folder");
+            std::fs::create_dir_all(&region_folder).expect("Failed to create region folder");
+        }
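+        // Entity data lives in its own Anvil region files under `entities/`,
+        // mirroring the block data stored under `region/`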
+        let entities_folder = root_folder.join("entities");
+        if !entities_folder.exists() {
+            std::fs::create_dir_all(&entities_folder).expect("Failed to create entities folder");
         }
         let level_folder = LevelFolder {
             root_folder,
+            entities_folder,
             region_folder,
         };
 
@@ -110,10 +119,10 @@ impl Level {
         let seed = Seed(level_info.world_gen_settings.seed as u64);
         let world_gen = get_world_gen(seed).into();
 
-        let chunk_saver: Arc<dyn ChunkIO<Data = SyncChunk>> = match advanced_config().chunk.format {
+        let chunk_io: Arc<dyn ChunkIO<Data = SyncChunk>> = match advanced_config().chunk.format {
             //ChunkFormat::Anvil => (Arc::new(AnvilChunkFormat), Arc::new(AnvilChunkFormat)),
             ChunkFormat::Linear => Arc::new(ChunkFileManager::<LinearFile>::default()),
-            ChunkFormat::Anvil => Arc::new(ChunkFileManager::<AnvilChunkFile>::default()),
+            ChunkFormat::Anvil => Arc::new(ChunkFileManager::<AnvilChunkFormat>::default()),
         };
 
         Self {
@@ -121,7 +130,7 @@ impl Level {
             world_gen,
             world_info_writer: Arc::new(AnvilLevelInfo),
             level_folder,
-            chunk_saver,
+            chunk_io,
             spawn_chunks: Arc::new(DashMap::new()),
             loaded_chunks: Arc::new(DashMap::new()),
             chunk_watchers: Arc::new(DashMap::new()),
@@ -134,7 +143,7 @@ impl Level {
         log::info!("Saving level...");
 
         // wait for chunks currently saving in other threads
-        self.chunk_saver.block_and_await_ongoing_tasks().await;
+        self.chunk_io.block_and_await_ongoing_tasks().await;
 
         // save all chunks currently in memory
         let chunks_to_write = self
@@ -145,7 +154,7 @@ impl Level {
         self.loaded_chunks.clear();
 
         // TODO: I think the chunk_saver should be at the server level
-        self.chunk_saver.clear_watched_chunks().await;
+        self.chunk_io.clear_watched_chunks().await;
         self.write_chunks(chunks_to_write).await;
 
         // then lets save the world info
@@ -166,7 +175,7 @@ impl Level {
     }
 
     pub async fn clean_up_log(&self) {
-        self.chunk_saver.clean_up_log().await;
+        self.chunk_io.clean_up_log().await;
     }
 
     pub fn list_cached(&self) {
@@ -197,9 +206,7 @@ impl Level {
             }
         }
 
-        self.chunk_saver
-            .watch_chunks(&self.level_folder, chunks)
-            .await;
+        self.chunk_io.watch_chunks(&self.level_folder, chunks).await;
     }
 
     #[inline]
@@ -233,7 +240,7 @@ impl Level {
             }
         }
 
-        self.chunk_saver
+        self.chunk_io
             .unwatch_chunks(&self.level_folder, chunks)
             .await;
         chunks_to_clean
@@ -320,7 +327,7 @@ impl Level {
             return;
         }
 
-        let chunk_saver = self.chunk_saver.clone();
+        let chunk_saver = self.chunk_io.clone();
         let level_folder = self.level_folder.clone();
 
         trace!("Sending chunks to ChunkIO {:}", chunks_to_write.len());
@@ -465,7 +472,7 @@ impl Level {
         set.spawn(handle_load);
         set.spawn(handle_generate);
 
-        self.chunk_saver
+        self.chunk_io
             .fetch_chunks(&self.level_folder, &remaining_chunks, load_bridge_send)
             .await;
         let _ = set.join_all().await;
diff --git a/pumpkin-world/src/lib.rs b/pumpkin-world/src/lib.rs
index 9d39cfd0c..68fd828fb 100644
--- a/pumpkin-world/src/lib.rs
+++ b/pumpkin-world/src/lib.rs
@@ -2,7 +2,6 @@ use pumpkin_util::math::vector2::Vector2;
 
 pub mod biome;
 pub mod block;
-pub mod chunk;
 pub mod coordinates;
 pub mod cylindrical_chunk_iterator;
 pub mod dimension;
@@ -11,6 +10,7 @@ pub mod item;
 pub mod level;
 mod lock;
 mod noise_router;
+pub mod storage;
 pub mod world_info;
 pub const WORLD_HEIGHT: usize = 384;
 pub const WORLD_LOWEST_Y: i16 = -64;
diff --git a/pumpkin-world/src/storage/format/anvil/chunk.rs b/pumpkin-world/src/storage/format/anvil/chunk.rs
new file mode 100644
index 000000000..99eb49ef1
--- /dev/null
+++ b/pumpkin-world/src/storage/format/anvil/chunk.rs
@@ -0,0 +1,303 @@
+use std::{
+    collections::{HashMap, HashSet},
+    path::PathBuf,
+};
+
+use super::{
+    AnvilFile, ChunkNbt, ChunkSection, ChunkSectionBlockStates, PaletteEntry, WORLD_DATA_VERSION,
+};
+use async_trait::async_trait;
+use bytes::Bytes;
+use indexmap::IndexMap;
+use pumpkin_data::{block::Block, chunk::ChunkStatus};
+use pumpkin_nbt::{from_bytes, to_bytes};
+use pumpkin_util::math::{ceil_log2, vector2::Vector2};
+
+use crate::{
+    block::ChunkBlockState,
+    coordinates::{ChunkRelativeBlockCoordinates, Height},
+    storage::{
+        CHUNK_AREA, ChunkBlocks, ChunkData, ChunkParsingError, ChunkReadingError,
+        ChunkSerializingError, ChunkWritingError, SUBCHUNK_VOLUME,
+        format::{BytesToData, ChunkStatusWrapper, DataToBytes, get_chunk_index},
+        io::{ChunkSerializer, LoadedData},
+    },
+};
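+
+/// Block-chunk serializer for the Anvil format: a thin wrapper around the shared
+/// `AnvilFile` region container that converts `ChunkData` to and from chunk NBT.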
+#[derive(Default)]
+pub struct AnvilChunkFormat {
+    anvil: AnvilFile,
+}
+
+#[async_trait]
+impl ChunkSerializer for AnvilChunkFormat {
+    type Data = ChunkData;
+    type WriteBackend = PathBuf;
+
+    fn get_chunk_key(chunk: &Vector2<i32>) -> String {
+        AnvilFile::get_chunk_key(chunk)
+    }
+
+    fn should_write(&self, is_watched: bool) -> bool {
+        self.anvil.should_write(is_watched)
+    }
+
+    async fn write(&self, path: PathBuf) -> Result<(), std::io::Error> {
+        self.anvil.write(path).await
+    }
+
+    fn read(bytes: Bytes) -> Result<Self, ChunkReadingError> {
+        let anvil = AnvilFile::read(bytes)?;
+        Ok(Self { anvil })
+    }
+
+    async fn update_chunk(&mut self, chunk: &ChunkData) -> Result<(), ChunkWritingError> {
+        self.anvil.update_chunk::<Self>(chunk.position, chunk).await
+    }
+
+    async fn get_chunks(
+        &self,
+        chunks: &[Vector2<i32>],
+        stream: tokio::sync::mpsc::Sender<LoadedData<ChunkData, ChunkReadingError>>,
+    ) {
+        // Create an unbounded buffer so we don't block the rayon thread pool
+        let (bridge_send, mut bridge_recv) = tokio::sync::mpsc::unbounded_channel();
+
+        // Don't par iter here so we can prevent backpressure with the await in the async
+        // runtime
+        for chunk in chunks.iter().cloned() {
+            let index = get_chunk_index(&chunk);
+            match &self.anvil.chunks_data[index] {
+                None => stream
+                    .send(LoadedData::Missing(chunk))
+                    .await
+                    .expect("Failed to send chunk"),
+                Some(chunk_metadata) => {
+                    let send = bridge_send.clone();
+                    let chunk_data = chunk_metadata.serialized_data.clone();
+                    rayon::spawn(move || {
+                        let result = match chunk_data.to_chunk::<Self>(chunk) {
+                            Ok(chunk) => LoadedData::Loaded(chunk),
+                            Err(err) => LoadedData::Error((chunk, err)),
+                        };
+
+                        send.send(result)
+                            .expect("Failed to send anvil chunks from rayon thread");
+                    });
+                }
+            }
+        }
+        // Drop the original sender so the bridge channel closes and the recv loop below ends
+        drop(bridge_send);
+
+        // Forward everything the rayon tasks produced over the unbounded channel
+        // to the bounded output stream
+
+        while let Some(data) = bridge_recv.recv().await {
+            stream
+                .send(data)
+                .await
+                .expect("Failed to send anvil chunks from bridge");
+        }
+    }
+}
+
+impl DataToBytes for AnvilChunkFormat {
+    type Data = ChunkData;
+
+    fn data_to_bytes(chunk_data: &ChunkData) -> Result<Vec<u8>, ChunkSerializingError> {
+        let mut sections = Vec::new();
+
+        for (i, blocks) in chunk_data.blocks.array_iter_subchunks().enumerate() {
+            // get unique blocks
+            let unique_blocks: HashSet<_> = blocks.iter().collect();
+
+            let palette: IndexMap<_, _> = unique_blocks
+                .into_iter()
+                .enumerate()
+                .map(|(i, block)| {
+                    let name = Block::from_state_id(*block).unwrap().name;
+                    (block, (name, i))
+                })
+                .collect();
+
+            // Determine the number of bits needed to represent the largest index in the palette
+            let block_bit_size = if palette.len() < 16 {
+                4
+            } else {
+                ceil_log2(palette.len() as u32).max(4)
+            };
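+            // e.g. a palette of 20 entries needs ceil_log2(20) = 5 bits per index,
+            // so 12 indices fit in each i64 and an index never straddles two longs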
+
+            let mut section_longs = Vec::new();
+            let mut current_pack_long: i64 = 0;
+            let mut bits_used_in_pack: u32 = 0;
+
+            // Empty data if the palette only contains one index https://minecraft.fandom.com/wiki/Chunk_format
+            // if palette.len() > 1 {}
+            // TODO: Update to write empty data. Right now our read does not handle this elegantly
+            for block in blocks.iter() {
+                // Push if next bit does not fit
+                if bits_used_in_pack + block_bit_size as u32 > 64 {
+                    section_longs.push(current_pack_long);
+                    current_pack_long = 0;
+                    bits_used_in_pack = 0;
+                }
+                let index = palette.get(block).expect("Just added all unique").1;
+                current_pack_long |= (index as i64) << bits_used_in_pack;
+                bits_used_in_pack += block_bit_size as u32;
+
+                assert!(bits_used_in_pack <= 64);
+
+                // If the current 64-bit integer is full, push it to the section_longs and start a new one
+                if bits_used_in_pack >= 64 {
+                    section_longs.push(current_pack_long);
+                    current_pack_long = 0;
+                    bits_used_in_pack = 0;
+                }
+            }
+
+            // Push the last 64-bit integer if it contains any data
+            if bits_used_in_pack > 0 {
+                section_longs.push(current_pack_long);
+            }
+
+            sections.push(ChunkSection {
+                y: i as i8 - 4,
+                block_states: Some(ChunkSectionBlockStates {
+                    data: Some(section_longs.into_boxed_slice()),
+                    palette: palette
+                        .into_iter()
+                        .map(|entry| PaletteEntry {
+                            name: entry.1.0.to_string(),
+                            properties: {
+                                let block = Block::from_state_id(*entry.0).unwrap();
+                                if let Some(properties) = block.properties(*entry.0) {
+                                    let props = properties.to_props();
+                                    let mut props_map = HashMap::new();
+                                    for prop in props {
+                                        props_map.insert(prop.0.clone(), prop.1.clone());
+                                    }
+                                    Some(props_map)
+                                } else {
+                                    None
+                                }
+                            },
+                        })
+                        .collect(),
+                }),
+            });
+        }
+
+        let nbt = ChunkNbt {
+            data_version: WORLD_DATA_VERSION,
+            x_pos: chunk_data.position.x,
+            z_pos: chunk_data.position.z,
+            status: ChunkStatus::Full,
+            heightmaps: chunk_data.heightmap.clone(),
+            sections,
+        };
+
+        let mut result = Vec::new();
+        to_bytes(&nbt, &mut result).map_err(ChunkSerializingError::ErrorSerializingChunk)?;
+        Ok(result)
+    }
+}
+
+impl BytesToData for AnvilChunkFormat {
+    type Data = ChunkData;
+    fn bytes_to_data(
+        chunk_data: &[u8],
+        position: Vector2<i32>,
+    ) -> Result<ChunkData, ChunkParsingError> {
+        // TODO: Implement chunk stages?
+        if from_bytes::<ChunkStatusWrapper>(chunk_data)
+            .map_err(ChunkParsingError::FailedReadStatus)?
+            .status
+            != ChunkStatus::Full
+        {
+            return Err(ChunkParsingError::ChunkNotGenerated);
+        }
+
+        let chunk_data = from_bytes::<ChunkNbt>(chunk_data)
+            .map_err(|e| ChunkParsingError::ErrorDeserializingChunk(e.to_string()))?;
+
+        if chunk_data.x_pos != position.x || chunk_data.z_pos != position.z {
+            return Err(ChunkParsingError::ErrorDeserializingChunk(format!(
+                "Expected data for chunk {},{} but got it for {},{}!",
+                position.x, position.z, chunk_data.x_pos, chunk_data.z_pos,
+            )));
+        }
+
+        // this needs to be boxed, otherwise it will cause a stack-overflow
+        let mut blocks = ChunkBlocks::Homogeneous(0);
+        let mut block_index = 0; // which block we're currently at
+
+        for section in chunk_data.sections.into_iter() {
+            let block_states = match section.block_states {
+                Some(states) => states,
+                None => continue, // TODO @lukas0008 this should instead fill all blocks with the only element of the palette
+            };
+
+            let palette = block_states
+                .palette
+                .iter()
+                .map(ChunkBlockState::from_palette)
+                .collect::<Vec<_>>();
+
+            let block_data = match block_states.data {
+                None => {
+                    // We skipped placing an empty subchunk.
+                    // We need to increase the y coordinate of the next subchunk being placed.
+                    block_index += SUBCHUNK_VOLUME;
+                    continue;
+                }
+                Some(d) => d,
+            };
+
+            // How many bits each block has in one of the palette u64s
+            let block_bit_size = if palette.len() < 16 {
+                4
+            } else {
+                ceil_log2(palette.len() as u32).max(4)
+            };
+            // How many blocks there are in one of the palettes u64s
+            let blocks_in_palette = 64 / block_bit_size;
+
+            let mask = (1 << block_bit_size) - 1;
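+            // e.g. with 4-bit indices the mask is 0b1111; shifting by i * block_bit_size
+            // walks the packed indices from the least-significant bits upward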
+            'block_loop: for block in block_data.iter() {
+                for i in 0..blocks_in_palette {
+                    let index = (block >> (i * block_bit_size)) & mask;
+                    let block = &palette[index as usize];
+
+                    // TODO allow indexing blocks directly so we can just use block_index and save some time?
+                    // this is fine because we initialized the heightmap of `blocks`
+                    // from the cached value in the world file
+                    blocks.set_block_no_heightmap_update(
+                        ChunkRelativeBlockCoordinates {
+                            z: ((block_index % CHUNK_AREA) / 16).into(),
+                            y: Height::from_absolute((block_index / CHUNK_AREA) as u16),
+                            x: (block_index % 16).into(),
+                        },
+                        block.get_id(),
+                    );
+
+                    block_index += 1;
+
+                    // if `SUBCHUNK_VOLUME `is not divisible by `blocks_in_palette` the block_data
+                    // can sometimes spill into other subchunks. We avoid that by aborting early
+                    if (block_index % SUBCHUNK_VOLUME) == 0 {
+                        break 'block_loop;
+                    }
+                }
+            }
+        }
+
+        Ok(ChunkData {
+            blocks,
+            heightmap: chunk_data.heightmaps,
+            position,
+            entities: vec![],
+            // This chunk is read from disk, so it has not been modified
+            dirty: false,
+        })
+    }
+}
diff --git a/pumpkin-world/src/storage/format/anvil/entity.rs b/pumpkin-world/src/storage/format/anvil/entity.rs
new file mode 100644
index 000000000..dd5f1dc7c
--- /dev/null
+++ b/pumpkin-world/src/storage/format/anvil/entity.rs
@@ -0,0 +1,97 @@
+use std::path::PathBuf;
+
+use async_trait::async_trait;
+use bytes::Bytes;
+use pumpkin_nbt::compound::NbtCompound;
+use pumpkin_util::math::vector2::Vector2;
+
+use crate::storage::{
+    ChunkData, ChunkReadingError, ChunkSerializingError, ChunkWritingError,
+    format::{DataToBytes, EntityNbt, get_chunk_index},
+    io::{ChunkSerializer, LoadedData},
+};
+
+use super::{AnvilFile, chunk::AnvilChunkFormat};
+
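+/// Entity serializer for the Anvil format: reuses the `AnvilFile` region container,
+/// but stores per-chunk entity NBT instead of block data.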
+#[derive(Default)]
+pub struct AnvilEntityFormat {
+    anvil: AnvilFile,
+}
+
+#[async_trait]
+impl ChunkSerializer for AnvilEntityFormat {
+    type Data = EntityNbt;
+    type WriteBackend = PathBuf;
+
+    fn get_chunk_key(chunk: &Vector2<i32>) -> String {
+        AnvilFile::get_chunk_key(chunk)
+    }
+
+    fn should_write(&self, is_watched: bool) -> bool {
+        self.anvil.should_write(is_watched)
+    }
+
+    async fn write(&self, path: PathBuf) -> Result<(), std::io::Error> {
+        self.anvil.write(path).await
+    }
+
+    fn read(bytes: Bytes) -> Result<Self, ChunkReadingError> {
+        let anvil = AnvilFile::read(bytes)?;
+        Ok(Self { anvil })
+    }
+
+    async fn update_chunk(&mut self, chunk: &Self::Data) -> Result<(), ChunkWritingError> {
+        self.anvil.update_chunk::<Self>(chunk.position, chunk).await
+    }
+
+    async fn get_chunks(
+        &self,
+        chunks: &[Vector2<i32>],
+        stream: tokio::sync::mpsc::Sender<LoadedData<Self::Data, ChunkReadingError>>,
+    ) {
+        // Create an unbounded buffer so we don't block the rayon thread pool
+        let (bridge_send, mut bridge_recv) = tokio::sync::mpsc::unbounded_channel();
+
+        // Don't par iter here so we can prevent backpressure with the await in the async
+        // runtime
+        for chunk in chunks.iter().cloned() {
+            let index = get_chunk_index(&chunk);
+            match &self.anvil.chunks_data[index] {
+                None => stream
+                    .send(LoadedData::Missing(chunk))
+                    .await
+                    .expect("Failed to send chunk"),
+                Some(chunk_metadata) => {
+                    let send = bridge_send.clone();
+                    let chunk_data = chunk_metadata.serialized_data.clone();
+                    rayon::spawn(move || {
+                        let result = match chunk_data.to_chunk::<Self>(chunk) {
+                            Ok(chunk) => LoadedData::Loaded(chunk),
+                            Err(err) => LoadedData::Error((chunk, err)),
+                        };
+
+                        send.send(result)
+                            .expect("Failed to send anvil chunks from rayon thread");
+                    });
+                }
+            }
+        }
+        // Drop the original sender so the bridge channel closes and the recv loop below ends
+        drop(bridge_send);
+
+        // Forward everything the rayon tasks produced over the unbounded channel
+        // to the bounded output stream
+
+        while let Some(data) = bridge_recv.recv().await {
+            stream
+                .send(data)
+                .await
+                .expect("Failed to send anvil chunks from bridge");
+        }
+    }
+}
+
+impl DataToBytes for AnvilEntityFormat {
+    type Data = EntityNbt;
+
+    fn data_to_bytes(chunk_data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError> {
+        // TODO: serialize the entity NBT; a matching `BytesToData` impl is also still
+        // needed for `to_chunk::<Self>` in `get_chunks` above to compile
+        todo!()
+    }
+}
diff --git a/pumpkin-world/src/chunk/format/anvil.rs b/pumpkin-world/src/storage/format/anvil/mod.rs
similarity index 81%
rename from pumpkin-world/src/chunk/format/anvil.rs
rename to pumpkin-world/src/storage/format/anvil/mod.rs
index 7cb308600..b0a249a70 100644
--- a/pumpkin-world/src/chunk/format/anvil.rs
+++ b/pumpkin-world/src/storage/format/anvil/mod.rs
@@ -1,5 +1,6 @@
 use async_trait::async_trait;
 use bytes::*;
+use chunk::AnvilChunkFormat;
 use flate2::read::{GzDecoder, GzEncoder, ZlibDecoder, ZlibEncoder};
 use indexmap::IndexMap;
 use itertools::Itertools;
@@ -19,21 +20,23 @@ use tokio::{
     sync::Mutex,
 };
 
-use crate::chunk::{
-    ChunkData, ChunkReadingError, ChunkSerializingError, ChunkWritingError, CompressionError,
+use crate::storage::{
+    ChunkData, ChunkReadingError, ChunkWritingError, CompressionError,
+    format::get_region_coords,
     io::{ChunkSerializer, LoadedData},
 };
 
-use super::{ChunkNbt, ChunkSection, ChunkSectionBlockStates, PaletteEntry};
+pub mod chunk;
+pub mod entity;
+
+use super::{
+    BytesToData, ChunkNbt, ChunkSection, ChunkSectionBlockStates, DataToBytes, PaletteEntry,
+    get_chunk_index,
+};
 
 /// The side size of a region in chunks (one region is 32x32 chunks)
 pub const REGION_SIZE: usize = 32;
 
-/// The number of bits that identify two chunks in the same region
-pub const SUBREGION_BITS: u8 = pumpkin_util::math::ceil_log2(REGION_SIZE as u32);
-
-pub const SUBREGION_AND: i32 = i32::pow(2, SUBREGION_BITS as u32) - 1;
-
 /// The number of chunks in a region
 pub const CHUNK_COUNT: usize = REGION_SIZE * REGION_SIZE;
 
@@ -43,9 +46,6 @@ const SECTOR_BYTES: usize = 4096;
 // 1.21.4
 const WORLD_DATA_VERSION: i32 = 4189;
 
-#[derive(Clone, Default)]
-pub struct AnvilChunkFormat;
-
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 #[repr(u8)]
 pub enum Compression {
@@ -76,7 +76,7 @@ impl<R: Read> Read for CompressionRead<R> {
 }
 
 #[derive(Default, Clone)]
-pub struct AnvilChunkData {
+pub struct AnvilData {
     compression: Option<Compression>,
     // Length is always the length of this + compression byte (1) so we dont need to save a length
     compressed_data: Bytes,
@@ -106,239 +106,21 @@ impl WriteAction {
     }
 }
 
-struct AnvilChunkMetadata {
-    serialized_data: AnvilChunkData,
+struct AnvilMetadata {
+    serialized_data: AnvilData,
     timestamp: u32,
 
     // NOTE: This is only valid if our WriteAction is `Parts`
     file_sector_offset: u32,
 }
 
-pub struct AnvilChunkFile {
-    chunks_data: [Option<AnvilChunkMetadata>; CHUNK_COUNT],
+pub struct AnvilFile {
+    chunks_data: [Option<AnvilMetadata>; CHUNK_COUNT],
     end_sector: u32,
     write_action: Mutex<WriteAction>,
 }
 
-impl Compression {
-    const GZIP_ID: u8 = 1;
-    const ZLIB_ID: u8 = 2;
-    const NO_COMPRESSION_ID: u8 = 3;
-    const LZ4_ID: u8 = 4;
-    const CUSTOM_ID: u8 = 127;
-
-    fn decompress_data(&self, compressed_data: &[u8]) -> Result<Box<[u8]>, CompressionError> {
-        match self {
-            Compression::GZip => {
-                let mut decoder = GzDecoder::new(compressed_data);
-                let mut chunk_data = Vec::new();
-                decoder
-                    .read_to_end(&mut chunk_data)
-                    .map_err(CompressionError::GZipError)?;
-                Ok(chunk_data.into_boxed_slice())
-            }
-            Compression::ZLib => {
-                let mut decoder = ZlibDecoder::new(compressed_data);
-                let mut chunk_data = Vec::new();
-                decoder
-                    .read_to_end(&mut chunk_data)
-                    .map_err(CompressionError::ZlibError)?;
-                Ok(chunk_data.into_boxed_slice())
-            }
-            Compression::LZ4 => {
-                let mut decoder =
-                    lz4::Decoder::new(compressed_data).map_err(CompressionError::LZ4Error)?;
-                let mut decompressed_data = Vec::new();
-                decoder
-                    .read_to_end(&mut decompressed_data)
-                    .map_err(CompressionError::LZ4Error)?;
-                Ok(decompressed_data.into_boxed_slice())
-            }
-            Compression::Custom => todo!(),
-        }
-    }
-
-    fn compress_data(
-        &self,
-        uncompressed_data: &[u8],
-        compression_level: u32,
-    ) -> Result<Vec<u8>, CompressionError> {
-        match self {
-            Compression::GZip => {
-                let mut encoder = GzEncoder::new(
-                    uncompressed_data,
-                    flate2::Compression::new(compression_level),
-                );
-                let mut chunk_data = Vec::new();
-                encoder
-                    .read_to_end(&mut chunk_data)
-                    .map_err(CompressionError::GZipError)?;
-                Ok(chunk_data)
-            }
-            Compression::ZLib => {
-                let mut encoder = ZlibEncoder::new(
-                    uncompressed_data,
-                    flate2::Compression::new(compression_level),
-                );
-                let mut chunk_data = Vec::new();
-                encoder
-                    .read_to_end(&mut chunk_data)
-                    .map_err(CompressionError::ZlibError)?;
-                Ok(chunk_data)
-            }
-
-            Compression::LZ4 => {
-                let mut compressed_data = Vec::new();
-                let mut encoder = lz4::EncoderBuilder::new()
-                    .level(compression_level)
-                    .build(&mut compressed_data)
-                    .map_err(CompressionError::LZ4Error)?;
-                if let Err(err) = encoder.write_all(uncompressed_data) {
-                    return Err(CompressionError::LZ4Error(err));
-                }
-                if let (_output, Err(err)) = encoder.finish() {
-                    return Err(CompressionError::LZ4Error(err));
-                }
-                Ok(compressed_data)
-            }
-            Compression::Custom => todo!(),
-        }
-    }
-
-    /// Returns Ok when a compression is found otherwise an Err
-    #[allow(clippy::result_unit_err)]
-    pub fn from_byte(byte: u8) -> Result<Option<Self>, ()> {
-        match byte {
-            Self::GZIP_ID => Ok(Some(Self::GZip)),
-            Self::ZLIB_ID => Ok(Some(Self::ZLib)),
-            // Uncompressed (since a version before 1.15.1)
-            Self::NO_COMPRESSION_ID => Ok(None),
-            Self::LZ4_ID => Ok(Some(Self::LZ4)),
-            Self::CUSTOM_ID => Ok(Some(Self::Custom)),
-            // Unknown format
-            _ => Err(()),
-        }
-    }
-}
-
-impl From<pumpkin_config::chunk::Compression> for Compression {
-    fn from(value: pumpkin_config::chunk::Compression) -> Self {
-        // :c
-        match value {
-            pumpkin_config::chunk::Compression::GZip => Self::GZip,
-            pumpkin_config::chunk::Compression::ZLib => Self::ZLib,
-            pumpkin_config::chunk::Compression::LZ4 => Self::LZ4,
-            pumpkin_config::chunk::Compression::Custom => Self::Custom,
-        }
-    }
-}
-
-impl AnvilChunkData {
-    /// Raw size of serialized chunk
-    #[inline]
-    fn raw_write_size(&self) -> usize {
-        // 4 bytes for the *length* and 1 byte for the *compression* method
-        self.compressed_data.remaining() + 4 + 1
-    }
-
-    /// Size of serialized chunk with padding
-    #[inline]
-    fn padded_size(&self) -> usize {
-        let sector_count = self.sector_count() as usize;
-        sector_count * SECTOR_BYTES
-    }
-
-    #[inline]
-    fn sector_count(&self) -> u32 {
-        let total_size = self.raw_write_size();
-        total_size.div_ceil(SECTOR_BYTES) as u32
-    }
-
-    fn from_bytes(bytes: Bytes) -> Result<Self, ChunkReadingError> {
-        let mut bytes = bytes;
-        // Minus one for the compression byte
-        let length = bytes.get_u32() as usize - 1;
-
-        let compression_method = bytes.get_u8();
-        let compression = Compression::from_byte(compression_method)
-            .map_err(|_| ChunkReadingError::Compression(CompressionError::UnknownCompression))?;
-
-        Ok(AnvilChunkData {
-            compression,
-            // If this has padding, we need to trim it
-            compressed_data: bytes.slice(..length),
-        })
-    }
-
-    async fn write(&self, w: &mut (impl AsyncWrite + Unpin + Send)) -> Result<(), std::io::Error> {
-        let padded_size = self.padded_size();
-
-        w.write_u32((self.compressed_data.remaining() + 1) as u32)
-            .await?;
-        w.write_u8(
-            self.compression
-                .map_or(Compression::NO_COMPRESSION_ID, |c| c as u8),
-        )
-        .await?;
-
-        w.write_all(&self.compressed_data).await?;
-        for _ in 0..(padded_size - self.raw_write_size()) {
-            w.write_u8(0).await?;
-        }
-
-        Ok(())
-    }
-
-    fn to_chunk(&self, pos: Vector2<i32>) -> Result<ChunkData, ChunkReadingError> {
-        let chunk = if let Some(compression) = self.compression {
-            let decompress_bytes = compression
-                .decompress_data(&self.compressed_data)
-                .map_err(ChunkReadingError::Compression)?;
-
-            ChunkData::from_bytes(&decompress_bytes, pos)
-        } else {
-            ChunkData::from_bytes(&self.compressed_data, pos)
-        }
-        .map_err(ChunkReadingError::ParsingError)?;
-
-        Ok(chunk)
-    }
-
-    fn from_chunk(
-        chunk: &ChunkData,
-        compression: Option<Compression>,
-    ) -> Result<Self, ChunkWritingError> {
-        let raw_bytes = chunk_to_bytes(chunk)
-            .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?;
-
-        let compression = compression
-            .unwrap_or_else(|| advanced_config().chunk.compression.algorithm.clone().into());
-
-        // We need to buffer here anyway so theres no use in making an impl Write for this
-        let compressed_data = compression
-            .compress_data(&raw_bytes, advanced_config().chunk.compression.level)
-            .map_err(ChunkWritingError::Compression)?;
-
-        Ok(AnvilChunkData {
-            compression: Some(compression),
-            compressed_data: compressed_data.into(),
-        })
-    }
-}
-
-impl AnvilChunkFile {
-    pub const fn get_region_coords(at: &Vector2<i32>) -> (i32, i32) {
-        // Divide by 32 for the region coordinates
-        (at.x >> SUBREGION_BITS, at.z >> SUBREGION_BITS)
-    }
-
-    pub const fn get_chunk_index(pos: &Vector2<i32>) -> usize {
-        let local_x = pos.x & SUBREGION_AND;
-        let local_z = pos.z & SUBREGION_AND;
-        let index = (local_z << SUBREGION_BITS) + local_x;
-        index as usize
-    }
-
+impl AnvilFile {
     async fn write_indices(&self, path: &Path, indices: &[usize]) -> Result<(), std::io::Error> {
         log::trace!("Writing in place: {:?}", path);
 
@@ -489,33 +271,15 @@ impl AnvilChunkFile {
         log::trace!("Wrote file to Disk: {:?}", path);
         Ok(())
     }
-}
-
-impl Default for AnvilChunkFile {
-    fn default() -> Self {
-        Self {
-            chunks_data: [const { None }; CHUNK_COUNT],
-            write_action: Mutex::new(WriteAction::Pass),
-            // Two sectors for offset + timestamp
-            end_sector: 2,
-        }
-    }
-}
-
-#[async_trait]
-impl ChunkSerializer for AnvilChunkFile {
-    type Data = ChunkData;
-    type WriteBackend = PathBuf;
 
     fn should_write(&self, is_watched: bool) -> bool {
         !is_watched
     }
 
     fn get_chunk_key(chunk: &Vector2<i32>) -> String {
-        let (region_x, region_z) = Self::get_region_coords(chunk);
+        let (region_x, region_z) = get_region_coords(chunk);
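+        // Region files are named `r.<x>.<z>.mca`, where x and z are the chunk
+        // coordinates shifted right by 5 (one region spans 32x32 chunks)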
         format!("./r.{}.{}.mca", region_x, region_z)
     }
-
     async fn write(&self, path: PathBuf) -> Result<(), std::io::Error> {
         let mut write_action = self.write_action.lock().await;
         match &*write_action {
@@ -548,7 +312,7 @@ impl ChunkSerializer for AnvilChunkFile {
         let headers = raw_file_bytes.split_to(SECTOR_BYTES * 2);
         let (mut location_bytes, mut timestamp_bytes) = headers.split_at(SECTOR_BYTES);
 
-        let mut chunk_file = AnvilChunkFile::default();
+        let mut chunk_file = AnvilFile::default();
 
         let mut last_offset = 2;
         for i in 0..CHUNK_COUNT {
@@ -573,11 +337,11 @@ impl ChunkSerializer for AnvilChunkFile {
             let bytes_offset = (sector_offset - 2) * SECTOR_BYTES;
             let bytes_count = sector_count * SECTOR_BYTES;
 
-            let serialized_data = AnvilChunkData::from_bytes(
+            let serialized_data = AnvilData::from_bytes(
                 raw_file_bytes.slice(bytes_offset..bytes_offset + bytes_count),
             )?;
 
-            chunk_file.chunks_data[i] = Some(AnvilChunkMetadata {
+            chunk_file.chunks_data[i] = Some(AnvilMetadata {
                 serialized_data,
                 timestamp,
                 file_sector_offset: sector_offset as u32,
@@ -588,29 +352,32 @@ impl ChunkSerializer for AnvilChunkFile {
         Ok(chunk_file)
     }
 
-    async fn update_chunk(&mut self, chunk: &ChunkData) -> Result<(), ChunkWritingError> {
-        let epoch = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .unwrap()
-            .as_secs() as u32;
-
-        let index = AnvilChunkFile::get_chunk_index(&chunk.position);
+    async fn update_chunk<W: DataToBytes>(
+        &mut self,
+        pos: Vector2<i32>,
+        chunk: &W::Data,
+    ) -> Result<(), ChunkWritingError> {
+        let index = get_chunk_index(&pos);
         // Default to the compression type read from the file
         let compression_type = self.chunks_data[index]
             .as_ref()
             .and_then(|chunk_data| chunk_data.serialized_data.compression);
-        let new_chunk_data = AnvilChunkData::from_chunk(chunk, compression_type)?;
+        let new_chunk_data = AnvilData::from_data::<W>(chunk, compression_type)?;
 
         let mut write_action = self.write_action.lock().await;
         if !advanced_config().chunk.write_in_place {
             *write_action = WriteAction::All;
         }
 
+        let epoch = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_secs() as u32;
         match &*write_action {
             WriteAction::All => {
                 log::trace!("Write action is all: setting chunk in place");
                 // Doesn't matter, just add the data
-                self.chunks_data[index] = Some(AnvilChunkMetadata {
+                self.chunks_data[index] = Some(AnvilMetadata {
                     serialized_data: new_chunk_data,
                     timestamp: epoch,
                     file_sector_offset: 0,
@@ -627,7 +394,7 @@ impl ChunkSerializer for AnvilChunkFile {
                         );
                         // This chunk didn't exist before; append to EOF
                         let new_eof = self.end_sector + new_chunk_data.sector_count();
-                        self.chunks_data[index] = Some(AnvilChunkMetadata {
+                        self.chunks_data[index] = Some(AnvilMetadata {
                             serialized_data: new_chunk_data,
                             timestamp: epoch,
                             file_sector_offset: self.end_sector,
@@ -645,7 +412,7 @@ impl ChunkSerializer for AnvilChunkFile {
                                 new_chunk_data.sector_count()
                             );
                             // We can just add it
-                            self.chunks_data[index] = Some(AnvilChunkMetadata {
+                            self.chunks_data[index] = Some(AnvilMetadata {
                                 serialized_data: new_chunk_data,
                                 timestamp: epoch,
                                 file_sector_offset: old_chunk.file_sector_offset,
@@ -692,7 +459,7 @@ impl ChunkSerializer for AnvilChunkFile {
 
                                 // give up...
                                 *write_action = WriteAction::All;
-                                self.chunks_data[index] = Some(AnvilChunkMetadata {
+                                self.chunks_data[index] = Some(AnvilMetadata {
                                     serialized_data: new_chunk_data,
                                     timestamp: epoch,
                                     file_sector_offset: 0,
@@ -713,7 +480,7 @@ impl ChunkSerializer for AnvilChunkFile {
                                 let new_sectors = new_chunk_data.sector_count();
                                 let swapped_index = swap.0;
                                 let old_offset = old_chunk.file_sector_offset;
-                                self.chunks_data[index] = Some(AnvilChunkMetadata {
+                                self.chunks_data[index] = Some(AnvilMetadata {
                                     serialized_data: new_chunk_data,
                                     timestamp: epoch,
                                     file_sector_offset: swap.1.file_sector_offset,
@@ -762,149 +529,223 @@ impl ChunkSerializer for AnvilChunkFile {
 
         Ok(())
     }
+}
 
-    async fn get_chunks(
+impl Compression {
+    const GZIP_ID: u8 = 1;
+    const ZLIB_ID: u8 = 2;
+    const NO_COMPRESSION_ID: u8 = 3;
+    const LZ4_ID: u8 = 4;
+    const CUSTOM_ID: u8 = 127;
+
+    fn decompress_data(&self, compressed_data: &[u8]) -> Result<Box<[u8]>, CompressionError> {
+        match self {
+            Compression::GZip => {
+                let mut decoder = GzDecoder::new(compressed_data);
+                let mut chunk_data = Vec::new();
+                decoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::GZipError)?;
+                Ok(chunk_data.into_boxed_slice())
+            }
+            Compression::ZLib => {
+                let mut decoder = ZlibDecoder::new(compressed_data);
+                let mut chunk_data = Vec::new();
+                decoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::ZlibError)?;
+                Ok(chunk_data.into_boxed_slice())
+            }
+            Compression::LZ4 => {
+                let mut decoder =
+                    lz4::Decoder::new(compressed_data).map_err(CompressionError::LZ4Error)?;
+                let mut decompressed_data = Vec::new();
+                decoder
+                    .read_to_end(&mut decompressed_data)
+                    .map_err(CompressionError::LZ4Error)?;
+                Ok(decompressed_data.into_boxed_slice())
+            }
+            Compression::Custom => todo!(),
+        }
+    }
+
+    fn compress_data(
         &self,
-        chunks: &[Vector2<i32>],
-        stream: tokio::sync::mpsc::Sender<LoadedData<ChunkData, ChunkReadingError>>,
-    ) {
-        // Create an unbounded buffer so we don't block the rayon thread pool
-        let (bridge_send, mut bridge_recv) = tokio::sync::mpsc::unbounded_channel();
-
-        // Don't par iter here so we can prevent backpressure with the await in the async
-        // runtime
-        for chunk in chunks.iter().cloned() {
-            let index = AnvilChunkFile::get_chunk_index(&chunk);
-            match &self.chunks_data[index] {
-                None => stream
-                    .send(LoadedData::Missing(chunk))
-                    .await
-                    .expect("Failed to send chunk"),
-                Some(chunk_metadata) => {
-                    let send = bridge_send.clone();
-                    let chunk_data = chunk_metadata.serialized_data.clone();
-                    rayon::spawn(move || {
-                        let result = match chunk_data.to_chunk(chunk) {
-                            Ok(chunk) => LoadedData::Loaded(chunk),
-                            Err(err) => LoadedData::Error((chunk, err)),
-                        };
-
-                        send.send(result)
-                            .expect("Failed to send anvil chunks from rayon thread");
-                    });
+        uncompressed_data: &[u8],
+        compression_level: u32,
+    ) -> Result<Vec<u8>, CompressionError> {
+        match self {
+            Compression::GZip => {
+                let mut encoder = GzEncoder::new(
+                    uncompressed_data,
+                    flate2::Compression::new(compression_level),
+                );
+                let mut chunk_data = Vec::new();
+                encoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::GZipError)?;
+                Ok(chunk_data)
+            }
+            Compression::ZLib => {
+                let mut encoder = ZlibEncoder::new(
+                    uncompressed_data,
+                    flate2::Compression::new(compression_level),
+                );
+                let mut chunk_data = Vec::new();
+                encoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::ZlibError)?;
+                Ok(chunk_data)
+            }
+
+            Compression::LZ4 => {
+                let mut compressed_data = Vec::new();
+                let mut encoder = lz4::EncoderBuilder::new()
+                    .level(compression_level)
+                    .build(&mut compressed_data)
+                    .map_err(CompressionError::LZ4Error)?;
+                if let Err(err) = encoder.write_all(uncompressed_data) {
+                    return Err(CompressionError::LZ4Error(err));
+                }
+                if let (_output, Err(err)) = encoder.finish() {
+                    return Err(CompressionError::LZ4Error(err));
                 }
+                Ok(compressed_data)
             }
+            Compression::Custom => todo!(),
         }
-        // Drop the original so streams clean-up
-        drop(bridge_send);
+    }
 
-        // We don't want to waste work, so recv unbounded from the rayon thread pool, then re-send
-        // to the channel
+    /// Returns Ok when a compression is found otherwise an Err
+    #[allow(clippy::result_unit_err)]
+    pub fn from_byte(byte: u8) -> Result<Option<Self>, ()> {
+        match byte {
+            Self::GZIP_ID => Ok(Some(Self::GZip)),
+            Self::ZLIB_ID => Ok(Some(Self::ZLib)),
+            // Uncompressed (since a version before 1.15.1)
+            Self::NO_COMPRESSION_ID => Ok(None),
+            Self::LZ4_ID => Ok(Some(Self::LZ4)),
+            Self::CUSTOM_ID => Ok(Some(Self::Custom)),
+            // Unknown format
+            _ => Err(()),
+        }
+    }
+}
 
-        while let Some(data) = bridge_recv.recv().await {
-            stream
-                .send(data)
-                .await
-                .expect("Failed to send anvil chunks from bridge");
+impl From<pumpkin_config::chunk::Compression> for Compression {
+    fn from(value: pumpkin_config::chunk::Compression) -> Self {
+        // :c
+        match value {
+            pumpkin_config::chunk::Compression::GZip => Self::GZip,
+            pumpkin_config::chunk::Compression::ZLib => Self::ZLib,
+            pumpkin_config::chunk::Compression::LZ4 => Self::LZ4,
+            pumpkin_config::chunk::Compression::Custom => Self::Custom,
         }
     }
 }
 
-pub fn chunk_to_bytes(chunk_data: &ChunkData) -> Result<Vec<u8>, ChunkSerializingError> {
-    let mut sections = Vec::new();
+impl AnvilData {
+    /// Raw size of the serialized chunk in bytes
+    #[inline]
+    fn raw_write_size(&self) -> usize {
+        // 4 bytes for the *length* and 1 byte for the *compression* method
+        self.compressed_data.remaining() + 4 + 1
+    }
 
-    for (i, blocks) in chunk_data.blocks.array_iter_subchunks().enumerate() {
-        // get unique blocks
-        let unique_blocks: HashSet<_> = blocks.iter().collect();
+    /// Size of the serialized chunk, padded to a whole number of sectors
+    #[inline]
+    fn padded_size(&self) -> usize {
+        let sector_count = self.sector_count() as usize;
+        sector_count * SECTOR_BYTES
+    }
 
-        let palette: IndexMap<_, _> = unique_blocks
-            .into_iter()
-            .enumerate()
-            .map(|(i, block)| {
-                let name = Block::from_state_id(*block).unwrap().name;
-                (block, (name, i))
-            })
-            .collect();
+    #[inline]
+    fn sector_count(&self) -> u32 {
+        let total_size = self.raw_write_size();
+        total_size.div_ceil(SECTOR_BYTES) as u32
+    }
 
-        // Determine the number of bits needed to represent the largest index in the palette
-        let block_bit_size = if palette.len() < 16 {
-            4
-        } else {
-            ceil_log2(palette.len() as u32).max(4)
-        };
+    fn from_bytes(bytes: Bytes) -> Result<Self, ChunkReadingError> {
+        let mut bytes = bytes;
+        // Minus one for the compression byte
+        let length = bytes.get_u32() as usize - 1;
 
-        let mut section_longs = Vec::new();
-        let mut current_pack_long: i64 = 0;
-        let mut bits_used_in_pack: u32 = 0;
-
-        // Empty data if the palette only contains one index https://minecraft.fandom.com/wiki/Chunk_format
-        // if palette.len() > 1 {}
-        // TODO: Update to write empty data. Rn or read does not handle this elegantly
-        for block in blocks.iter() {
-            // Push if next bit does not fit
-            if bits_used_in_pack + block_bit_size as u32 > 64 {
-                section_longs.push(current_pack_long);
-                current_pack_long = 0;
-                bits_used_in_pack = 0;
-            }
-            let index = palette.get(block).expect("Just added all unique").1;
-            current_pack_long |= (index as i64) << bits_used_in_pack;
-            bits_used_in_pack += block_bit_size as u32;
+        let compression_method = bytes.get_u8();
+        let compression = Compression::from_byte(compression_method)
+            .map_err(|_| ChunkReadingError::Compression(CompressionError::UnknownCompression))?;
 
-            assert!(bits_used_in_pack <= 64);
+        Ok(AnvilData {
+            compression,
+            // If this has padding, we need to trim it
+            compressed_data: bytes.slice(..length),
+        })
+    }
 
-            // If the current 64-bit integer is full, push it to the section_longs and start a new one
-            if bits_used_in_pack >= 64 {
-                section_longs.push(current_pack_long);
-                current_pack_long = 0;
-                bits_used_in_pack = 0;
-            }
+    async fn write(&self, w: &mut (impl AsyncWrite + Unpin + Send)) -> Result<(), std::io::Error> {
+        let padded_size = self.padded_size();
+
+        w.write_u32((self.compressed_data.remaining() + 1) as u32)
+            .await?;
+        w.write_u8(
+            self.compression
+                .map_or(Compression::NO_COMPRESSION_ID, |c| c as u8),
+        )
+        .await?;
+
+        w.write_all(&self.compressed_data).await?;
+        for _ in 0..(padded_size - self.raw_write_size()) {
+            w.write_u8(0).await?;
         }
 
-        // Push the last 64-bit integer if it contains any data
-        if bits_used_in_pack > 0 {
-            section_longs.push(current_pack_long);
+        Ok(())
+    }
+
+    fn to_chunk<W: BytesToData>(&self, pos: Vector2<i32>) -> Result<W::Data, ChunkReadingError> {
+        let chunk = if let Some(compression) = self.compression {
+            let decompress_bytes = compression
+                .decompress_data(&self.compressed_data)
+                .map_err(ChunkReadingError::Compression)?;
+
+            W::bytes_to_data(&decompress_bytes, pos)
+        } else {
+            W::bytes_to_data(&self.compressed_data, pos)
         }
+        .map_err(ChunkReadingError::ParsingError)?;
 
-        sections.push(ChunkSection {
-            y: i as i8 - 4,
-            block_states: Some(ChunkSectionBlockStates {
-                data: Some(section_longs.into_boxed_slice()),
-                palette: palette
-                    .into_iter()
-                    .map(|entry| PaletteEntry {
-                        name: entry.1.0.to_string(),
-                        properties: {
-                            let block = Block::from_state_id(*entry.0).unwrap();
-                            if let Some(properties) = block.properties(*entry.0) {
-                                let props = properties.to_props();
-                                let mut props_map = HashMap::new();
-                                for prop in props {
-                                    props_map.insert(prop.0.clone(), prop.1.clone());
-                                }
-                                Some(props_map)
-                            } else {
-                                None
-                            }
-                        },
-                    })
-                    .collect(),
-            }),
-        });
+        Ok(chunk)
     }
 
-    let nbt = ChunkNbt {
-        data_version: WORLD_DATA_VERSION,
-        x_pos: chunk_data.position.x,
-        z_pos: chunk_data.position.z,
-        status: ChunkStatus::Full,
-        heightmaps: chunk_data.heightmap.clone(),
-        sections,
-    };
-
-    let mut result = Vec::new();
-    to_bytes(&nbt, &mut result).map_err(ChunkSerializingError::ErrorSerializingChunk)?;
-    Ok(result)
+    fn from_data<T: DataToBytes>(
+        chunk: &T::Data,
+        compression: Option<Compression>,
+    ) -> Result<Self, ChunkWritingError> {
+        let raw_bytes = T::data_to_bytes(chunk)
+            .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?;
+
+        let compression = compression
+            .unwrap_or_else(|| advanced_config().chunk.compression.algorithm.clone().into());
+
+        // We need to buffer here anyway, so there's no use in making an impl Write for this
+        let compressed_data = compression
+            .compress_data(&raw_bytes, advanced_config().chunk.compression.level)
+            .map_err(ChunkWritingError::Compression)?;
+
+        Ok(AnvilData {
+            compression: Some(compression),
+            compressed_data: compressed_data.into(),
+        })
+    }
+}
+
+impl Default for AnvilFile {
+    fn default() -> Self {
+        Self {
+            chunks_data: [const { None }; CHUNK_COUNT],
+            write_action: Mutex::new(WriteAction::Pass),
+            // The first two sectors are reserved for the location and timestamp tables
+            end_sector: 2,
+        }
+    }
 }
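
As a sanity check on raw_write_size, padded_size and sector_count above, a standalone sketch of the sector arithmetic, assuming SECTOR_BYTES is 4096 as in the vanilla region format:

    const SECTOR_BYTES: usize = 4096;

    // 4 bytes for the big-endian length field plus 1 byte for the compression id
    fn raw_write_size(compressed_len: usize) -> usize {
        compressed_len + 4 + 1
    }

    fn sector_count(compressed_len: usize) -> usize {
        raw_write_size(compressed_len).div_ceil(SECTOR_BYTES)
    }

    fn main() {
        // A 5000-byte payload needs ceil(5005 / 4096) = 2 sectors,
        // so write() pads it with 8192 - 5005 = 3187 zero bytes
        assert_eq!(sector_count(5000), 2);
        assert_eq!(sector_count(5000) * SECTOR_BYTES - raw_write_size(5000), 3187);
    }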
 
 #[cfg(test)]
@@ -917,15 +758,17 @@ mod tests {
     use temp_dir::TempDir;
     use tokio::sync::RwLock;
 
-    use crate::chunk::format::anvil::AnvilChunkFile;
-    use crate::chunk::io::chunk_file_manager::ChunkFileManager;
-    use crate::chunk::io::{ChunkIO, LoadedData};
     use crate::coordinates::ChunkRelativeBlockCoordinates;
     use crate::generation::{Seed, get_world_gen};
     use crate::level::{LevelFolder, SyncChunk};
+    use crate::storage::format::anvil::AnvilFile;
+    use crate::storage::io::chunk_file_manager::ChunkFileManager;
+    use crate::storage::io::{ChunkIO, LoadedData};
+
+    use super::chunk::AnvilChunkFormat;
 
     async fn get_chunks(
-        saver: &ChunkFileManager<AnvilChunkFile>,
+        saver: &ChunkFileManager<AnvilChunkFormat>,
         folder: &LevelFolder,
         chunks: &[(Vector2<i32>, SyncChunk)],
     ) -> Box<[SyncChunk]> {
@@ -959,7 +802,7 @@ mod tests {
     #[tokio::test(flavor = "multi_thread")]
     async fn not_existing() {
         let region_path = PathBuf::from("not_existing");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
 
         let mut chunks = Vec::new();
         let (send, mut recv) = tokio::sync::mpsc::channel(1);
@@ -968,6 +811,7 @@ mod tests {
             .fetch_chunks(
                 &LevelFolder {
                     root_folder: PathBuf::from(""),
+                    entities_folder: PathBuf::from(""),
                     region_folder: region_path,
                 },
                 &[Vector2::new(0, 0)],
@@ -996,10 +840,11 @@ mod tests {
         let temp_dir = TempDir::new().unwrap();
         let level_folder = LevelFolder {
             root_folder: temp_dir.path().to_path_buf(),
+            entities_folder: PathBuf::from(""),
             region_folder: temp_dir.path().join("region"),
         };
         fs::create_dir(&level_folder.region_folder).expect("couldn't create region folder");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
 
         // Generate chunks
         let mut chunks = vec![];
@@ -1019,7 +864,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1067,7 +912,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1129,7 +974,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1172,7 +1017,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1201,10 +1046,11 @@ mod tests {
         let temp_dir = TempDir::new().unwrap();
         let level_folder = LevelFolder {
             root_folder: temp_dir.path().to_path_buf(),
+            entities_folder: PathBuf::from(""),
             region_folder: temp_dir.path().join("region"),
         };
         fs::create_dir(&level_folder.region_folder).expect("couldn't create region folder");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
 
         // Generate chunks
         let mut chunks = vec![];
@@ -1229,7 +1075,7 @@ mod tests {
                 .expect("Failed to write chunk");
 
             // Create a new manager to ensure nothing is cached
-            let chunk_saver = ChunkFileManager::<AnvilChunkFile>::default();
+            let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
             let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
             for (_, chunk) in &chunks {
diff --git a/pumpkin-world/src/chunk/format/linear.rs b/pumpkin-world/src/storage/format/linear.rs
similarity index 95%
rename from pumpkin-world/src/chunk/format/linear.rs
rename to pumpkin-world/src/storage/format/linear.rs
index 3692efc16..b1614a6d1 100644
--- a/pumpkin-world/src/chunk/format/linear.rs
+++ b/pumpkin-world/src/storage/format/linear.rs
@@ -2,9 +2,10 @@ use std::io::ErrorKind;
 use std::path::PathBuf;
 use std::time::{SystemTime, UNIX_EPOCH};
 
-use crate::chunk::format::anvil::AnvilChunkFile;
-use crate::chunk::io::{ChunkSerializer, LoadedData};
-use crate::chunk::{ChunkData, ChunkReadingError, ChunkWritingError};
+use crate::storage::format::anvil::AnvilFile;
+use crate::storage::format::get_region_coords;
+use crate::storage::io::{ChunkSerializer, LoadedData};
+use crate::storage::{ChunkData, ChunkReadingError, ChunkWritingError};
 use async_trait::async_trait;
 use bytes::{Buf, BufMut, Bytes};
 use log::error;
@@ -12,7 +13,9 @@ use pumpkin_config::advanced_config;
 use pumpkin_util::math::vector2::Vector2;
 use tokio::io::{AsyncWriteExt, BufWriter};
 
-use super::anvil::{CHUNK_COUNT, chunk_to_bytes};
+use super::anvil::CHUNK_COUNT;
+use super::anvil::chunk::AnvilChunkFormat;
+use super::{BytesToData, DataToBytes, get_chunk_index};
 
 /// The signature of the linear file format
 /// used as a header and footer described in https://gist.github.com/Aaron2550/5701519671253d4c6190bde6706f9f98
@@ -136,7 +139,7 @@ impl LinearFileHeader {
 
 impl LinearFile {
     const fn get_chunk_index(at: &Vector2<i32>) -> usize {
-        AnvilChunkFile::get_chunk_index(at)
+        get_chunk_index(at)
     }
 
     fn check_signature(bytes: &[u8]) -> Result<(), ChunkReadingError> {
@@ -168,7 +171,7 @@ impl ChunkSerializer for LinearFile {
     }
 
     fn get_chunk_key(chunk: &Vector2<i32>) -> String {
-        let (region_x, region_z) = AnvilChunkFile::get_region_coords(chunk);
+        let (region_x, region_z) = get_region_coords(chunk);
         format!("./r.{}.{}.linear", region_x, region_z)
     }
 
@@ -308,7 +311,7 @@ impl ChunkSerializer for LinearFile {
 
     async fn update_chunk(&mut self, chunk: &ChunkData) -> Result<(), ChunkWritingError> {
         let index = LinearFile::get_chunk_index(&chunk.position);
-        let chunk_raw: Bytes = chunk_to_bytes(chunk)
+        let chunk_raw: Bytes = AnvilChunkFormat::data_to_bytes(chunk)
             .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?
             .into();
 
@@ -342,7 +345,7 @@ impl ChunkSerializer for LinearFile {
             let send = bridge_send.clone();
             rayon::spawn(move || {
                 let result = if let Some(data) = linear_chunk_data {
-                    match ChunkData::from_bytes(&data, chunk)
+                    match AnvilChunkFormat::bytes_to_data(&data, chunk)
                         .map_err(ChunkReadingError::ParsingError)
                     {
                         Ok(chunk) => LoadedData::Loaded(chunk),
@@ -381,11 +384,11 @@ mod tests {
     use temp_dir::TempDir;
     use tokio::sync::RwLock;
 
-    use crate::chunk::format::linear::LinearFile;
-    use crate::chunk::io::chunk_file_manager::ChunkFileManager;
-    use crate::chunk::io::{ChunkIO, LoadedData};
     use crate::generation::{Seed, get_world_gen};
     use crate::level::LevelFolder;
+    use crate::storage::format::linear::LinearFile;
+    use crate::storage::io::chunk_file_manager::ChunkFileManager;
+    use crate::storage::io::{ChunkIO, LoadedData};
 
     #[tokio::test(flavor = "multi_thread")]
     async fn not_existing() {
@@ -399,6 +402,7 @@ mod tests {
             .fetch_chunks(
                 &LevelFolder {
                     root_folder: PathBuf::from(""),
+                    entities_folder: PathBuf::from(""),
                     region_folder: region_path,
                 },
                 &[Vector2::new(0, 0)],
@@ -422,6 +426,7 @@ mod tests {
         let temp_dir = TempDir::new().unwrap();
         let level_folder = LevelFolder {
             root_folder: temp_dir.path().to_path_buf(),
+            entities_folder: PathBuf::from(""),
             region_folder: temp_dir.path().join("region"),
         };
         fs::create_dir(&level_folder.region_folder).expect("couldn't create region folder");
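
Both serializers key their region files the same way; a sketch of the name produced by get_chunk_key above, with the shift by SUBREGION_BITS written out as 5:

    fn get_chunk_key(chunk_x: i32, chunk_z: i32) -> String {
        // Arithmetic right shift by 5 maps chunk coordinates to region coordinates
        let (region_x, region_z) = (chunk_x >> 5, chunk_z >> 5);
        format!("./r.{}.{}.linear", region_x, region_z)
    }

    fn main() {
        // Chunk (-1, 70) lands in region file "./r.-1.2.linear"
        assert_eq!(get_chunk_key(-1, 70), "./r.-1.2.linear");
    }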
diff --git a/pumpkin-world/src/storage/format/mod.rs b/pumpkin-world/src/storage/format/mod.rs
new file mode 100644
index 000000000..cdc4a38b3
--- /dev/null
+++ b/pumpkin-world/src/storage/format/mod.rs
@@ -0,0 +1,118 @@
+use std::collections::HashMap;
+
+use anvil::REGION_SIZE;
+use pumpkin_data::chunk::ChunkStatus;
+use pumpkin_nbt::{compound::NbtCompound, from_bytes, nbt_long_array};
+
+use pumpkin_util::math::{ceil_log2, vector2::Vector2};
+use serde::{Deserialize, Serialize};
+
+use crate::{
+    block::ChunkBlockState,
+    coordinates::{ChunkRelativeBlockCoordinates, Height},
+};
+
+use super::{
+    CHUNK_AREA, ChunkBlocks, ChunkData, ChunkHeightmaps, ChunkParsingError, ChunkSerializingError,
+    SUBCHUNK_VOLUME,
+};
+
+pub mod anvil;
+pub mod linear;
+
+// We can't use a serde tag here because it would break ChunkNbt, but "status" has to be serialized with a capital S, hence the "Status" wrapper
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(rename_all = "PascalCase")]
+pub struct ChunkStatusWrapper {
+    status: ChunkStatus,
+}
+
+/// The number of bits that identify two chunks in the same region
+pub const SUBREGION_BITS: u8 = pumpkin_util::math::ceil_log2(REGION_SIZE as u32);
+
+pub const SUBREGION_AND: i32 = i32::pow(2, SUBREGION_BITS as u32) - 1;
+
+pub const fn get_chunk_index(pos: &Vector2<i32>) -> usize {
+    let local_x = pos.x & SUBREGION_AND;
+    let local_z = pos.z & SUBREGION_AND;
+    let index = (local_z << SUBREGION_BITS) + local_x;
+    index as usize
+}
+
+pub const fn get_region_coords(at: &Vector2<i32>) -> (i32, i32) {
+    // Shift by SUBREGION_BITS (divide by REGION_SIZE, flooring) for the region coordinates
+    (at.x >> SUBREGION_BITS, at.z >> SUBREGION_BITS)
+}
+
+/// Used for saving: serializes data into raw bytes
+pub trait DataToBytes
+where
+    Self: Send + Sync,
+{
+    type Data: Send + Sync + Sized;
+
+    fn data_to_bytes(chunk_data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError>;
+}
+
+/// Used for reading: parses raw bytes back into data
+pub trait BytesToData
+where
+    Self: Send + Sync,
+{
+    type Data: Send + Sync + Sized;
+
+    fn bytes_to_data(
+        chunk_data: &[u8],
+        position: Vector2<i32>,
+    ) -> Result<Self::Data, ChunkParsingError>;
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "PascalCase")]
+pub struct PaletteEntry {
+    // block name
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub properties: Option<HashMap<String, String>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+struct ChunkSection {
+    #[serde(rename = "Y")]
+    y: i8,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    block_states: Option<ChunkSectionBlockStates>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+struct ChunkSectionBlockStates {
+    #[serde(
+        serialize_with = "nbt_long_array",
+        skip_serializing_if = "Option::is_none"
+    )]
+    data: Option<Box<[i64]>>,
+    palette: Vec<PaletteEntry>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(rename_all = "PascalCase")]
+struct ChunkNbt {
+    data_version: i32,
+    #[serde(rename = "xPos")]
+    x_pos: i32,
+    // #[serde(rename = "yPos")]
+    //y_pos: i32,
+    #[serde(rename = "zPos")]
+    z_pos: i32,
+    status: ChunkStatus,
+    #[serde(rename = "sections")]
+    sections: Vec<ChunkSection>,
+    heightmaps: ChunkHeightmaps,
+}
+
+// #[serde(rename_all = "PascalCase")]
+struct EntityNbt {
+    data_version: i32,
+    position: [i32; 2],
+    entities: Vec<NbtCompound>,
+}
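
A standalone sketch of the shared index math above, assuming REGION_SIZE is 32 (a 32x32 grid of chunks per region, so SUBREGION_BITS = 5 and SUBREGION_AND = 31):

    const SUBREGION_BITS: u8 = 5; // ceil_log2(32)
    const SUBREGION_AND: i32 = (1 << SUBREGION_BITS) - 1; // 31

    fn get_chunk_index(x: i32, z: i32) -> usize {
        // Masking handles negative coordinates: -1 & 31 == 31
        let local_x = x & SUBREGION_AND;
        let local_z = z & SUBREGION_AND;
        ((local_z << SUBREGION_BITS) + local_x) as usize
    }

    fn main() {
        // Chunk (-1, 70) occupies local slot (31, 6): (6 << 5) + 31 = 223
        assert_eq!(get_chunk_index(-1, 70), 223);
    }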
diff --git a/pumpkin-world/src/chunk/io/chunk_file_manager.rs b/pumpkin-world/src/storage/io/chunk_file_manager.rs
similarity index 99%
rename from pumpkin-world/src/chunk/io/chunk_file_manager.rs
rename to pumpkin-world/src/storage/io/chunk_file_manager.rs
index bbc5d3759..86cba7a77 100644
--- a/pumpkin-world/src/chunk/io/chunk_file_manager.rs
+++ b/pumpkin-world/src/storage/io/chunk_file_manager.rs
@@ -18,8 +18,8 @@ use tokio::{
 };
 
 use crate::{
-    chunk::{ChunkData, ChunkReadingError, ChunkWritingError},
     level::{LevelFolder, SyncChunk},
+    storage::{ChunkData, ChunkReadingError, ChunkWritingError},
 };
 
 use super::{ChunkIO, ChunkSerializer, LoadedData};
diff --git a/pumpkin-world/src/chunk/io/mod.rs b/pumpkin-world/src/storage/io/mod.rs
similarity index 98%
rename from pumpkin-world/src/chunk/io/mod.rs
rename to pumpkin-world/src/storage/io/mod.rs
index 8d872ee5a..11352af2b 100644
--- a/pumpkin-world/src/chunk/io/mod.rs
+++ b/pumpkin-world/src/storage/io/mod.rs
@@ -99,7 +99,7 @@ pub trait ChunkSerializer: Send + Sync + Default {
     async fn write(&self, backend: Self::WriteBackend) -> Result<(), std::io::Error>;
 
     /// Create a new instance from bytes
-    fn read(r: Bytes) -> Result<Self, ChunkReadingError>;
+    fn read(bytes: Bytes) -> Result<Self, ChunkReadingError>;
 
     /// Add the chunk data to the serializer
     async fn update_chunk(&mut self, chunk_data: &Self::Data) -> Result<(), ChunkWritingError>;
diff --git a/pumpkin-world/src/chunk/mod.rs b/pumpkin-world/src/storage/mod.rs
similarity index 99%
rename from pumpkin-world/src/chunk/mod.rs
rename to pumpkin-world/src/storage/mod.rs
index 6151c1998..8664dde9c 100644
--- a/pumpkin-world/src/chunk/mod.rs
+++ b/pumpkin-world/src/storage/mod.rs
@@ -1,4 +1,4 @@
-use pumpkin_nbt::nbt_long_array;
+use pumpkin_nbt::{compound::NbtCompound, nbt_long_array};
 use pumpkin_util::math::vector2::Vector2;
 use serde::{Deserialize, Serialize};
 use std::iter::repeat_with;
@@ -61,6 +61,7 @@ pub struct ChunkData {
     /// See `https://minecraft.wiki/w/Heightmap` for more info
     pub heightmap: ChunkHeightmaps,
     pub position: Vector2<i32>,
+    pub entities: Vec<NbtCompound>,
     pub dirty: bool,
 }
 
diff --git a/pumpkin-world/src/world_info/anvil.rs b/pumpkin-world/src/world_info/anvil.rs
index d027a3ab9..415c6f1fc 100644
--- a/pumpkin-world/src/world_info/anvil.rs
+++ b/pumpkin-world/src/world_info/anvil.rs
@@ -137,6 +137,7 @@ mod test {
         let temp_dir = TempDir::new().unwrap();
         let level_folder = LevelFolder {
             root_folder: temp_dir.path().to_path_buf(),
+            entities_folder: temp_dir.path().join("entities"),
             region_folder: temp_dir.path().join("region"),
         };
 
@@ -220,6 +221,7 @@ mod test {
         let temp_dir = TempDir::new().unwrap();
         let level_folder = LevelFolder {
             root_folder: temp_dir.path().to_path_buf(),
+            entities_folder: temp_dir.path().join("entities"),
             region_folder: temp_dir.path().join("region"),
         };
 
diff --git a/pumpkin/src/plugin/api/events/world/chunk_load.rs b/pumpkin/src/plugin/api/events/world/chunk_load.rs
index 5bbbc0433..3be5a6ca3 100644
--- a/pumpkin/src/plugin/api/events/world/chunk_load.rs
+++ b/pumpkin/src/plugin/api/events/world/chunk_load.rs
@@ -1,6 +1,6 @@
 use crate::world::World;
 use pumpkin_macros::{Event, cancellable};
-use pumpkin_world::chunk::ChunkData;
+use pumpkin_world::storage::ChunkData;
 use std::sync::Arc;
 use tokio::sync::RwLock;
 
diff --git a/pumpkin/src/plugin/api/events/world/chunk_save.rs b/pumpkin/src/plugin/api/events/world/chunk_save.rs
index e9fb38043..d3c95419b 100644
--- a/pumpkin/src/plugin/api/events/world/chunk_save.rs
+++ b/pumpkin/src/plugin/api/events/world/chunk_save.rs
@@ -1,6 +1,6 @@
 use crate::world::World;
 use pumpkin_macros::{Event, cancellable};
-use pumpkin_world::chunk::ChunkData;
+use pumpkin_world::storage::ChunkData;
 use std::sync::Arc;
 use tokio::sync::RwLock;
 
diff --git a/pumpkin/src/plugin/api/events/world/chunk_send.rs b/pumpkin/src/plugin/api/events/world/chunk_send.rs
index 601ceaaae..7e2525da5 100644
--- a/pumpkin/src/plugin/api/events/world/chunk_send.rs
+++ b/pumpkin/src/plugin/api/events/world/chunk_send.rs
@@ -1,6 +1,6 @@
 use crate::world::World;
 use pumpkin_macros::{Event, cancellable};
-use pumpkin_world::chunk::ChunkData;
+use pumpkin_world::storage::ChunkData;
 use std::sync::Arc;
 use tokio::sync::RwLock;
 
diff --git a/pumpkin/src/world/mod.rs b/pumpkin/src/world/mod.rs
index ecd93bf84..0ad5433ff 100644
--- a/pumpkin/src/world/mod.rs
+++ b/pumpkin/src/world/mod.rs
@@ -50,7 +50,7 @@ use pumpkin_util::math::{position::BlockPos, vector3::Vector3};
 use pumpkin_util::text::{TextComponent, color::NamedColor};
 use pumpkin_world::level::Level;
 use pumpkin_world::level::SyncChunk;
-use pumpkin_world::{block::BlockDirection, chunk::ChunkData};
+use pumpkin_world::{block::BlockDirection, storage::ChunkData};
 use pumpkin_world::{
     block::registry::{
         get_block_and_state_by_state_id, get_block_by_state_id, get_state_by_state_id,

From e172c96e5831a7cf3cc25f59cfdf1be49e2af833 Mon Sep 17 00:00:00 2001
From: Alexander Medvedev <lilalexmed@proton.me>
Date: Tue, 18 Mar 2025 14:50:49 +0100
Subject: [PATCH 2/3] rename chunk_file_manager

---
 pumpkin-world/src/level.rs                    |  6 +-
 .../src/storage/format/anvil/entity.rs        | 19 +++++-
 pumpkin-world/src/storage/format/anvil/mod.rs | 64 ++++++++-----------
 ...{chunk_file_manager.rs => file_manager.rs} |  0
 pumpkin-world/src/storage/io/mod.rs           |  2 +-
 5 files changed, 45 insertions(+), 46 deletions(-)
 rename pumpkin-world/src/storage/io/{chunk_file_manager.rs => file_manager.rs} (100%)

diff --git a/pumpkin-world/src/level.rs b/pumpkin-world/src/level.rs
index 66467be4f..e8776d8c3 100644
--- a/pumpkin-world/src/level.rs
+++ b/pumpkin-world/src/level.rs
@@ -19,7 +19,7 @@ use crate::{
             anvil::{AnvilChunkFile, chunk::AnvilChunkFormat},
             linear::LinearFile,
         },
-        io::{ChunkIO, LoadedData, chunk_file_manager::ChunkFileManager},
+        io::{ChunkIO, LoadedData, file_manager::FileManager},
     },
     world_info::{
         LevelData, WorldInfoError, WorldInfoReader, WorldInfoWriter,
@@ -121,8 +121,8 @@ impl Level {
 
         let chunk_io: Arc<dyn ChunkIO<Data = SyncChunk>> = match advanced_config().chunk.format {
             //ChunkFormat::Anvil => (Arc::new(AnvilChunkFormat), Arc::new(AnvilChunkFormat)),
-            ChunkFormat::Linear => Arc::new(ChunkFileManager::<LinearFile>::default()),
-            ChunkFormat::Anvil => Arc::new(ChunkFileManager::<AnvilChunkFormat>::default()),
+            ChunkFormat::Linear => Arc::new(FileManager::<LinearFile>::default()),
+            ChunkFormat::Anvil => Arc::new(FileManager::<AnvilChunkFormat>::default()),
         };
 
         Self {
diff --git a/pumpkin-world/src/storage/format/anvil/entity.rs b/pumpkin-world/src/storage/format/anvil/entity.rs
index dd5f1dc7c..cfaeb2b5f 100644
--- a/pumpkin-world/src/storage/format/anvil/entity.rs
+++ b/pumpkin-world/src/storage/format/anvil/entity.rs
@@ -6,7 +6,7 @@ use pumpkin_util::math::vector2::Vector2;
 
 use crate::storage::{
     ChunkData, ChunkReadingError, ChunkSerializingError, ChunkWritingError,
-    format::{DataToBytes, EntityNbt, get_chunk_index},
+    format::{BytesToData, DataToBytes, EntityNbt, get_chunk_index},
     io::{ChunkSerializer, LoadedData},
 };
 
@@ -40,13 +40,15 @@ impl ChunkSerializer for AnvilEntityFormat {
     }
 
     async fn update_chunk(&mut self, chunk: &Self::Data) -> Result<(), ChunkWritingError> {
-        self.anvil.update_chunk::<Self>(chunk.position, chunk).await
+        self.anvil
+            .update_chunk::<Self>(Vector2::new(chunk.position[0], chunk.position[1]), chunk)
+            .await
     }
 
     async fn get_chunks(
         &self,
         chunks: &[Vector2<i32>],
-        stream: tokio::sync::mpsc::Sender<LoadedData<ChunkData, ChunkReadingError>>,
+        stream: tokio::sync::mpsc::Sender<LoadedData<Self::Data, ChunkReadingError>>,
     ) {
         // Create an unbounded buffer so we don't block the rayon thread pool
         let (bridge_send, mut bridge_recv) = tokio::sync::mpsc::unbounded_channel();
@@ -95,3 +97,14 @@ impl DataToBytes for AnvilEntityFormat {
 
     fn data_to_bytes(chunk_data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError> {}
 }
+
+impl BytesToData for AnvilEntityFormat {
+    type Data = EntityNbt;
+
+    fn bytes_to_data(
+        chunk_data: &[u8],
+        position: Vector2<i32>,
+    ) -> Result<Self::Data, crate::storage::ChunkParsingError> {
+        todo!()
+    }
+}
diff --git a/pumpkin-world/src/storage/format/anvil/mod.rs b/pumpkin-world/src/storage/format/anvil/mod.rs
index b0a249a70..51961165f 100644
--- a/pumpkin-world/src/storage/format/anvil/mod.rs
+++ b/pumpkin-world/src/storage/format/anvil/mod.rs
@@ -134,34 +134,26 @@ impl AnvilFile {
             .await?;
 
         let mut write = BufWriter::new(file);
-        // The first two sectors are reserved for the location table
-        for (index, metadata) in self.chunks_data.iter().enumerate() {
+        let mut timestamp_buf = Vec::new();
+        for metadata in &self.chunks_data {
             if let Some(chunk) = metadata {
                 let chunk_data = &chunk.serialized_data;
-                let sector_count = chunk_data.sector_count();
+                let combined_value = (chunk.file_sector_offset << 8) | chunk_data.sector_count();
+                write.write_u32(combined_value).await?;
+                timestamp_buf.write_u32(chunk.timestamp).await?;
                 log::trace!(
-                    "Writing position for chunk {} - {}:{}",
-                    index,
+                    "Writing position and timestamp for chunk: {}:{}, timestamp:{}",
                     chunk.file_sector_offset,
-                    sector_count
+                    chunk_data.sector_count(),
+                    chunk.timestamp
                 );
-                write
-                    .write_u32((chunk.file_sector_offset << 8) | sector_count)
-                    .await?;
-            } else {
-                // If the chunk is not present, we write 0 to the location and timestamp tables
-                write.write_u32(0).await?;
-            };
-        }
-
-        for metadata in &self.chunks_data {
-            if let Some(chunk) = metadata {
-                write.write_u32(chunk.timestamp).await?;
             } else {
-                // If the chunk is not present, we write 0 to the location and timestamp tables
+                // Write 0 for both location/size and timestamp in one iteration.
                 write.write_u32(0).await?;
+                timestamp_buf.write_u32(0).await?;
             }
         }
+        write.write_all(&timestamp_buf).await?;
 
         let mut chunks = indices
             .iter()
@@ -233,11 +225,13 @@ impl AnvilFile {
             .await?;
 
         let mut write = BufWriter::new(file);
+        let mut timestamp_buf = Vec::new();
 
         // The first two sectors are reserved for the location table
         let mut current_sector: u32 = 2;
         for metadata in &self.chunks_data {
             if let Some(chunk) = metadata {
+                timestamp_buf.write_u32(chunk.timestamp).await?;
                 let chunk = &chunk.serialized_data;
                 let sector_count = chunk.sector_count();
                 write
@@ -247,17 +241,10 @@ impl AnvilFile {
             } else {
                 // If the chunk is not present, we write 0 to the location and timestamp tables
                 write.write_u32(0).await?;
+                timestamp_buf.write_u32(0).await?;
             };
         }
-
-        for metadata in &self.chunks_data {
-            if let Some(chunk) = metadata {
-                write.write_u32(chunk.timestamp).await?;
-            } else {
-                // If the chunk is not present, we write 0 to the location and timestamp tables
-                write.write_u32(0).await?;
-            }
-        }
+        write.write_all(&timestamp_buf).await?;
 
         for chunk in self.chunks_data.iter().flatten() {
             chunk.serialized_data.write(&mut write).await?;
@@ -761,14 +748,13 @@ mod tests {
     use crate::coordinates::ChunkRelativeBlockCoordinates;
     use crate::generation::{Seed, get_world_gen};
     use crate::level::{LevelFolder, SyncChunk};
-    use crate::storage::format::anvil::AnvilFile;
-    use crate::storage::io::chunk_file_manager::ChunkFileManager;
+    use crate::storage::io::file_manager::FileManager;
     use crate::storage::io::{ChunkIO, LoadedData};
 
     use super::chunk::AnvilChunkFormat;
 
     async fn get_chunks(
-        saver: &ChunkFileManager<AnvilChunkFormat>,
+        saver: &FileManager<AnvilChunkFormat>,
         folder: &LevelFolder,
         chunks: &[(Vector2<i32>, SyncChunk)],
     ) -> Box<[SyncChunk]> {
@@ -802,7 +788,7 @@ mod tests {
     #[tokio::test(flavor = "multi_thread")]
     async fn not_existing() {
         let region_path = PathBuf::from("not_existing");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
 
         let mut chunks = Vec::new();
         let (send, mut recv) = tokio::sync::mpsc::channel(1);
@@ -844,7 +830,7 @@ mod tests {
             region_folder: temp_dir.path().join("region"),
         };
         fs::create_dir(&level_folder.region_folder).expect("couldn't create region folder");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
 
         // Generate chunks
         let mut chunks = vec![];
@@ -864,7 +850,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -912,7 +898,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -974,7 +960,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1017,7 +1003,7 @@ mod tests {
             .expect("Failed to write chunk");
 
         // Create a new manager to ensure nothing is cached
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
         let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
         for (_, chunk) in &chunks {
@@ -1050,7 +1036,7 @@ mod tests {
             region_folder: temp_dir.path().join("region"),
         };
         fs::create_dir(&level_folder.region_folder).expect("couldn't create region folder");
-        let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+        let chunk_saver = FileManager::<AnvilChunkFormat>::default();
 
         // Generate chunks
         let mut chunks = vec![];
@@ -1075,7 +1061,7 @@ mod tests {
                 .expect("Failed to write chunk");
 
             // Create a new manager to ensure nothing is cached
-            let chunk_saver = ChunkFileManager::<AnvilChunkFormat>::default();
+            let chunk_saver = FileManager::<AnvilChunkFormat>::default();
             let read_chunks = get_chunks(&chunk_saver, &level_folder, &chunks).await;
 
             for (_, chunk) in &chunks {
diff --git a/pumpkin-world/src/storage/io/chunk_file_manager.rs b/pumpkin-world/src/storage/io/file_manager.rs
similarity index 100%
rename from pumpkin-world/src/storage/io/chunk_file_manager.rs
rename to pumpkin-world/src/storage/io/file_manager.rs
diff --git a/pumpkin-world/src/storage/io/mod.rs b/pumpkin-world/src/storage/io/mod.rs
index 11352af2b..f33344c70 100644
--- a/pumpkin-world/src/storage/io/mod.rs
+++ b/pumpkin-world/src/storage/io/mod.rs
@@ -7,7 +7,7 @@ use pumpkin_util::math::vector2::Vector2;
 use super::{ChunkReadingError, ChunkWritingError};
 use crate::level::LevelFolder;
 
-pub mod chunk_file_manager;
+pub mod file_manager;
 
 /// The result of loading a chunk data.
 ///

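The merged table-writing loop in this patch packs each location entry into a single u32, sector offset in the upper 24 bits and sector count in the low 8, matching (offset << 8) | sector_count. A small sketch of that packing:

    fn pack(sector_offset: u32, sector_count: u32) -> u32 {
        (sector_offset << 8) | sector_count
    }

    fn unpack(entry: u32) -> (u32, u32) {
        (entry >> 8, entry & 0xFF)
    }

    fn main() {
        // A chunk starting at sector 2 and spanning 3 sectors
        let entry = pack(2, 3);
        assert_eq!(entry, 0x0000_0203);
        assert_eq!(unpack(entry), (2, 3));
    }
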
From b990c9a178179c858815f8742cbf7c9781ecddd7 Mon Sep 17 00:00:00 2001
From: Alexander Medvedev <lilalexmed@proton.me>
Date: Tue, 18 Mar 2025 15:52:22 +0100
Subject: [PATCH 3/3] read and write entity components nbt

---
 .../src/storage/format/anvil/entity.rs        | 40 +++++++++++++++++--
 pumpkin-world/src/storage/format/mod.rs       | 10 ++---
 pumpkin/src/entity/mod.rs                     |  5 ++-
 3 files changed, 44 insertions(+), 11 deletions(-)

diff --git a/pumpkin-world/src/storage/format/anvil/entity.rs b/pumpkin-world/src/storage/format/anvil/entity.rs
index cfaeb2b5f..8e1080930 100644
--- a/pumpkin-world/src/storage/format/anvil/entity.rs
+++ b/pumpkin-world/src/storage/format/anvil/entity.rs
@@ -1,7 +1,10 @@
 use std::path::PathBuf;
 
 use bytes::Bytes;
-use pumpkin_nbt::compound::NbtCompound;
+use pumpkin_nbt::{
+    compound::NbtCompound, deserializer::ReadAdaptor, serializer::WriteAdaptor, tag::NbtTag,
+    to_bytes,
+};
 use pumpkin_util::math::vector2::Vector2;
 
 use crate::storage::{
@@ -95,7 +98,26 @@ impl ChunkSerializer for AnvilEntityFormat {
 impl DataToBytes for AnvilEntityFormat {
     type Data = EntityNbt;
 
-    fn data_to_bytes(chunk_data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError> {}
+    fn data_to_bytes(data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError> {
+        let mut content = NbtCompound::new();
+        content.put_int("DataVersion", data.data_version);
+        content.put(
+            "Position",
+            NbtTag::IntArray(vec![data.position.x, data.position.z].into_boxed_slice()),
+        );
+        let mut entities = Vec::new();
+        for entity in &data.entities {
+            entities.push(NbtTag::Compound(entity.clone()));
+        }
+        content.put_list("Entities", entities.into_boxed_slice());
+
+        let mut result = Vec::new();
+        let mut writer = WriteAdaptor::new(&mut result);
+        content
+            .serialize_content(&mut writer)
+            .map_err(ChunkSerializingError::ErrorSerializingChunk)?;
+        Ok(result)
+    }
 }
 
 impl BytesToData for AnvilEntityFormat {
@@ -105,6 +127,18 @@ impl BytesToData for AnvilEntityFormat {
         chunk_data: &[u8],
         position: Vector2<i32>,
     ) -> Result<Self::Data, crate::storage::ChunkParsingError> {
-        todo!()
+        let content = NbtCompound::deserialize_content(&mut ReadAdaptor::new(chunk_data)).unwrap();
+        let data_version = content.get_int("DataVersion").unwrap();
+        let position = content.get_int_array("Position").unwrap();
+        let entities = content.get_list("Entities").unwrap();
+        let mut entity_components = Vec::new();
+        for entity in entities {
+            entity_components.push(entity.extract_compound().unwrap().clone());
+        }
+        Ok(EntityNbt {
+            data_version,
+            position: Vector2::new(position[0], position[1]),
+            entities: entity_components,
+        })
     }
 }
diff --git a/pumpkin-world/src/storage/format/mod.rs b/pumpkin-world/src/storage/format/mod.rs
index cdc4a38b3..caf5aa01b 100644
--- a/pumpkin-world/src/storage/format/mod.rs
+++ b/pumpkin-world/src/storage/format/mod.rs
@@ -51,7 +51,7 @@ where
 {
     type Data: Send + Sync + Sized;
 
-    fn data_to_bytes(chunk_data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError>;
+    fn data_to_bytes(data: &Self::Data) -> Result<Vec<u8>, ChunkSerializingError>;
 }
 
 /// Used for Reading
@@ -61,10 +61,7 @@ where
 {
     type Data: Send + Sync + Sized;
 
-    fn bytes_to_data(
-        chunk_data: &[u8],
-        position: Vector2<i32>,
-    ) -> Result<Self::Data, ChunkParsingError>;
+    fn bytes_to_data(data: &[u8], position: Vector2<i32>) -> Result<Self::Data, ChunkParsingError>;
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
@@ -113,6 +110,7 @@ struct ChunkNbt {
 // #[serde(rename_all = "PascalCase")]
 struct EntityNbt {
     data_version: i32,
-    position: [i32; 2],
+    /// The chunk position
+    position: Vector2<i32>,
     entities: Vec<NbtCompound>,
 }
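
To tie data_to_bytes and bytes_to_data together, a hedged round-trip sketch of the entity chunk encoding, using only the pumpkin_nbt calls that appear in this patch (the Option-returning getters are assumed from the read path above; the id and DataVersion values are placeholders):

    use pumpkin_nbt::{
        compound::NbtCompound, deserializer::ReadAdaptor, serializer::WriteAdaptor, tag::NbtTag,
    };

    fn main() {
        // Build the same shape data_to_bytes produces: DataVersion + Position + Entities
        let mut entity = NbtCompound::new();
        entity.put_int("id", 95); // placeholder entity id, illustration only

        let mut content = NbtCompound::new();
        content.put_int("DataVersion", 4189); // placeholder data version
        content.put(
            "Position",
            NbtTag::IntArray(vec![3, -2].into_boxed_slice()),
        );
        content.put_list("Entities", vec![NbtTag::Compound(entity)].into_boxed_slice());

        // Serialize and parse back through the same adaptors the patch uses
        let mut bytes = Vec::new();
        let mut writer = WriteAdaptor::new(&mut bytes);
        content.serialize_content(&mut writer).unwrap();

        let parsed =
            NbtCompound::deserialize_content(&mut ReadAdaptor::new(bytes.as_slice())).unwrap();
        assert_eq!(parsed.get_int("DataVersion"), Some(4189));
        for tag in parsed.get_list("Entities").unwrap() {
            assert!(tag.extract_compound().is_some());
        }
    }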
diff --git a/pumpkin/src/entity/mod.rs b/pumpkin/src/entity/mod.rs
index d7f2193cd..6f0613186 100644
--- a/pumpkin/src/entity/mod.rs
+++ b/pumpkin/src/entity/mod.rs
@@ -478,6 +478,7 @@ impl EntityBase for Entity {
 #[async_trait]
 impl NBTStorage for Entity {
     async fn write_nbt(&self, nbt: &mut pumpkin_nbt::compound::NbtCompound) {
+        nbt.put_int("id", self.entity_type.id as i32);
         let position = self.pos.load();
         nbt.put(
             "Pos",
@@ -492,9 +493,9 @@ impl NBTStorage for Entity {
                 vec![velocity.x.into(), velocity.y.into(), velocity.z.into()].into_boxed_slice(),
             ),
         );
-        nbt.put(
+        nbt.put_list(
             "Rotation",
-            NbtTag::List(vec![self.yaw.load().into(), self.pitch.load().into()].into_boxed_slice()),
+            vec![self.yaw.load().into(), self.pitch.load().into()].into_boxed_slice(),
         );
 
         // todo more...
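
For completeness, a hedged sketch of the compound this write path builds, assuming the From<f64>/From<f32> conversions into NbtTag that the calls above rely on (the id value is a placeholder):

    use pumpkin_nbt::{compound::NbtCompound, tag::NbtTag};

    fn main() {
        let mut nbt = NbtCompound::new();
        nbt.put_int("id", 95); // placeholder entity type id
        nbt.put(
            "Pos",
            NbtTag::List(vec![0.5f64.into(), 64.0f64.into(), 0.5f64.into()].into_boxed_slice()),
        );
        nbt.put_list(
            "Rotation",
            vec![90.0f32.into(), 0.0f32.into()].into_boxed_slice(),
        );
    }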