aboutsummaryrefslogtreecommitdiff
path: root/azalea-world/src
diff options
context:
space:
mode:
authormat <27899617+mat-1@users.noreply.github.com>2022-11-27 16:25:07 -0600
committerGitHub <noreply@github.com>2022-11-27 16:25:07 -0600
commit631ed63dbdc7167df4de02a55b5c2ef1cea909e9 (patch)
tree104e567c332f2aeb30ea6acefef8c73f9b2f158b /azalea-world/src
parent962b9fcaae917c7e5bef718469fba31f6ff7c3cb (diff)
downloadazalea-drasl-631ed63dbdc7167df4de02a55b5c2ef1cea909e9.tar.xz
Swarm (#36)
* make azalea-pathfinder dir * start writing d* lite impl * more work on d* lite * work more on implementing d* lite * full d* lite impl * updated edges * add next() function * add NoPathError * why does dstar lite not work * fix d* lite implementation * make the test actually check the coords * replace while loop with if statement * fix clippy complaints * make W only have to be PartialOrd * fix PartialOrd issues * implement mtd* lite * add a test to mtd* lite * remove normal d* lite * make heuristic only take in one arg * add `success` function * Update README.md * evil black magic to make .entity not need dimension * start adding moves * slightly improve the vec3/position situation new macro that implements all the useful functions * moves stuff * make it compile * update deps in az-pathfinder * make it compile again * more pathfinding stuff * add Bot::look_at * replace EntityMut and EntityRef with just Entity * block pos pathfinding stuff * rename movedirection to walkdirection * execute path every tick * advance path * change az-pf version * make azalea_client keep plugin state * fix Plugins::get * why does it think there is air * start debugging incorrect air * update some From methods to use rem_euclid * start adding swarm * fix deadlock i still don't understand why it was happening but the solution was to keep the Client::player lock for shorter so it didn't overlap with the Client::dimension lock * make lookat actually work probably * fix going too fast * Update main.rs * make a thing immutable * direction_looking_at * fix rotations * import swarm in an example * fix stuff from merge * remove azalea_pathfinder import * delete azalea-pathfinder crate already in azalea::pathfinder module * swarms * start working on shared dimensions * Shared worlds work * start adding Swarm::add_account * add_account works * change "client" to "bot" in some places * Fix issues from merge * Update world.rs * add SwarmEvent::Disconnect(Account) * almost add SwarmEvent::Chat 
and new plugin system it panics rn * make plugins have to provide the State associated type * improve comments * make fn build slightly cleaner * fix SwarmEvent::Chat * change a println in bot/main.rs * Client::shutdown -> disconnect * polish fix clippy warnings + improve some docs a bit * fix shared worlds* *there's a bug that entities and bots will have their positions exaggerated because the relative movement packet is applied for every entity once per bot * i am being trolled by rust for some reason some stuff is really slow for literally no reason and it makes no sense i am going insane * make world an RwLock again * remove debug messages * fix skipping event ticks unfortunately now sending events is `.send().await?` instead of just `.send()` * fix deadlock + warnings * turns out my floor_mod impl was wrong and i32::rem_euclid has the correct behavior LOL * still errors with lots of bots * make swarm iter & fix new chunks not loading * improve docs * start fixing tests * fix all the tests except the examples i don't know how to exclude them from the tests * improve docs some more
Diffstat (limited to 'azalea-world/src')
-rwxr-xr-xazalea-world/src/chunk_storage.rs167
-rw-r--r--azalea-world/src/container.rs54
-rw-r--r--azalea-world/src/entity/attributes.rs2
-rw-r--r--azalea-world/src/entity/mod.rs13
-rwxr-xr-xazalea-world/src/entity_storage.rs322
-rw-r--r--[-rwxr-xr-x]azalea-world/src/lib.rs170
-rw-r--r--azalea-world/src/world.rs181
7 files changed, 640 insertions, 269 deletions
diff --git a/azalea-world/src/chunk_storage.rs b/azalea-world/src/chunk_storage.rs
index a03cbe7b..6a8a995e 100755
--- a/azalea-world/src/chunk_storage.rs
+++ b/azalea-world/src/chunk_storage.rs
@@ -4,36 +4,61 @@ use crate::World;
use azalea_block::BlockState;
use azalea_buf::BufReadError;
use azalea_buf::{McBufReadable, McBufWritable};
-use azalea_core::floor_mod;
use azalea_core::{BlockPos, ChunkBlockPos, ChunkPos, ChunkSectionBlockPos};
use log::debug;
use log::trace;
+use log::warn;
use parking_lot::Mutex;
+use parking_lot::RwLock;
+use std::collections::HashMap;
use std::fmt::Debug;
use std::io::Cursor;
-use std::{
- io::Write,
- ops::{Index, IndexMut},
- sync::Arc,
-};
+use std::sync::Weak;
+use std::{io::Write, sync::Arc};
const SECTION_HEIGHT: u32 = 16;
-pub struct ChunkStorage {
+/// An efficient storage of chunks for a client that has a limited render
+/// distance. This has support for using a shared [`WeakChunkStorage`]. If you
+/// have an infinite render distance (like a server), you should use
+/// [`ChunkStorage`] instead.
+pub struct PartialChunkStorage {
+ /// Chunk storage that can be shared by clients.
+ shared: Arc<RwLock<WeakChunkStorage>>,
+
pub view_center: ChunkPos,
chunk_radius: u32,
view_range: u32,
- pub height: u32,
- pub min_y: i32,
// chunks is a list of size chunk_radius * chunk_radius
chunks: Vec<Option<Arc<Mutex<Chunk>>>>,
}
+/// A storage for chunks where they're only stored weakly, so if they're not
+/// actively being used somewhere else they'll be forgotten. This is used for
+/// shared worlds.
+pub struct WeakChunkStorage {
+ pub height: u32,
+ pub min_y: i32,
+ pub chunks: HashMap<ChunkPos, Weak<Mutex<Chunk>>>,
+}
+
+/// A storage of potentially infinite chunks in a world. Chunks are stored as
+/// an `Arc<Mutex>` so they can be shared across threads.
+pub struct ChunkStorage {
+ pub height: u32,
+ pub min_y: i32,
+ pub chunks: HashMap<ChunkPos, Arc<Mutex<Chunk>>>,
+}
+
+/// A single chunk in a world (16*?*16 blocks). This only contains the blocks and biomes. You
+/// can derive the height of the chunk from the number of sections, but you
+/// need a [`ChunkStorage`] to get the minimum Y coordinate.
#[derive(Debug)]
pub struct Chunk {
pub sections: Vec<Section>,
}
+/// A section of a chunk, i.e. a 16*16*16 block area.
#[derive(Clone, Debug)]
pub struct Section {
pub block_count: u16,
@@ -59,22 +84,28 @@ impl Default for Chunk {
}
}
-impl ChunkStorage {
- pub fn new(chunk_radius: u32, height: u32, min_y: i32) -> Self {
+impl PartialChunkStorage {
+ pub fn new(chunk_radius: u32, shared: Arc<RwLock<WeakChunkStorage>>) -> Self {
let view_range = chunk_radius * 2 + 1;
- ChunkStorage {
+ PartialChunkStorage {
+ shared,
view_center: ChunkPos::new(0, 0),
chunk_radius,
view_range,
- height,
- min_y,
chunks: vec![None; (view_range * view_range) as usize],
}
}
+ pub fn min_y(&self) -> i32 {
+ self.shared.read().min_y
+ }
+ pub fn height(&self) -> u32 {
+ self.shared.read().height
+ }
+
fn get_index(&self, chunk_pos: &ChunkPos) -> usize {
- (floor_mod(chunk_pos.x, self.view_range) * self.view_range
- + floor_mod(chunk_pos.z, self.view_range)) as usize
+ (i32::rem_euclid(chunk_pos.x, self.view_range as i32) * (self.view_range as i32)
+ + i32::rem_euclid(chunk_pos.z, self.view_range as i32)) as usize
}
pub fn in_range(&self, chunk_pos: &ChunkPos) -> bool {
@@ -84,19 +115,19 @@ impl ChunkStorage {
pub fn get_block_state(&self, pos: &BlockPos) -> Option<BlockState> {
let chunk_pos = ChunkPos::from(pos);
- let chunk = self[&chunk_pos].as_ref()?;
+ let chunk = self.get(&chunk_pos)?;
let chunk = chunk.lock();
- chunk.get(&ChunkBlockPos::from(pos), self.min_y)
+ chunk.get(&ChunkBlockPos::from(pos), self.min_y())
}
pub fn set_block_state(&self, pos: &BlockPos, state: BlockState) -> Option<BlockState> {
- if pos.y < self.min_y || pos.y >= (self.min_y + self.height as i32) {
+ if pos.y < self.min_y() || pos.y >= (self.min_y() + self.height() as i32) {
return None;
}
let chunk_pos = ChunkPos::from(pos);
- let chunk = self[&chunk_pos].as_ref()?;
+ let chunk = self.get(&chunk_pos)?;
let mut chunk = chunk.lock();
- Some(chunk.get_and_set(&ChunkBlockPos::from(pos), state, self.min_y))
+ Some(chunk.get_and_set(&ChunkBlockPos::from(pos), state, self.min_y()))
}
pub fn replace_with_packet_data(
@@ -116,27 +147,77 @@ impl ChunkStorage {
let chunk = Arc::new(Mutex::new(Chunk::read_with_dimension_height(
data,
- self.height,
+ self.height(),
)?));
trace!("Loaded chunk {:?}", pos);
- self[pos] = Some(chunk);
+ self.set(pos, Some(chunk));
Ok(())
}
-}
-impl Index<&ChunkPos> for ChunkStorage {
- type Output = Option<Arc<Mutex<Chunk>>>;
+ /// Get a [`Chunk`] within render distance, or `None` if it's not loaded.
+ /// Use [`PartialChunkStorage::get`] to get a chunk from the shared storage.
+ pub fn limited_get(&self, pos: &ChunkPos) -> Option<&Arc<Mutex<Chunk>>> {
+ if !self.in_range(pos) {
+ warn!(
+ "Chunk at {:?} is not in the render distance (center: {:?}, {} chunks)",
+ pos, self.view_center, self.chunk_radius,
+ );
+ return None;
+ }
- fn index(&self, pos: &ChunkPos) -> &Self::Output {
- &self.chunks[self.get_index(pos)]
+ let index = self.get_index(pos);
+ self.chunks[index].as_ref()
}
-}
-impl IndexMut<&ChunkPos> for ChunkStorage {
- fn index_mut<'a>(&'a mut self, pos: &ChunkPos) -> &'a mut Self::Output {
+ /// Get a mutable reference to a [`Chunk`] within render distance, or
+ /// `None` if it's not loaded. Use [`PartialChunkStorage::get`] to get
+ /// a chunk from the shared storage.
+ pub fn limited_get_mut(&mut self, pos: &ChunkPos) -> Option<&mut Option<Arc<Mutex<Chunk>>>> {
+ if !self.in_range(pos) {
+ return None;
+ }
+
let index = self.get_index(pos);
- &mut self.chunks[index]
+ Some(&mut self.chunks[index])
+ }
+
+ /// Get a chunk from the shared storage, if it's loaded.
+ pub fn get(&self, pos: &ChunkPos) -> Option<Arc<Mutex<Chunk>>> {
+ self.shared
+ .read()
+ .chunks
+ .get(pos)
+ .and_then(|chunk| chunk.upgrade())
+ }
+
+ /// Set a chunk in the shared storage and reference it from the limited
+ /// storage.
+ ///
+ /// # Panics
+ /// If the chunk is not in the render distance.
+ pub fn set(&mut self, pos: &ChunkPos, chunk: Option<Arc<Mutex<Chunk>>>) {
+ if let Some(chunk) = &chunk {
+ self.shared
+ .write()
+ .chunks
+ .insert(*pos, Arc::downgrade(chunk));
+ } else {
+ // don't remove it from the shared storage, since it'll be removed
+ // automatically if this was the last reference
+ }
+ if let Some(chunk_mut) = self.limited_get_mut(pos) {
+ *chunk_mut = chunk;
+ }
+ }
+}
+impl WeakChunkStorage {
+ pub fn new(height: u32, min_y: i32) -> Self {
+ WeakChunkStorage {
+ height,
+ min_y,
+ chunks: HashMap::new(),
+ }
}
}
@@ -214,14 +295,14 @@ impl McBufWritable for Chunk {
}
}
-impl Debug for ChunkStorage {
+impl Debug for PartialChunkStorage {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ChunkStorage")
.field("view_center", &self.view_center)
.field("chunk_radius", &self.chunk_radius)
.field("view_range", &self.view_range)
- .field("height", &self.height)
- .field("min_y", &self.min_y)
+ .field("height", &self.height())
+ .field("min_y", &self.min_y())
// .field("chunks", &self.chunks)
.field("chunks", &format_args!("{} items", self.chunks.len()))
.finish()
@@ -292,9 +373,14 @@ impl Section {
}
}
-impl Default for ChunkStorage {
+impl Default for PartialChunkStorage {
+ fn default() -> Self {
+ Self::new(8, Arc::new(RwLock::new(WeakChunkStorage::default())))
+ }
+}
+impl Default for WeakChunkStorage {
fn default() -> Self {
- Self::new(8, 384, -64)
+ Self::new(384, -64)
}
}
@@ -317,8 +403,11 @@ mod tests {
#[test]
fn test_out_of_bounds_y() {
- let mut chunk_storage = ChunkStorage::default();
- chunk_storage[&ChunkPos { x: 0, z: 0 }] = Some(Arc::new(Mutex::new(Chunk::default())));
+ let mut chunk_storage = PartialChunkStorage::default();
+ chunk_storage.set(
+ &ChunkPos { x: 0, z: 0 },
+ Some(Arc::new(Mutex::new(Chunk::default()))),
+ );
assert!(chunk_storage
.get_block_state(&BlockPos { x: 0, y: 319, z: 0 })
.is_some());
diff --git a/azalea-world/src/container.rs b/azalea-world/src/container.rs
new file mode 100644
index 00000000..acdc9b05
--- /dev/null
+++ b/azalea-world/src/container.rs
@@ -0,0 +1,54 @@
+use crate::WeakWorld;
+use azalea_core::ResourceLocation;
+use log::error;
+use std::{
+ collections::HashMap,
+ sync::{Arc, Weak},
+};
+
+/// A container of [`WeakWorld`]s. Worlds are stored as a Weak pointer here, so
+/// if no clients are using a world it will be forgotten.
+#[derive(Default)]
+pub struct WeakWorldContainer {
+ pub worlds: HashMap<ResourceLocation, Weak<WeakWorld>>,
+}
+
+impl WeakWorldContainer {
+ pub fn new() -> Self {
+ WeakWorldContainer {
+ worlds: HashMap::new(),
+ }
+ }
+
+ /// Get a world from the container.
+ pub fn get(&self, name: &ResourceLocation) -> Option<Arc<WeakWorld>> {
+ self.worlds.get(name).and_then(|world| world.upgrade())
+ }
+
+ /// Add an empty world to the container (or not if it already exists) and
+ /// returns a strong reference to the world.
+ #[must_use = "the world will be immediately forgotten if unused"]
+ pub fn insert(&mut self, name: ResourceLocation, height: u32, min_y: i32) -> Arc<WeakWorld> {
+ if let Some(existing) = self.worlds.get(&name).and_then(|world| world.upgrade()) {
+ if existing.height() != height {
+ error!(
+ "Shared dimension height mismatch: {} != {}",
+ existing.height(),
+ height,
+ );
+ }
+ if existing.min_y() != min_y {
+ error!(
+ "Shared world min_y mismatch: {} != {}",
+ existing.min_y(),
+ min_y,
+ );
+ }
+ existing
+ } else {
+ let world = Arc::new(WeakWorld::new(height, min_y));
+ self.worlds.insert(name, Arc::downgrade(&world));
+ world
+ }
+ }
+}
diff --git a/azalea-world/src/entity/attributes.rs b/azalea-world/src/entity/attributes.rs
index f7e9682e..fca6b88f 100644
--- a/azalea-world/src/entity/attributes.rs
+++ b/azalea-world/src/entity/attributes.rs
@@ -1,4 +1,4 @@
-//! https://minecraft.fandom.com/wiki/Attribute
+//! <https://minecraft.fandom.com/wiki/Attribute>
use std::{
collections::HashMap,
diff --git a/azalea-world/src/entity/mod.rs b/azalea-world/src/entity/mod.rs
index 4611f215..dbf7e665 100644
--- a/azalea-world/src/entity/mod.rs
+++ b/azalea-world/src/entity/mod.rs
@@ -270,20 +270,11 @@ impl EntityData {
&self.pos
}
- /// Convert this &mut self into a (mutable) pointer.
- ///
- /// # Safety
- /// The entity MUST exist while this pointer exists.
- pub unsafe fn as_ptr(&mut self) -> NonNull<EntityData> {
- NonNull::new_unchecked(self as *mut EntityData)
- }
-
/// Convert this &self into a (mutable) pointer.
///
/// # Safety
- /// The entity MUST exist while this pointer exists. You also must not
- /// modify the data inside the pointer.
- pub unsafe fn as_const_ptr(&self) -> NonNull<EntityData> {
+ /// The entity MUST exist for at least as long as this pointer exists.
+ pub unsafe fn as_ptr(&self) -> NonNull<EntityData> {
// this is cursed
NonNull::new_unchecked(self as *const EntityData as *mut EntityData)
}
diff --git a/azalea-world/src/entity_storage.rs b/azalea-world/src/entity_storage.rs
index 02d7d55a..c8c58a75 100755
--- a/azalea-world/src/entity_storage.rs
+++ b/azalea-world/src/entity_storage.rs
@@ -2,101 +2,229 @@ use crate::entity::EntityData;
use azalea_core::ChunkPos;
use log::warn;
use nohash_hasher::{IntMap, IntSet};
-use std::collections::HashMap;
+use parking_lot::RwLock;
+use std::{
+ collections::HashMap,
+ sync::{Arc, Weak},
+};
use uuid::Uuid;
-#[derive(Debug)]
-pub struct EntityStorage {
- data_by_id: IntMap<u32, EntityData>,
- id_by_chunk: HashMap<ChunkPos, IntSet<u32>>,
+// How entity updates are processed (to avoid issues with shared worlds)
+// - each bot contains a map of { entity id: updates received }
+// - the shared world also contains a canonical "true" updates received for each entity
+// - when a client loads an entity, its "updates received" is set to the same as the global "updates received"
+// - when the shared world sees an entity for the first time, the "updates received" is set to 1.
+// - clients can force the shared "updates received" to 0 to make it so certain entities (i.e. other bots in our swarm) don't get confused and updated by other bots
+// - when a client gets an update to an entity, we check if our "updates received" is the same as the shared world's "updates received":
+// if it is, then process the update and increment the client's and shared world's "updates received"
+// if not, then we simply increment our local "updates received" and do nothing else
+
+/// Store a map of entities by ID. To get an iterator over all entities, use
+/// [`WeakEntityStorage::entities`] via `storage.shared.read().entities()`.
+///
+/// This is meant to be used with shared worlds.
+#[derive(Debug, Default)]
+pub struct PartialEntityStorage {
+ pub shared: Arc<RwLock<WeakEntityStorage>>,
+
+ /// The entity id of the player that owns this struct.
+ pub owner_entity_id: u32,
+ pub updates_received: IntMap<u32, u32>,
+ /// Strong references to the entities we have loaded.
+ data_by_id: IntMap<u32, Arc<EntityData>>,
+}
+
+/// Weakly store entities in a world. If the entities aren't being referenced
+/// by anything else (like a [`PartialEntityStorage`]), they'll be forgotten.
+#[derive(Debug, Default)]
+pub struct WeakEntityStorage {
+ data_by_id: IntMap<u32, Weak<EntityData>>,
+ /// An index of all the entity ids we know are in a chunk
+ ids_by_chunk: HashMap<ChunkPos, IntSet<u32>>,
+ /// An index of entity ids by their UUIDs
id_by_uuid: HashMap<Uuid, u32>,
+
+ pub updates_received: IntMap<u32, u32>,
}
-impl EntityStorage {
- pub fn new() -> Self {
+impl PartialEntityStorage {
+ pub fn new(shared: Arc<RwLock<WeakEntityStorage>>, owner_entity_id: u32) -> Self {
+ shared.write().updates_received.insert(owner_entity_id, 0);
Self {
+ shared,
+ owner_entity_id,
+ updates_received: IntMap::default(),
data_by_id: IntMap::default(),
- id_by_chunk: HashMap::default(),
- id_by_uuid: HashMap::default(),
}
}
/// Add an entity to the storage.
#[inline]
pub fn insert(&mut self, id: u32, entity: EntityData) {
- self.id_by_chunk
+ // if the entity is already in the shared world, we don't need to do anything
+ if self.shared.read().data_by_id.contains_key(&id) {
+ return;
+ }
+
+ // add the entity to the "indexes"
+ let mut shared = self.shared.write();
+ shared
+ .ids_by_chunk
.entry(ChunkPos::from(entity.pos()))
.or_default()
.insert(id);
- self.id_by_uuid.insert(entity.uuid, id);
+ shared.id_by_uuid.insert(entity.uuid, id);
+
+ // now store the actual entity data
+ let entity = Arc::new(entity);
+ shared.data_by_id.insert(id, Arc::downgrade(&entity));
self.data_by_id.insert(id, entity);
+ // set our updates_received to the shared updates_received, unless it's
+ // not there in which case set both to 1
+ if let Some(&shared_updates_received) = shared.updates_received.get(&id) {
+ // 0 means we're never tracking updates for this entity
+ if shared_updates_received != 0 || id == self.owner_entity_id {
+ self.updates_received.insert(id, 1);
+ }
+ } else {
+ shared.updates_received.insert(id, 1);
+ self.updates_received.insert(id, 1);
+ }
}
- /// Remove an entity from the storage by its id.
+ /// Remove an entity from this storage by its id. It will only be removed
+ /// from the shared storage if there are no other references to it.
#[inline]
pub fn remove_by_id(&mut self, id: u32) {
if let Some(entity) = self.data_by_id.remove(&id) {
- let entity_chunk = ChunkPos::from(entity.pos());
- let entity_uuid = entity.uuid;
- if self.id_by_chunk.remove(&entity_chunk).is_none() {
- warn!("Tried to remove entity with id {id} from chunk {entity_chunk:?} but it was not found.");
- }
- if self.id_by_uuid.remove(&entity_uuid).is_none() {
- warn!("Tried to remove entity with id {id} from uuid {entity_uuid:?} but it was not found.");
- }
+ let chunk = ChunkPos::from(entity.pos());
+ let uuid = entity.uuid;
+ self.updates_received.remove(&id);
+ drop(entity);
+ // maybe remove it from the storage
+ self.shared.write().remove_entity_if_unused(id, uuid, chunk);
} else {
warn!("Tried to remove entity with id {id} but it was not found.")
}
}
- /// Check if there is an entity that exists with the given id.
+ /// Whether the entity with the given id is being loaded by this storage.
+ /// If you want to check whether the entity is in the shared storage, use
+ /// [`WeakEntityStorage::contains_id`].
#[inline]
- pub fn contains_id(&self, id: &u32) -> bool {
+ pub fn limited_contains_id(&self, id: &u32) -> bool {
self.data_by_id.contains_key(id)
}
- /// Get a reference to an entity by its id.
+ /// Whether the entity with the given id is in the shared storage (i.e.
+ /// it's possible we don't see the entity but something else in the shared
+ /// storage does). To check whether the entity is being loaded by this
+ /// storage, use [`PartialEntityStorage::limited_contains_id`].
#[inline]
- pub fn get_by_id(&self, id: u32) -> Option<&EntityData> {
+ pub fn contains_id(&self, id: &u32) -> bool {
+ self.shared.read().data_by_id.contains_key(id)
+ }
+
+ /// Get a reference to an entity by its id, if it's being loaded by this storage.
+ #[inline]
+ pub fn limited_get_by_id(&self, id: u32) -> Option<&Arc<EntityData>> {
self.data_by_id.get(&id)
}
- /// Get a mutable reference to an entity by its id.
+ /// Get a mutable reference to an entity by its id, if it's being loaded by
+ /// this storage.
#[inline]
- pub fn get_mut_by_id(&mut self, id: u32) -> Option<&mut EntityData> {
+ pub fn limited_get_mut_by_id(&mut self, id: u32) -> Option<&mut Arc<EntityData>> {
self.data_by_id.get_mut(&id)
}
- /// Get a reference to an entity by its uuid.
+ /// Returns whether we're allowed to update this entity (to prevent two clients in
+ /// a shared world updating it twice), and acknowleges that we WILL update
+ /// it if it's true. Don't call this unless you actually got an entity
+ /// update that all other clients within render distance will get too.
+ pub fn maybe_update(&mut self, id: u32) -> bool {
+ let this_client_updates_received = self.updates_received.get(&id).copied();
+ let shared_updates_received = self.shared.read().updates_received.get(&id).copied();
+
+ let can_update = this_client_updates_received == shared_updates_received;
+ if can_update {
+ let new_updates_received = this_client_updates_received.unwrap_or(0) + 1;
+ self.updates_received.insert(id, new_updates_received);
+ self.shared
+ .write()
+ .updates_received
+ .insert(id, new_updates_received);
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Get an entity in the shared storage by its id, if it exists.
#[inline]
- pub fn get_by_uuid(&self, uuid: &Uuid) -> Option<&EntityData> {
- self.id_by_uuid
+ pub fn get_by_id(&self, id: u32) -> Option<Arc<EntityData>> {
+ self.shared
+ .read()
+ .data_by_id
+ .get(&id)
+ .and_then(|e| e.upgrade())
+ }
+
+ /// Get a reference to an entity by its UUID, if it's being loaded by this
+ /// storage.
+ #[inline]
+ pub fn limited_get_by_uuid(&self, uuid: &Uuid) -> Option<&Arc<EntityData>> {
+ self.shared
+ .read()
+ .id_by_uuid
.get(uuid)
.and_then(|id| self.data_by_id.get(id))
}
- /// Get a mutable reference to an entity by its uuid.
+ /// Get a mutable reference to an entity by its UUID, if it's being loaded
+ /// by this storage.
#[inline]
- pub fn get_mut_by_uuid(&mut self, uuid: &Uuid) -> Option<&mut EntityData> {
- self.id_by_uuid
+ pub fn limited_get_mut_by_uuid(&mut self, uuid: &Uuid) -> Option<&mut Arc<EntityData>> {
+ self.shared
+ .read()
+ .id_by_uuid
.get(uuid)
.and_then(|id| self.data_by_id.get_mut(id))
}
- /// Clear all entities in a chunk.
+ /// Get an entity in the shared storage by its UUID, if it exists.
+ #[inline]
+ pub fn get_by_uuid(&self, uuid: &Uuid) -> Option<Arc<EntityData>> {
+ self.shared.read().id_by_uuid.get(uuid).and_then(|id| {
+ self.shared
+ .read()
+ .data_by_id
+ .get(id)
+ .and_then(|e| e.upgrade())
+ })
+ }
+
+ /// Clear all entities in a chunk. This will not clear them from the
+ /// shared storage, unless there are no other references to them.
pub fn clear_chunk(&mut self, chunk: &ChunkPos) {
- if let Some(entities) = self.id_by_chunk.remove(chunk) {
- for entity_id in entities {
- if let Some(entity) = self.data_by_id.remove(&entity_id) {
- self.id_by_uuid.remove(&entity.uuid);
- } else {
- warn!("While clearing chunk {chunk:?}, found an entity that isn't in by_id {entity_id}.");
+ if let Some(entities) = self.shared.read().ids_by_chunk.get(chunk) {
+ for id in entities.iter() {
+ if let Some(entity) = self.data_by_id.remove(id) {
+ let uuid = entity.uuid;
+ drop(entity);
+ // maybe remove it from the storage
+ self.shared
+ .write()
+ .remove_entity_if_unused(*id, uuid, *chunk);
}
}
+ // for entity_id in entities {
+ // self.remove_by_id(entity_id);
+ // }
}
}
- /// Updates an entity from its old chunk.
+ /// Move an entity from its old chunk to a new chunk.
#[inline]
pub fn update_entity_chunk(
&mut self,
@@ -104,36 +232,40 @@ impl EntityStorage {
old_chunk: &ChunkPos,
new_chunk: &ChunkPos,
) {
- if let Some(entities) = self.id_by_chunk.get_mut(old_chunk) {
+ if let Some(entities) = self.shared.write().ids_by_chunk.get_mut(old_chunk) {
entities.remove(&entity_id);
}
- self.id_by_chunk
+ self.shared
+ .write()
+ .ids_by_chunk
.entry(*new_chunk)
.or_default()
.insert(entity_id);
}
- /// Get an iterator over all entities.
- #[inline]
- pub fn entities(&self) -> std::collections::hash_map::Values<'_, u32, EntityData> {
- self.data_by_id.values()
- }
-
- pub fn find_one_entity<F>(&self, mut f: F) -> Option<&EntityData>
+ pub fn find_one_entity<F>(&self, mut f: F) -> Option<Arc<EntityData>>
where
- F: FnMut(&EntityData) -> bool,
+ F: FnMut(&Arc<EntityData>) -> bool,
{
- self.entities().find(|&entity| f(entity))
+ for entity in self.shared.read().entities() {
+ if let Some(entity) = entity.upgrade() {
+ if f(&entity) {
+ return Some(entity);
+ }
+ }
+ }
+ None
}
- pub fn find_one_entity_in_chunk<F>(&self, chunk: &ChunkPos, mut f: F) -> Option<&EntityData>
+ pub fn find_one_entity_in_chunk<F>(&self, chunk: &ChunkPos, mut f: F) -> Option<Arc<EntityData>>
where
F: FnMut(&EntityData) -> bool,
{
- if let Some(entities) = self.id_by_chunk.get(chunk) {
+ let shared = self.shared.read();
+ if let Some(entities) = shared.ids_by_chunk.get(chunk) {
for entity_id in entities {
- if let Some(entity) = self.data_by_id.get(entity_id) {
- if f(entity) {
+ if let Some(entity) = shared.data_by_id.get(entity_id).and_then(|e| e.upgrade()) {
+ if f(&entity) {
return Some(entity);
}
}
@@ -143,9 +275,81 @@ impl EntityStorage {
}
}
-impl Default for EntityStorage {
- fn default() -> Self {
- Self::new()
+impl WeakEntityStorage {
+ pub fn new() -> Self {
+ Self {
+ data_by_id: IntMap::default(),
+ ids_by_chunk: HashMap::default(),
+ id_by_uuid: HashMap::default(),
+ updates_received: IntMap::default(),
+ }
+ }
+
+ /// Remove an entity from the storage if it has no strong references left.
+ /// Returns whether the entity was removed.
+ pub fn remove_entity_if_unused(&mut self, id: u32, uuid: Uuid, chunk: ChunkPos) -> bool {
+ if self.data_by_id.get(&id).and_then(|e| e.upgrade()).is_some() {
+ // if we could get the entity, that means there are still strong
+ // references to it
+ false
+ } else {
+ if self.ids_by_chunk.remove(&chunk).is_none() {
+ warn!("Tried to remove entity with id {id} from chunk {chunk:?} but it was not found.");
+ }
+ if self.id_by_uuid.remove(&uuid).is_none() {
+ warn!(
+ "Tried to remove entity with id {id} from uuid {uuid:?} but it was not found."
+ );
+ }
+ if self.updates_received.remove(&id).is_none() {
+ // if this happens it means we weren't tracking the updates_received for the client (bad)
+ warn!(
+ "Tried to remove entity with id {id} from updates_received but it was not found."
+ );
+ }
+ true
+ }
+ }
+
+ /// Remove a chunk from the storage if the entities in it have no strong
+ /// references left.
+ pub fn remove_chunk_if_unused(&mut self, chunk: &ChunkPos) {
+ if let Some(entities) = self.ids_by_chunk.get(chunk) {
+ if entities.is_empty() {
+ self.ids_by_chunk.remove(chunk);
+ }
+ }
+ }
+
+ /// Get an iterator over all entities in the shared storage. The iterator
+ /// is over `Weak<EntityData>`s, so you'll have to manually try to upgrade.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let mut storage = PartialEntityStorage::default();
+ /// storage.insert(
+ /// 0,
+ /// EntityData::new(
+ /// uuid,
+ /// Vec3::default(),
+ /// EntityMetadata::Player(metadata::Player::default()),
+ /// ),
+ /// );
+ /// for entity in storage.shared.read().entities() {
+ /// if let Some(entity) = entity.upgrade() {
+ /// println!("Entity: {:?}", entity);
+ /// }
+ /// }
+ /// ```
+ pub fn entities(&self) -> std::collections::hash_map::Values<'_, u32, Weak<EntityData>> {
+ self.data_by_id.values()
+ }
+
+ /// Whether the entity with the given id is in the shared storage.
+ #[inline]
+ pub fn contains_id(&self, id: &u32) -> bool {
+ self.data_by_id.contains_key(id)
}
}
@@ -158,7 +362,7 @@ mod tests {
#[test]
fn test_store_entity() {
- let mut storage = EntityStorage::new();
+ let mut storage = PartialEntityStorage::default();
assert!(storage.get_by_id(0).is_none());
let uuid = Uuid::from_u128(100);
diff --git a/azalea-world/src/lib.rs b/azalea-world/src/lib.rs
index 26cae205..05cc7d85 100755..100644
--- a/azalea-world/src/lib.rs
+++ b/azalea-world/src/lib.rs
@@ -1,174 +1,26 @@
#![feature(int_roundings)]
+#![feature(error_generic_member_access)]
+#![feature(provide_any)]
mod bit_storage;
mod chunk_storage;
+mod container;
pub mod entity;
mod entity_storage;
mod palette;
+mod world;
+
+use std::backtrace::Backtrace;
-use azalea_block::BlockState;
-use azalea_buf::BufReadError;
-use azalea_core::{BlockPos, ChunkPos, PositionDelta8, Vec3};
pub use bit_storage::BitStorage;
-pub use chunk_storage::{Chunk, ChunkStorage};
-use entity::{Entity, EntityData};
-pub use entity_storage::EntityStorage;
-use parking_lot::Mutex;
-use std::{
- io::Cursor,
- ops::{Index, IndexMut},
- sync::Arc,
-};
+pub use chunk_storage::{Chunk, ChunkStorage, PartialChunkStorage, WeakChunkStorage};
+pub use container::*;
+pub use entity_storage::{PartialEntityStorage, WeakEntityStorage};
use thiserror::Error;
-use uuid::Uuid;
-
-/// A world is a collection of chunks and entities. They're called "levels" in Minecraft's source code.
-#[derive(Debug, Default)]
-pub struct World {
- pub chunk_storage: ChunkStorage,
- pub entity_storage: EntityStorage,
-}
+pub use world::*;
#[derive(Error, Debug)]
pub enum MoveEntityError {
#[error("Entity doesn't exist")]
- EntityDoesNotExist,
-}
-
-impl World {
- pub fn new(chunk_radius: u32, height: u32, min_y: i32) -> Self {
- World {
- chunk_storage: ChunkStorage::new(chunk_radius, height, min_y),
- entity_storage: EntityStorage::new(),
- }
- }
-
- pub fn replace_with_packet_data(
- &mut self,
- pos: &ChunkPos,
- data: &mut Cursor<&[u8]>,
- ) -> Result<(), BufReadError> {
- self.chunk_storage.replace_with_packet_data(pos, data)
- }
-
- pub fn set_chunk(&mut self, pos: &ChunkPos, chunk: Option<Chunk>) -> Result<(), BufReadError> {
- self[pos] = chunk.map(|c| Arc::new(Mutex::new(c)));
- Ok(())
- }
-
- pub fn update_view_center(&mut self, pos: &ChunkPos) {
- self.chunk_storage.view_center = *pos;
- }
-
- pub fn get_block_state(&self, pos: &BlockPos) -> Option<BlockState> {
- self.chunk_storage.get_block_state(pos)
- }
-
- pub fn set_block_state(&mut self, pos: &BlockPos, state: BlockState) -> Option<BlockState> {
- self.chunk_storage.set_block_state(pos, state)
- }
-
- pub fn set_entity_pos(&mut self, entity_id: u32, new_pos: Vec3) -> Result<(), MoveEntityError> {
- let mut entity = self
- .entity_mut(entity_id)
- .ok_or(MoveEntityError::EntityDoesNotExist)?;
-
- let old_chunk = ChunkPos::from(entity.pos());
- let new_chunk = ChunkPos::from(&new_pos);
- // this is fine because we update the chunk below
- unsafe { entity.move_unchecked(new_pos) };
- if old_chunk != new_chunk {
- self.entity_storage
- .update_entity_chunk(entity_id, &old_chunk, &new_chunk);
- }
- Ok(())
- }
-
- pub fn move_entity_with_delta(
- &mut self,
- entity_id: u32,
- delta: &PositionDelta8,
- ) -> Result<(), MoveEntityError> {
- let mut entity = self
- .entity_mut(entity_id)
- .ok_or(MoveEntityError::EntityDoesNotExist)?;
- let new_pos = entity.pos().with_delta(delta);
-
- let old_chunk = ChunkPos::from(entity.pos());
- let new_chunk = ChunkPos::from(&new_pos);
- // this is fine because we update the chunk below
-
- unsafe { entity.move_unchecked(new_pos) };
- if old_chunk != new_chunk {
- self.entity_storage
- .update_entity_chunk(entity_id, &old_chunk, &new_chunk);
- }
- Ok(())
- }
-
- pub fn add_entity(&mut self, id: u32, entity: EntityData) {
- self.entity_storage.insert(id, entity);
- }
-
- pub fn height(&self) -> u32 {
- self.chunk_storage.height
- }
-
- pub fn min_y(&self) -> i32 {
- self.chunk_storage.min_y
- }
-
- pub fn entity_data_by_id(&self, id: u32) -> Option<&EntityData> {
- self.entity_storage.get_by_id(id)
- }
-
- pub fn entity_data_mut_by_id(&mut self, id: u32) -> Option<&mut EntityData> {
- self.entity_storage.get_mut_by_id(id)
- }
-
- pub fn entity(&self, id: u32) -> Option<Entity<&World>> {
- let entity_data = self.entity_storage.get_by_id(id)?;
- let entity_ptr = unsafe { entity_data.as_const_ptr() };
- Some(Entity::new(self, id, entity_ptr))
- }
-
- pub fn entity_mut(&mut self, id: u32) -> Option<Entity<'_, &mut World>> {
- let entity_data = self.entity_storage.get_mut_by_id(id)?;
- let entity_ptr = unsafe { entity_data.as_ptr() };
- Some(Entity::new(self, id, entity_ptr))
- }
-
- pub fn entity_by_uuid(&self, uuid: &Uuid) -> Option<&EntityData> {
- self.entity_storage.get_by_uuid(uuid)
- }
-
- pub fn entity_mut_by_uuid(&mut self, uuid: &Uuid) -> Option<&mut EntityData> {
- self.entity_storage.get_mut_by_uuid(uuid)
- }
-
- /// Get an iterator over all entities.
- #[inline]
- pub fn entities(&self) -> std::collections::hash_map::Values<'_, u32, EntityData> {
- self.entity_storage.entities()
- }
-
- pub fn find_one_entity<F>(&self, mut f: F) -> Option<&EntityData>
- where
- F: FnMut(&EntityData) -> bool,
- {
- self.entity_storage.find_one_entity(|entity| f(entity))
- }
-}
-
-impl Index<&ChunkPos> for World {
- type Output = Option<Arc<Mutex<Chunk>>>;
-
- fn index(&self, pos: &ChunkPos) -> &Self::Output {
- &self.chunk_storage[pos]
- }
-}
-impl IndexMut<&ChunkPos> for World {
- fn index_mut<'a>(&'a mut self, pos: &ChunkPos) -> &'a mut Self::Output {
- &mut self.chunk_storage[pos]
- }
+ EntityDoesNotExist(Backtrace),
}
diff --git a/azalea-world/src/world.rs b/azalea-world/src/world.rs
new file mode 100644
index 00000000..257d9eb6
--- /dev/null
+++ b/azalea-world/src/world.rs
@@ -0,0 +1,181 @@
+use crate::{
+ entity::{Entity, EntityData},
+ Chunk, MoveEntityError, PartialChunkStorage, PartialEntityStorage, WeakChunkStorage,
+ WeakEntityStorage,
+};
+use azalea_block::BlockState;
+use azalea_buf::BufReadError;
+use azalea_core::{BlockPos, ChunkPos, PositionDelta8, Vec3};
+use parking_lot::{Mutex, RwLock};
+use std::{backtrace::Backtrace, fmt::Debug};
+use std::{fmt::Formatter, io::Cursor, sync::Arc};
+use uuid::Uuid;
+
+/// A world is a collection of chunks and entities. They're called "levels" in Minecraft's source code.
+#[derive(Default)]
+pub struct World {
+ // we just need to keep a strong reference to `shared` so it doesn't get
+ // dropped, we don't need to do anything with it
+ _shared: Arc<WeakWorld>,
+
+ pub chunk_storage: PartialChunkStorage,
+ pub entity_storage: PartialEntityStorage,
+}
+
+/// A world where the chunks are stored as weak pointers. This is used for shared worlds.
+#[derive(Default)]
+pub struct WeakWorld {
+ pub chunk_storage: Arc<RwLock<WeakChunkStorage>>,
+ pub entity_storage: Arc<RwLock<WeakEntityStorage>>,
+}
+
+impl World {
+ pub fn new(chunk_radius: u32, shared: Arc<WeakWorld>, owner_entity_id: u32) -> Self {
+ World {
+ _shared: shared.clone(),
+ chunk_storage: PartialChunkStorage::new(chunk_radius, shared.chunk_storage.clone()),
+ entity_storage: PartialEntityStorage::new(
+ shared.entity_storage.clone(),
+ owner_entity_id,
+ ),
+ }
+ }
+
+ pub fn replace_with_packet_data(
+ &mut self,
+ pos: &ChunkPos,
+ data: &mut Cursor<&[u8]>,
+ ) -> Result<(), BufReadError> {
+ self.chunk_storage.replace_with_packet_data(pos, data)
+ }
+
+ pub fn get_chunk(&self, pos: &ChunkPos) -> Option<Arc<Mutex<Chunk>>> {
+ self.chunk_storage.get(pos)
+ }
+
+ pub fn set_chunk(&mut self, pos: &ChunkPos, chunk: Option<Chunk>) -> Result<(), BufReadError> {
+ self.chunk_storage
+ .set(pos, chunk.map(|c| Arc::new(Mutex::new(c))));
+ Ok(())
+ }
+
+ pub fn update_view_center(&mut self, pos: &ChunkPos) {
+ self.chunk_storage.view_center = *pos;
+ }
+
+ pub fn get_block_state(&self, pos: &BlockPos) -> Option<BlockState> {
+ self.chunk_storage.get_block_state(pos)
+ }
+
+ pub fn set_block_state(&mut self, pos: &BlockPos, state: BlockState) -> Option<BlockState> {
+ self.chunk_storage.set_block_state(pos, state)
+ }
+
+ pub fn set_entity_pos(&mut self, entity_id: u32, new_pos: Vec3) -> Result<(), MoveEntityError> {
+ let mut entity = self
+ .entity_mut(entity_id)
+ .ok_or_else(|| MoveEntityError::EntityDoesNotExist(Backtrace::capture()))?;
+ let old_chunk = ChunkPos::from(entity.pos());
+ let new_chunk = ChunkPos::from(&new_pos);
+ // this is fine because we update the chunk below
+ unsafe { entity.move_unchecked(new_pos) };
+ if old_chunk != new_chunk {
+ self.entity_storage
+ .update_entity_chunk(entity_id, &old_chunk, &new_chunk);
+ }
+ Ok(())
+ }
+
+ pub fn move_entity_with_delta(
+ &mut self,
+ entity_id: u32,
+ delta: &PositionDelta8,
+ ) -> Result<(), MoveEntityError> {
+ let mut entity = self
+ .entity_mut(entity_id)
+ .ok_or_else(|| MoveEntityError::EntityDoesNotExist(Backtrace::capture()))?;
+ let new_pos = entity.pos().with_delta(delta);
+
+ let old_chunk = ChunkPos::from(entity.pos());
+ let new_chunk = ChunkPos::from(&new_pos);
+ // this is fine because we update the chunk below
+
+ unsafe { entity.move_unchecked(new_pos) };
+ if old_chunk != new_chunk {
+ self.entity_storage
+ .update_entity_chunk(entity_id, &old_chunk, &new_chunk);
+ }
+ Ok(())
+ }
+
+ pub fn add_entity(&mut self, id: u32, entity: EntityData) {
+ self.entity_storage.insert(id, entity);
+ }
+
+ pub fn height(&self) -> u32 {
+ self.chunk_storage.height()
+ }
+
+ pub fn min_y(&self) -> i32 {
+ self.chunk_storage.min_y()
+ }
+
+ pub fn entity_data_by_id(&self, id: u32) -> Option<Arc<EntityData>> {
+ self.entity_storage.get_by_id(id)
+ }
+
+ pub fn entity(&self, id: u32) -> Option<Entity<&World>> {
+ let entity_data = self.entity_storage.get_by_id(id)?;
+ let entity_ptr = unsafe { entity_data.as_ptr() };
+ Some(Entity::new(self, id, entity_ptr))
+ }
+
+ /// Returns a mutable reference to the entity with the given ID.
+ pub fn entity_mut(&mut self, id: u32) -> Option<Entity<'_, &mut World>> {
+ // no entity for you (we're processing this entity somewhere else)
+ if id != self.entity_storage.owner_entity_id && !self.entity_storage.maybe_update(id) {
+ return None;
+ }
+
+ let entity_data = self.entity_storage.get_by_id(id)?;
+ let entity_ptr = unsafe { entity_data.as_ptr() };
+ Some(Entity::new(self, id, entity_ptr))
+ }
+
+ pub fn entity_by_uuid(&self, uuid: &Uuid) -> Option<Arc<EntityData>> {
+ self.entity_storage.get_by_uuid(uuid)
+ }
+
+ pub fn find_one_entity<F>(&self, mut f: F) -> Option<Arc<EntityData>>
+ where
+ F: FnMut(&EntityData) -> bool,
+ {
+ self.entity_storage.find_one_entity(|entity| f(entity))
+ }
+}
+
+impl WeakWorld {
+ pub fn new(height: u32, min_y: i32) -> Self {
+ WeakWorld {
+ chunk_storage: Arc::new(RwLock::new(WeakChunkStorage::new(height, min_y))),
+ entity_storage: Arc::new(RwLock::new(WeakEntityStorage::new())),
+ }
+ }
+
+ pub fn height(&self) -> u32 {
+ self.chunk_storage.read().height
+ }
+
+ pub fn min_y(&self) -> i32 {
+ self.chunk_storage.read().min_y
+ }
+}
+
+impl Debug for World {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("World")
+ .field("chunk_storage", &self.chunk_storage)
+ .field("entity_storage", &self.entity_storage)
+ .finish()
+ }
+}