aboutsummaryrefslogtreecommitdiff
path: root/azalea-client/src/task_pool.rs
diff options
context:
space:
mode:
authormat <27899617+mat-1@users.noreply.github.com>2023-02-04 19:32:27 -0600
committerGitHub <noreply@github.com>2023-02-04 19:32:27 -0600
commita5672815ccef520b433363ac622dbb6d6af60c91 (patch)
treef9bb1b41876d81423ac3f188f4d368e6d362eed1 /azalea-client/src/task_pool.rs
parent7c7446ab1e467c29f86e9bfba260741fc469389a (diff)
downloadazalea-drasl-a5672815ccef520b433363ac622dbb6d6af60c91.tar.xz
Use an ECS (#52)
* add EntityData::kind * start making metadata use hecs * make entity codegen generate ecs stuff * fix registry codegen * get rid of worldhaver it's not even used * add bevy_ecs to deps * rename Component to FormattedText also start making the metadata use bevy_ecs but bevy_ecs doesn't let you query on Bundles so it's annoying * generate metadata.rs correctly for bevy_ecs * start switching more entity stuff to use ecs * more ecs stuff for entity storage * ok well it compiles but it definitely doesn't work * random fixes * change a bunch of entity things to use the components * some ecs stuff in az-client * packet handler uses the ecs now and other fun changes i still need to make ticking use the ecs but that's tricker, i'm considering using bevy_ecs systems for those bevy_ecs systems can't be async but the only async things in ticking is just sending packets which can just be done as a tokio task so that's not a big deal * start converting some functions in az-client into systems committing because i'm about to try something that might go horribly wrong * start splitting client i'm probably gonna change it so azalea entity ids are separate from minecraft entity ids next (so stuff like player ids can be consistent and we don't have to wait for the login packet) * separate minecraft entity ids from azalea entity ids + more ecs stuff i guess i'm using bevy_app now too huh it's necessary for plugins and it lets us control the tick rate anyways so it's fine i think i'm still not 100% sure how packet handling that interacts with the world will work, but i think if i can sneak the ecs world into there it'll be fine. Can't put packet handling in the schedule because that'd make it tick-bound, which it's not (technically it'd still work but it'd be wrong and anticheats might realize). 
* packet handling now it runs the schedule only when we get a tick or packet :smile: also i systemified some more functions and did other random fixes so az-world and az-physics compile making azalea-client use the ecs is almost done! all the hard parts are done now i hope, i just have to finish writing all the code so it actually works * start figuring out how functions in Client will work generally just lifetimes being annoying but i think i can get it all to work * make writing packets work synchronously* * huh az-client compiles * start fixing stuff * start fixing some packets * make packet handler work i still haven't actually tested any of this yet lol but in theory it should all work i'll probably either actually test az-client and fix all the remaining issues or update the azalea crate next ok also one thing that i'm not particularly happy with is how the packet handlers are doing ugly queries like ```rs let local_player = ecs .query::<&LocalPlayer>() .get_mut(ecs, player_entity) .unwrap(); ``` i think the right way to solve it would be by putting every packet handler in its own system but i haven't come up with a way to make that not be really annoying yet * fix warnings * ok what if i just have a bunch of queries and a single packet handler system * simple example for azalea-client * :bug: * maybe fix deadlock idk can't test it rn lmao * make physicsstate its own component * use the default plugins * azalea compiles lol * use systemstate for packet handler * fix entities basically moved some stuff from being in the world to just being components * physics (ticking) works * try to add a .entity_by function still doesn't work because i want to make the predicate magic * try to make entity_by work well it does work but i couldn't figure out how to make it look not terrible. 
Will hopefully change in the future * everything compiles * start converting swarm to use builder * continue switching swarm to builder and fix stuff * make swarm use builder still have to fix some stuff and make client use builder * fix death event * client builder * fix some warnings * document plugins a bit * start trying to fix tests * azalea-ecs * azalea-ecs stuff compiles * az-physics tests pass :tada: * fix all the tests * clippy on azalea-ecs-macros * remove now-unnecessary trait_upcasting feature * fix some clippy::pedantic warnings lol * why did cargo fmt not remove the trailing spaces * FIX ALL THE THINGS * when i said 'all' i meant non-swarm bugs * start adding task pool * fix entity deduplication * fix pathfinder not stopping * fix some more random bugs * fix panic that sometimes happens in swarms * make pathfinder run in task * fix some tests * fix doctests and clippy * deadlock * fix systems running in wrong order * fix non-swarm bots
Diffstat (limited to 'azalea-client/src/task_pool.rs')
-rw-r--r--azalea-client/src/task_pool.rs177
1 files changed, 177 insertions, 0 deletions
diff --git a/azalea-client/src/task_pool.rs b/azalea-client/src/task_pool.rs
new file mode 100644
index 00000000..2a3afbbc
--- /dev/null
+++ b/azalea-client/src/task_pool.rs
@@ -0,0 +1,177 @@
+//! Borrowed from `bevy_core`.
+
+use azalea_ecs::{
+ app::{App, Plugin},
+ schedule::IntoSystemDescriptor,
+ system::Resource,
+};
+use bevy_tasks::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool, TaskPoolBuilder};
+
/// Setup of the default bevy task pools: `AsyncComputeTaskPool`,
/// `ComputeTaskPool`, `IoTaskPool`.
///
/// Borrowed from `bevy_core` (see the module doc); adding this plugin
/// initializes the pools and keeps them ticked.
#[derive(Default)]
pub struct TaskPoolPlugin {
    /// Options for the [`TaskPool`](bevy_tasks::TaskPool) created at
    /// application start.
    pub task_pool_options: TaskPoolOptions,
}
+
+impl Plugin for TaskPoolPlugin {
+ fn build(&self, app: &mut App) {
+ // Setup the default bevy task pools
+ self.task_pool_options.create_default_pools();
+
+ #[cfg(not(target_arch = "wasm32"))]
+ app.add_system_to_stage(
+ azalea_ecs::app::CoreStage::Last,
+ bevy_tasks::tick_global_task_pools_on_main_thread.at_end(),
+ );
+ }
+}
+
/// Helper for configuring and creating the default task pools. For end-users
/// who want full control, set up [`TaskPoolPlugin`](super::TaskPoolPlugin).
#[derive(Clone, Resource)]
pub struct TaskPoolOptions {
    /// If the number of physical cores is less than `min_total_threads`, force
    /// using `min_total_threads`.
    pub min_total_threads: usize,
    /// If the number of physical cores is greater than `max_total_threads`,
    /// force using `max_total_threads`.
    pub max_total_threads: usize,

    /// Used to determine the number of IO threads to allocate.
    pub io: TaskPoolThreadAssignmentPolicy,
    /// Used to determine the number of async compute threads to allocate.
    pub async_compute: TaskPoolThreadAssignmentPolicy,
    /// Used to determine the number of compute threads to allocate.
    ///
    /// This policy is applied last, so a `percent` of 1.0 means "all
    /// remaining threads".
    pub compute: TaskPoolThreadAssignmentPolicy,
}
+
+impl Default for TaskPoolOptions {
+ fn default() -> Self {
+ TaskPoolOptions {
+ // By default, use however many cores are available on the system
+ min_total_threads: 1,
+ max_total_threads: std::usize::MAX,
+
+ // Use 25% of cores for IO, at least 1, no more than 4
+ io: TaskPoolThreadAssignmentPolicy {
+ min_threads: 1,
+ max_threads: 4,
+ percent: 0.25,
+ },
+
+ // Use 25% of cores for async compute, at least 1, no more than 4
+ async_compute: TaskPoolThreadAssignmentPolicy {
+ min_threads: 1,
+ max_threads: 4,
+ percent: 0.25,
+ },
+
+ // Use all remaining cores for compute (at least 1)
+ compute: TaskPoolThreadAssignmentPolicy {
+ min_threads: 1,
+ max_threads: std::usize::MAX,
+ percent: 1.0, // This 1.0 here means "whatever is left over"
+ },
+ }
+ }
+}
+
+impl TaskPoolOptions {
+ // /// Create a configuration that forces using the given number of threads.
+ // pub fn with_num_threads(thread_count: usize) -> Self {
+ // TaskPoolOptions {
+ // min_total_threads: thread_count,
+ // max_total_threads: thread_count,
+ // ..Default::default()
+ // }
+ // }
+
+ /// Inserts the default thread pools into the given resource map based on
+ /// the configured values
+ pub fn create_default_pools(&self) {
+ let total_threads = bevy_tasks::available_parallelism()
+ .clamp(self.min_total_threads, self.max_total_threads);
+
+ let mut remaining_threads = total_threads;
+
+ {
+ // Determine the number of IO threads we will use
+ let io_threads = self
+ .io
+ .get_number_of_threads(remaining_threads, total_threads);
+
+ remaining_threads = remaining_threads.saturating_sub(io_threads);
+
+ IoTaskPool::init(|| {
+ TaskPoolBuilder::default()
+ .num_threads(io_threads)
+ .thread_name("IO Task Pool".to_string())
+ .build()
+ });
+ }
+
+ {
+ // Determine the number of async compute threads we will use
+ let async_compute_threads = self
+ .async_compute
+ .get_number_of_threads(remaining_threads, total_threads);
+
+ remaining_threads = remaining_threads.saturating_sub(async_compute_threads);
+
+ AsyncComputeTaskPool::init(|| {
+ TaskPoolBuilder::default()
+ .num_threads(async_compute_threads)
+ .thread_name("Async Compute Task Pool".to_string())
+ .build()
+ });
+ }
+
+ {
+ // Determine the number of compute threads we will use
+ // This is intentionally last so that an end user can specify 1.0 as the percent
+ let compute_threads = self
+ .compute
+ .get_number_of_threads(remaining_threads, total_threads);
+
+ ComputeTaskPool::init(|| {
+ TaskPoolBuilder::default()
+ .num_threads(compute_threads)
+ .thread_name("Compute Task Pool".to_string())
+ .build()
+ });
+ }
+ }
+}
+
/// Defines a simple way to determine how many threads to use given the number
/// of remaining cores and number of total cores.
#[derive(Clone)]
pub struct TaskPoolThreadAssignmentPolicy {
    /// Force using at least this many threads.
    pub min_threads: usize,
    /// Under no circumstance use more than this many threads for this pool.
    pub max_threads: usize,
    /// Target using this percentage of total cores, clamped by `min_threads`
    /// and `max_threads`. It is permitted to use 1.0 to try to use all
    /// remaining threads.
    pub percent: f32,
}
+
+impl TaskPoolThreadAssignmentPolicy {
+ /// Determine the number of threads to use for this task pool
+ fn get_number_of_threads(&self, remaining_threads: usize, total_threads: usize) -> usize {
+ assert!(self.percent >= 0.0);
+ let mut desired = (total_threads as f32 * self.percent).round() as usize;
+
+ // Limit ourselves to the number of cores available
+ desired = desired.min(remaining_threads);
+
+ // Clamp by min_threads, max_threads. (This may result in us using more threads
+ // than are available, this is intended. An example case where this
+ // might happen is a device with <= 2 threads.
+ desired.clamp(self.min_threads, self.max_threads)
+ }
+}