From 8b342f6aaccfeaee1fde390a5701cf166bd5ba4a Mon Sep 17 00:00:00 2001 From: trivernis Date: Fri, 25 Feb 2022 20:04:16 +0100 Subject: [PATCH 01/15] Add tables to store job information Signed-off-by: trivernis --- .../20220225183244_add-jobs-tables.sql | 14 +++++++ .../mediarepo-database/src/entities/job.rs | 42 +++++++++++++++++++ .../src/entities/job_state.rs | 29 +++++++++++++ .../mediarepo-database/src/entities/mod.rs | 2 + 4 files changed, 87 insertions(+) create mode 100644 mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql create mode 100644 mediarepo-daemon/mediarepo-database/src/entities/job.rs create mode 100644 mediarepo-daemon/mediarepo-database/src/entities/job_state.rs diff --git a/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql new file mode 100644 index 0000000..fa24024 --- /dev/null +++ b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql @@ -0,0 +1,14 @@ +CREATE TABLE jobs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + job_type INTEGER NOT NULL, + name VARCHAR(255), + next_run DATETIME, + interval INTEGER +); + +CREATE TABLE job_states ( + job_id INTEGER REFERENCES jobs (id), + key VARCHAR(128) NOT NULL DEFAULT 'default', + value BLOB, + PRIMARY KEY (job_id, key) +); \ No newline at end of file diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job.rs b/mediarepo-daemon/mediarepo-database/src/entities/job.rs new file mode 100644 index 0000000..f87f5ba --- /dev/null +++ b/mediarepo-daemon/mediarepo-database/src/entities/job.rs @@ -0,0 +1,42 @@ +use chrono::NaiveDateTime; +use sea_orm::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel)] +#[sea_orm(table_name = "jobs")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i64, + pub job_type: JobType, + pub name: Option, + pub next_run: Option, + pub interval: Option, +} + 
+#[derive(Clone, Debug, PartialEq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "u32", db_type = "Integer")] +pub enum JobType { + #[sea_orm(num_value = 10)] + MigrateCDs, + #[sea_orm(num_value = 20)] + CalculateSizes, + #[sea_orm(num_value = 30)] + GenerateThumbs, + #[sea_orm(num_value = 40)] + CheckIntegrity, + #[sea_orm(num_value = 50)] + Vacuum, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::job_state::Entity")] + JobState, +} + +impl Related<super::job_state::Entity> for Entity { + fn to() -> RelationDef { + Relation::JobState.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs new file mode 100644 index 0000000..8180a6c --- /dev/null +++ b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs @@ -0,0 +1,29 @@ +use sea_orm::prelude::*; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel)] +#[sea_orm(table_name = "job_states")] +pub struct Model { + #[sea_orm(primary_key)] + pub job_id: i64, + #[sea_orm(primary_key)] + pub key: String, + pub value: Vec<u8>, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::job::Entity", + from = "Column::JobId", + to = "super::job::Column::Id" + )] + Job, +} + +impl Related<super::job::Entity> for Entity { + fn to() -> RelationDef { + Relation::Job.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/mediarepo-daemon/mediarepo-database/src/entities/mod.rs b/mediarepo-daemon/mediarepo-database/src/entities/mod.rs index 3eee5ae..a29c7b5 100644 --- a/mediarepo-daemon/mediarepo-database/src/entities/mod.rs +++ b/mediarepo-daemon/mediarepo-database/src/entities/mod.rs @@ -3,6 +3,8 @@ pub mod content_descriptor_source; pub mod content_descriptor_tag; pub mod file; pub mod file_metadata; +pub mod job; +pub mod job_state; pub mod namespace; pub mod sort_key; pub mod 
sorting_preset; From 530fbd76064dd0b88bd7aca116cb9cf9faa1715c Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 26 Feb 2022 09:24:19 +0100 Subject: [PATCH 02/15] Add job trait and state data objects Signed-off-by: trivernis --- mediarepo-daemon/Cargo.lock | 14 ++++ mediarepo-daemon/Cargo.toml | 7 +- mediarepo-daemon/mediarepo-core/Cargo.toml | 1 + mediarepo-daemon/mediarepo-core/src/error.rs | 3 + mediarepo-daemon/mediarepo-core/src/lib.rs | 1 + .../mediarepo-database/src/entities/job.rs | 2 +- .../mediarepo-logic/src/dao/job/mod.rs | 1 + .../mediarepo-logic/src/dao/job/state.rs | 20 +++++ .../mediarepo-logic/src/dto/job.rs | 33 ++++++++ .../mediarepo-logic/src/dto/job_state.rs | 35 ++++++++ .../mediarepo-logic/src/dto/mod.rs | 4 + mediarepo-daemon/mediarepo-worker/Cargo.toml | 27 ++++++ .../mediarepo-worker/src/jobs/mod.rs | 23 +++++ mediarepo-daemon/mediarepo-worker/src/lib.rs | 3 + .../mediarepo-worker/src/scheduler.rs | 7 ++ .../mediarepo-worker/src/state_data.rs | 84 +++++++++++++++++++ 16 files changed, 262 insertions(+), 3 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs create mode 100644 mediarepo-daemon/mediarepo-logic/src/dto/job.rs create mode 100644 mediarepo-daemon/mediarepo-logic/src/dto/job_state.rs create mode 100644 mediarepo-daemon/mediarepo-worker/Cargo.toml create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/lib.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/scheduler.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/state_data.rs diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index 741d923..7dfc9f1 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -1294,6 +1294,7 @@ name = "mediarepo-core" version = "0.1.0" dependencies = [ "base64", + "bincode", "config", "data-encoding", "futures 0.3.21", @@ -1324,6 +1325,7 @@ dependencies = [ "mediarepo-core", 
"mediarepo-logic", "mediarepo-socket", + "mediarepo-worker", "num-integer", "rolling-file", "structopt", @@ -1381,6 +1383,18 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "mediarepo-worker" +version = "0.1.0" +dependencies = [ + "async-trait", + "chrono", + "mediarepo-core", + "mediarepo-logic", + "serde", + "tokio", +] + [[package]] name = "memchr" version = "2.4.1" diff --git a/mediarepo-daemon/Cargo.toml b/mediarepo-daemon/Cargo.toml index 821495e..81deeb0 100644 --- a/mediarepo-daemon/Cargo.toml +++ b/mediarepo-daemon/Cargo.toml @@ -1,6 +1,6 @@ [workspace] -members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "."] -default-members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "."] +members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "mediarepo-worker", "."] +default-members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "mediarepo-worker", "."] [package] name = "mediarepo-daemon" @@ -37,6 +37,9 @@ path = "mediarepo-logic" [dependencies.mediarepo-socket] path = "./mediarepo-socket" +[dependencies.mediarepo-worker] +path = "./mediarepo-worker" + [dependencies.tokio] version = "1.17.0" features = ["macros", "rt-multi-thread", "io-std", "io-util"] diff --git a/mediarepo-daemon/mediarepo-core/Cargo.toml b/mediarepo-daemon/mediarepo-core/Cargo.toml index 5a007dd..09cdf3f 100644 --- a/mediarepo-daemon/mediarepo-core/Cargo.toml +++ b/mediarepo-daemon/mediarepo-core/Cargo.toml @@ -21,6 +21,7 @@ tracing = "0.1.31" data-encoding = "2.3.2" tokio-graceful-shutdown = "0.4.3" thumbnailer = "0.4.0" +bincode = "1.3.3" [dependencies.sea-orm] version = "0.6.0" diff --git a/mediarepo-daemon/mediarepo-core/src/error.rs b/mediarepo-daemon/mediarepo-core/src/error.rs index 7f5dfbc..4173ed4 100644 --- a/mediarepo-daemon/mediarepo-core/src/error.rs +++ b/mediarepo-daemon/mediarepo-core/src/error.rs @@ -43,6 +43,9 @@ pub enum 
RepoError { #[error("the database file is corrupted {0}")] Corrupted(String), + + #[error("bincode de-/serialization failed {0}")] + Bincode(#[from] bincode::Error), } #[derive(Error, Debug)] diff --git a/mediarepo-daemon/mediarepo-core/src/lib.rs b/mediarepo-daemon/mediarepo-core/src/lib.rs index a33546e..ab1a2fc 100644 --- a/mediarepo-daemon/mediarepo-core/src/lib.rs +++ b/mediarepo-daemon/mediarepo-core/src/lib.rs @@ -1,3 +1,4 @@ +pub use bincode; pub use futures; pub use itertools; pub use mediarepo_api; diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job.rs b/mediarepo-daemon/mediarepo-database/src/entities/job.rs index f87f5ba..dddf46c 100644 --- a/mediarepo-daemon/mediarepo-database/src/entities/job.rs +++ b/mediarepo-daemon/mediarepo-database/src/entities/job.rs @@ -12,7 +12,7 @@ pub struct Model { pub interval: Option, } -#[derive(Clone, Debug, PartialEq, EnumIter, DeriveActiveEnum)] +#[derive(Clone, Copy, Debug, PartialEq, EnumIter, DeriveActiveEnum)] #[sea_orm(rs_type = "u32", db_type = "Integer")] pub enum JobType { #[sea_orm(num_value = 10)] diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs index 31ab290..b2077bc 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs @@ -3,5 +3,6 @@ use crate::dao_provider; pub mod generate_missing_thumbnails; pub mod migrate_content_descriptors; pub mod sqlite_operations; +pub mod state; dao_provider!(JobDao); diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs new file mode 100644 index 0000000..db6c13b --- /dev/null +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs @@ -0,0 +1,20 @@ +use crate::dao::job::JobDao; +use crate::dto::JobStateDto; +use mediarepo_core::error::RepoResult; +use mediarepo_database::entities::job_state; +use sea_orm::prelude::*; + +impl JobDao { + /// Returns all job 
states for a given job id + pub async fn states_for_job_id(&self, job_id: i64) -> RepoResult> { + let states = job_state::Entity::find() + .filter(job_state::Column::JobId.eq(job_id)) + .all(&self.ctx.db) + .await? + .into_iter() + .map(JobStateDto::new) + .collect(); + + Ok(states) + } +} diff --git a/mediarepo-daemon/mediarepo-logic/src/dto/job.rs b/mediarepo-daemon/mediarepo-logic/src/dto/job.rs new file mode 100644 index 0000000..32a27ee --- /dev/null +++ b/mediarepo-daemon/mediarepo-logic/src/dto/job.rs @@ -0,0 +1,33 @@ +use chrono::NaiveDateTime; +use mediarepo_database::entities::job; +use mediarepo_database::entities::job::JobType; + +pub struct JobDto { + model: job::Model, +} + +impl JobDto { + pub(crate) fn new(model: job::Model) -> Self { + Self { model } + } + + pub fn id(&self) -> i64 { + self.model.id + } + + pub fn job_type(&self) -> JobType { + self.model.job_type + } + + pub fn name(&self) -> Option<&String> { + self.model.name.as_ref() + } + + pub fn next_run(&self) -> Option { + self.model.next_run + } + + pub fn interval(&self) -> Option { + self.model.interval + } +} diff --git a/mediarepo-daemon/mediarepo-logic/src/dto/job_state.rs b/mediarepo-daemon/mediarepo-logic/src/dto/job_state.rs new file mode 100644 index 0000000..05bbdce --- /dev/null +++ b/mediarepo-daemon/mediarepo-logic/src/dto/job_state.rs @@ -0,0 +1,35 @@ +use mediarepo_database::entities::job_state; + +#[derive(Clone, Debug)] +pub struct JobStateDto { + model: job_state::Model, +} + +impl JobStateDto { + pub(crate) fn new(model: job_state::Model) -> Self { + Self { model } + } + + pub fn job_id(&self) -> i64 { + self.model.job_id + } + + pub fn key(&self) -> &String { + &self.model.key + } + + pub fn value(&self) -> &[u8] { + &self.model.value + } + + pub fn into_value(self) -> Vec { + self.model.value + } +} + +#[derive(Clone, Debug)] +pub struct UpsertJobStateDto { + pub job_id: i64, + pub key: String, + pub value: Vec, +} diff --git 
a/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs b/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs index 2444214..c1285fa 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs @@ -1,5 +1,7 @@ pub use file::*; pub use file_metadata::*; +pub use job::*; +pub use job_state::*; pub use namespace::*; pub use sorting_preset::*; pub use tag::*; @@ -7,6 +9,8 @@ pub use thumbnail::*; mod file; mod file_metadata; +mod job; +mod job_state; mod namespace; mod sorting_preset; mod tag; diff --git a/mediarepo-daemon/mediarepo-worker/Cargo.toml b/mediarepo-daemon/mediarepo-worker/Cargo.toml new file mode 100644 index 0000000..96af684 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "mediarepo-worker" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = "0.1.52" + +[dependencies.mediarepo-core] +path = "../mediarepo-core" + +[dependencies.mediarepo-logic] +path = "../mediarepo-logic" + +[dependencies.tokio] +version = "1.17.0" +features = [] + +[dependencies.chrono] +version = "0.4.19" +features = ["serde"] + +[dependencies.serde] +version = "1.0.136" +features = ["derive"] \ No newline at end of file diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs new file mode 100644 index 0000000..ad9c8d8 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -0,0 +1,23 @@ +use crate::state_data::StateData; +use async_trait::async_trait; +use mediarepo_core::error::RepoResult; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dto::JobDto; + +#[async_trait] +pub trait ScheduledJob { + fn new(dto: JobDto) -> Self; + + async fn set_state(&self, state: StateData) -> RepoResult<()>; + + async fn run(&self, repo: Repo) -> RepoResult<()>; + + fn execution_state(&self) -> 
JobExecutionState; +} + +#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum JobExecutionState { + Scheduled, + Running, + Finished, +} diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs new file mode 100644 index 0000000..6046de1 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -0,0 +1,3 @@ +pub mod jobs; +pub mod scheduler; +pub mod state_data; diff --git a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs new file mode 100644 index 0000000..f7e244d --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs @@ -0,0 +1,7 @@ +use mediarepo_logic::dao::repo::Repo; + +pub struct Scheduler { + pub repo: Repo, +} + +impl Scheduler {} diff --git a/mediarepo-daemon/mediarepo-worker/src/state_data.rs b/mediarepo-daemon/mediarepo-worker/src/state_data.rs new file mode 100644 index 0000000..f197fb5 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/state_data.rs @@ -0,0 +1,84 @@ +use mediarepo_core::bincode; +use mediarepo_core::error::RepoResult; +use mediarepo_logic::dao::job::JobDao; +use mediarepo_logic::dto::UpsertJobStateDto; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Debug)] +pub struct StateData { + job_id: i64, + inner: Arc>>>, + changed_keys: Arc>>, +} + +impl StateData { + /// Loads the state from the database + pub async fn load(job_dao: JobDao, job_id: i64) -> RepoResult { + let states = job_dao.states_for_job_id(job_id).await?; + let states_map = states + .into_iter() + .map(|s| (s.key().to_owned(), s.into_value())) + .collect::>>(); + + Ok(Self { + job_id, + inner: Arc::new(RwLock::new(states_map)), + changed_keys: Default::default(), + }) + } + + /// Returns the deserialized copy of a state object from the inner map + pub async fn entry, T: DeserializeOwned>(&self, key: S) -> 
RepoResult> { + let entries = self.inner.read().await; + let entry = entries.get(key.as_ref()); + + if let Some(bytes) = entry { + let value = bincode::deserialize(bytes)?; + + Ok(Some(value)) + } else { + Ok(None) + } + } + + /// Stores an entry in inner map + pub async fn store_entry(&self, key: String, value: &T) -> RepoResult<()> { + let entry_bytes = bincode::serialize(value)?; + let mut entries = self.inner.write().await; + entries.insert(key.clone(), entry_bytes); + let mut changed_entries = self.changed_keys.write().await; + changed_entries.insert(key); + + Ok(()) + } + + /// Returns a list of all changed state objects as an upsert list + pub async fn changed_states(&self) -> Vec { + let mut upsert_list = Vec::new(); + + { + let changed_keys = self.changed_keys.read().await; + let entries = self.inner.read().await; + + for key in &*changed_keys { + if let Some(value) = entries.get(key) { + upsert_list.push(UpsertJobStateDto { + job_id: self.job_id, + key: key.to_owned(), + value: value.clone(), + }); + } + } + } + { + let mut changed_keys = self.changed_keys.write().await; + changed_keys.clear(); + } + + upsert_list + } +} From 496d720219a6f37249c0b97df0f8896793fec300 Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 26 Feb 2022 12:11:53 +0100 Subject: [PATCH 03/15] Add job scheduler implementation with progress report Signed-off-by: trivernis --- mediarepo-daemon/Cargo.lock | 1 + .../mediarepo-logic/src/dao/job/mod.rs | 21 +++ .../mediarepo-logic/src/dao/job/state.rs | 42 ++++- .../mediarepo-logic/src/dto/job.rs | 1 + mediarepo-daemon/mediarepo-worker/Cargo.toml | 3 +- .../mediarepo-worker/src/jobs/mod.rs | 7 +- .../mediarepo-worker/src/jobs_table.rs | 13 ++ mediarepo-daemon/mediarepo-worker/src/lib.rs | 2 + .../mediarepo-worker/src/progress.rs | 48 ++++++ .../mediarepo-worker/src/scheduler.rs | 161 +++++++++++++++++- .../mediarepo-worker/src/state_data.rs | 4 + 11 files changed, 295 insertions(+), 8 deletions(-) create mode 100644 
mediarepo-daemon/mediarepo-worker/src/jobs_table.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/progress.rs diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index 7dfc9f1..0a991af 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -1393,6 +1393,7 @@ dependencies = [ "mediarepo-logic", "serde", "tokio", + "tracing", ] [[package]] diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs index b2077bc..1e17ca9 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs @@ -1,4 +1,9 @@ use crate::dao_provider; +use crate::dto::JobDto; +use chrono::Local; +use mediarepo_core::error::RepoResult; +use mediarepo_database::entities::job; +use sea_orm::prelude::*; pub mod generate_missing_thumbnails; pub mod migrate_content_descriptors; @@ -6,3 +11,19 @@ pub mod sqlite_operations; pub mod state; dao_provider!(JobDao); + +impl JobDao { + /// Returns a list of all jobs that are scheduled (have a next_run date) + pub async fn scheduled_for_now(&self) -> RepoResult> { + let jobs = job::Entity::find() + .filter(job::Column::NextRun.is_not_null()) + .filter(job::Column::NextRun.lt(Local::now().naive_local())) + .all(&self.ctx.db) + .await? 
+ .into_iter() + .map(JobDto::new) + .collect(); + + Ok(jobs) + } +} diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs index db6c13b..8370305 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs @@ -1,8 +1,10 @@ use crate::dao::job::JobDao; -use crate::dto::JobStateDto; +use crate::dto::{JobStateDto, UpsertJobStateDto}; use mediarepo_core::error::RepoResult; use mediarepo_database::entities::job_state; use sea_orm::prelude::*; +use sea_orm::ActiveValue::Set; +use sea_orm::{Condition, TransactionTrait}; impl JobDao { /// Returns all job states for a given job id @@ -17,4 +19,42 @@ impl JobDao { Ok(states) } + + pub async fn upsert_multiple_states(&self, states: Vec) -> RepoResult<()> { + let trx = self.ctx.db.begin().await?; + + job_state::Entity::delete_many() + .filter(build_state_filters(&states)) + .exec(&trx) + .await?; + job_state::Entity::insert_many(build_active_state_models(states)) + .exec(&trx) + .await?; + + trx.commit().await?; + + Ok(()) + } +} + +fn build_state_filters(states: &Vec) -> Condition { + states + .iter() + .map(|s| { + Condition::all() + .add(job_state::Column::JobId.eq(s.job_id)) + .add(job_state::Column::Key.eq(s.key.to_owned())) + }) + .fold(Condition::any(), |acc, cond| acc.add(cond)) +} + +fn build_active_state_models(states: Vec) -> Vec { + states + .into_iter() + .map(|s| job_state::ActiveModel { + job_id: Set(s.job_id), + key: Set(s.key), + value: Set(s.value), + }) + .collect() } diff --git a/mediarepo-daemon/mediarepo-logic/src/dto/job.rs b/mediarepo-daemon/mediarepo-logic/src/dto/job.rs index 32a27ee..f4c186e 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dto/job.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dto/job.rs @@ -2,6 +2,7 @@ use chrono::NaiveDateTime; use mediarepo_database::entities::job; use mediarepo_database::entities::job::JobType; +#[derive(Clone, Debug)] pub struct 
JobDto { model: job::Model, } diff --git a/mediarepo-daemon/mediarepo-worker/Cargo.toml b/mediarepo-daemon/mediarepo-worker/Cargo.toml index 96af684..b5903ec 100644 --- a/mediarepo-daemon/mediarepo-worker/Cargo.toml +++ b/mediarepo-daemon/mediarepo-worker/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dependencies] async-trait = "0.1.52" +tracing = "0.1.31" [dependencies.mediarepo-core] path = "../mediarepo-core" @@ -16,7 +17,7 @@ path = "../mediarepo-logic" [dependencies.tokio] version = "1.17.0" -features = [] +features = ["macros"] [dependencies.chrono] version = "0.4.19" diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index ad9c8d8..205a5f2 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,16 +1,15 @@ +use crate::progress::JobProgressUpdate; use crate::state_data::StateData; use async_trait::async_trait; use mediarepo_core::error::RepoResult; use mediarepo_logic::dao::repo::Repo; -use mediarepo_logic::dto::JobDto; +use tokio::sync::mpsc::Sender; #[async_trait] pub trait ScheduledJob { - fn new(dto: JobDto) -> Self; - async fn set_state(&self, state: StateData) -> RepoResult<()>; - async fn run(&self, repo: Repo) -> RepoResult<()>; + async fn run(&self, sender: &mut Sender, repo: Repo) -> RepoResult<()>; fn execution_state(&self) -> JobExecutionState; } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs b/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs new file mode 100644 index 0000000..d9c30f7 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs @@ -0,0 +1,13 @@ +use crate::progress::JobProgressUpdate; +use mediarepo_logic::dto::JobDto; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; + +pub type JobsTable = Arc>>; + +#[derive(Clone, Debug)] +pub struct JobEntry { + pub dto: JobDto, + pub last_update: Option, +} diff --git 
a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index 6046de1..ac3b2ec 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -1,3 +1,5 @@ pub mod jobs; +pub mod jobs_table; +pub mod progress; pub mod scheduler; pub mod state_data; diff --git a/mediarepo-daemon/mediarepo-worker/src/progress.rs b/mediarepo-daemon/mediarepo-worker/src/progress.rs new file mode 100644 index 0000000..59659d2 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/progress.rs @@ -0,0 +1,48 @@ +use crate::jobs::JobExecutionState; + +#[derive(Clone, Debug)] +pub struct JobProgressUpdate { + id: i64, + state: JobExecutionState, + progress: Option, + total: Option, +} + +impl JobProgressUpdate { + pub fn new(id: i64) -> Self { + Self { + id, + state: JobExecutionState::Scheduled, + progress: None, + total: None, + } + } + + pub fn id(&self) -> i64 { + self.id + } + + pub fn state(&self) -> JobExecutionState { + self.state + } + + pub fn set_state(&mut self, state: JobExecutionState) { + self.state = state; + } + + pub fn progress(&self) -> Option { + self.progress + } + + pub fn set_progress(&mut self, progress: u64) { + self.progress = Some(progress); + } + + pub fn total(&self) -> Option { + self.total + } + + pub fn set_total(&mut self, total: u64) { + self.total = Some(total) + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs index f7e244d..c10d632 100644 --- a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs +++ b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs @@ -1,7 +1,164 @@ +use crate::jobs::{JobExecutionState, ScheduledJob}; +use crate::jobs_table::JobsTable; +use crate::progress::JobProgressUpdate; +use crate::state_data::StateData; +use mediarepo_core::error::RepoResult; +use mediarepo_core::futures::select; +use mediarepo_core::settings::LogLevel::Debug; +use 
mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use mediarepo_logic::dto::JobDto; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::sync::RwLock; +#[derive(Clone)] pub struct Scheduler { - pub repo: Repo, + repo: Repo, + state_data: Arc>>, + jobs_table: JobsTable, } -impl Scheduler {} +impl Scheduler { + pub fn new(repo: Repo) -> Self { + Self { + repo, + state_data: Default::default(), + jobs_table: Default::default(), + } + } + + pub fn run(self, subsystem: SubsystemHandle) -> JobsTable { + tokio::task::spawn({ + let subsystem = subsystem.clone(); + let scheduler = self.clone(); + async move { + scheduler.loop_save_states(subsystem).await; + } + }); + + let (tx, rx) = channel(32); + + tokio::task::spawn({ + let scheduler = self.clone(); + async move { + scheduler.loop_schedule(subsystem, tx).await; + } + }); + + let jobs_table = self.jobs_table.clone(); + + tokio::task::spawn(async move { self.update_on_progress(rx).await }); + + jobs_table + } + + async fn loop_schedule( + self, + subsystem: SubsystemHandle, + job_progress_sender: Sender, + ) { + loop { + if let Err(e) = self.schedule(&job_progress_sender).await { + tracing::error!("failed to schedule jobs: {}", e); + } + tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_secs(1)) => {}, + _ = subsystem.on_shutdown_requested() => { + break; + } + } + } + } + + async fn schedule(&self, job_progress_sender: &Sender) -> RepoResult<()> { + let mut scheduled_jobs = self.repo.job().scheduled_for_now().await?; + let running_jobs = self.running_jobs().await; + + scheduled_jobs.retain(|j| !running_jobs.contains(&j.id())); + + for job in scheduled_jobs { + let mut sender = job_progress_sender.clone(); + let mut progress = JobProgressUpdate::new(job.id()); + let scheduled_job = create_job(job); + let _ = sender.send(progress.clone()).await; + let repo = self.repo.clone(); + + tokio::task::spawn(async move { + progress.set_state(JobExecutionState::Running); + let _ = sender.send(progress.clone()).await; + + if let Err(e) = scheduled_job.run(&mut sender, repo).await { + tracing::error!("error occurred during job execution: {}", e); + } + progress.set_state(JobExecutionState::Finished); + let _ = sender.send(progress).await; + }); + } + + Ok(()) + } + + async fn loop_save_states(self, subsystem: SubsystemHandle) { + loop { + if let Err(e) = self.save_states().await { + tracing::error!("failed to save job state {}", e); + } + + tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_secs(1)) => {}, + _ = subsystem.on_shutdown_requested() => { + let _ = self.save_states().await; + break; + } + } + } + } + + async fn save_states(&self) -> RepoResult<()> { + let mut changed_states = Vec::new(); + { + let states = self.state_data.read().await; + for state in &*states { + changed_states.append(&mut state.changed_states().await); + } + } + self.repo + .job() + .upsert_multiple_states(changed_states) + .await?; + + Ok(()) + } + + async fn update_on_progress(mut self, mut rx: Receiver) { + while let Some(progress) = rx.recv().await { + let mut jobs_table = self.jobs_table.write().await; + + if let JobExecutionState::Finished = progress.state() { + let mut state_data = self.state_data.write().await; + state_data.retain(|s| s.job_id() != progress.id()); + } + + if let Some(entry) = jobs_table.get_mut(&progress.id()) { + entry.last_update = Some(progress); + } + } + } + + async fn running_jobs(&self) -> Vec { + let jobs_table = self.jobs_table.read().await; + jobs_table + .values() + .filter_map(|v| v.last_update.as_ref()) + .filter(|u| u.state() != JobExecutionState::Finished) + .map(|u| u.id()) + .collect() + } +} + +fn create_job(dto: JobDto) -> Box { + todo!("implement") +} diff --git a/mediarepo-daemon/mediarepo-worker/src/state_data.rs b/mediarepo-daemon/mediarepo-worker/src/state_data.rs index f197fb5..7a9f5e7 100644 --- a/mediarepo-daemon/mediarepo-worker/src/state_data.rs +++ b/mediarepo-daemon/mediarepo-worker/src/state_data.rs @@ -16,6 +16,10 @@ pub struct StateData { } impl StateData { + pub fn job_id(&self) -> i64 { + self.job_id + } + /// Loads the state from the database pub async fn load(job_dao: JobDao, job_id: i64) -> RepoResult { let states = job_dao.states_for_job_id(job_id).await?; From 0cb37e1268a397fa338cbda7e9801f1653cb3ff3 Mon Sep 17 00:00:00 2001 From: trivernis Date: Tue, 8 Mar 2022 12:03:57 +0100 Subject: [PATCH 04/15] Add vacuum job Signed-off-by: trivernis --- 
mediarepo-daemon/Cargo.lock | 1 + mediarepo-daemon/mediarepo-worker/Cargo.toml | 3 + .../mediarepo-worker/src/execution_state.rs | 58 +++++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 18 +++--- .../mediarepo-worker/src/jobs/vacuum.rs | 29 ++++++++++ mediarepo-daemon/mediarepo-worker/src/lib.rs | 1 + .../mediarepo-worker/src/progress.rs | 32 +++++++++- .../mediarepo-worker/src/scheduler.rs | 26 +++++++-- 8 files changed, 152 insertions(+), 16 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/execution_state.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index 0a991af..f032b1c 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -1390,6 +1390,7 @@ dependencies = [ "async-trait", "chrono", "mediarepo-core", + "mediarepo-database", "mediarepo-logic", "serde", "tokio", diff --git a/mediarepo-daemon/mediarepo-worker/Cargo.toml b/mediarepo-daemon/mediarepo-worker/Cargo.toml index b5903ec..d448ff5 100644 --- a/mediarepo-daemon/mediarepo-worker/Cargo.toml +++ b/mediarepo-daemon/mediarepo-worker/Cargo.toml @@ -15,6 +15,9 @@ path = "../mediarepo-core" [dependencies.mediarepo-logic] path = "../mediarepo-logic" +[dependencies.mediarepo-database] +path = "../mediarepo-database" + [dependencies.tokio] version = "1.17.0" features = ["macros"] diff --git a/mediarepo-daemon/mediarepo-worker/src/execution_state.rs b/mediarepo-daemon/mediarepo-worker/src/execution_state.rs new file mode 100644 index 0000000..82a2959 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/execution_state.rs @@ -0,0 +1,58 @@ +use std::sync::atomic::{AtomicU8, Ordering}; +use std::sync::Arc; + +#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum JobExecutionState { + Scheduled, + Running, + Finished, +} + +pub struct ExecutionStateSynchronizer { + state: Arc, +} + +impl Default for ExecutionStateSynchronizer { + fn default() -> Self 
{ + Self { + state: Arc::new(AtomicU8::new(0)), + } + } +} + +impl ExecutionStateSynchronizer { + pub fn set_scheduled(&self) { + self.state.store(0, Ordering::Relaxed); + } + + #[must_use] + pub fn set_running(&self) -> RunningHandle { + self.state.store(1, Ordering::Relaxed); + RunningHandle { + state: Arc::clone(&self.state), + } + } + + pub fn set_finished(&self) { + self.state.store(2, Ordering::SeqCst) + } + + pub fn state(&self) -> JobExecutionState { + match self.state.load(Ordering::SeqCst) { + 0 => JobExecutionState::Scheduled, + 1 => JobExecutionState::Running, + 2 => JobExecutionState::Finished, + _ => JobExecutionState::Scheduled, + } + } +} + +pub struct RunningHandle { + state: Arc<AtomicU8>, +} + +impl Drop for RunningHandle { + fn drop(&mut self) { + self.state.store(2, Ordering::SeqCst); + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index 205a5f2..4f42740 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,4 +1,9 @@ -use crate::progress::JobProgressUpdate; +mod vacuum; + +pub use vacuum::*; + +use crate::execution_state::JobExecutionState; +use crate::progress::{JobProgressUpdate, ProgressSender}; use crate::state_data::StateData; use async_trait::async_trait; use mediarepo_core::error::RepoResult; @@ -9,14 +14,5 @@ use tokio::sync::mpsc::Sender; pub trait ScheduledJob { async fn set_state(&self, state: StateData) -> RepoResult<()>; - async fn run(&self, sender: &mut Sender, repo: Repo) -> RepoResult<()>; - - fn execution_state(&self) -> JobExecutionState; -} - -#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum JobExecutionState { - Scheduled, - Running, - Finished, + async fn run(&self, sender: &ProgressSender, repo: Repo) -> RepoResult<()>; } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs new file mode 100644 index 
0000000..7527d85 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs @@ -0,0 +1,29 @@ +use crate::execution_state::{ExecutionStateSynchronizer, JobExecutionState}; +use crate::jobs::ScheduledJob; +use crate::progress::{JobProgressUpdate, ProgressSender}; +use crate::state_data::StateData; +use async_trait::async_trait; +use mediarepo_core::error::RepoResult; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; +use tokio::sync::RwLock; + +#[derive(Default, Clone)] +pub struct VacuumJob; + +#[async_trait] +impl ScheduledJob for VacuumJob { + async fn set_state(&self, _: StateData) -> RepoResult<()> { + Ok(()) + } + + async fn run(&self, sender: &ProgressSender, repo: Repo) -> RepoResult<()> { + sender.send_progress_percent(0.0); + repo.job().vacuum().await?; + sender.send_progress_percent(1.0); + + Ok(()) + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index ac3b2ec..8044986 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -1,3 +1,4 @@ +pub mod execution_state; pub mod jobs; pub mod jobs_table; pub mod progress; diff --git a/mediarepo-daemon/mediarepo-worker/src/progress.rs b/mediarepo-daemon/mediarepo-worker/src/progress.rs index 59659d2..f643089 100644 --- a/mediarepo-daemon/mediarepo-worker/src/progress.rs +++ b/mediarepo-daemon/mediarepo-worker/src/progress.rs @@ -1,4 +1,5 @@ -use crate::jobs::JobExecutionState; +use crate::execution_state::{ExecutionStateSynchronizer, JobExecutionState, RunningHandle}; +use tokio::sync::mpsc::Sender; #[derive(Clone, Debug)] pub struct JobProgressUpdate { @@ -46,3 +47,32 @@ impl JobProgressUpdate { self.total = Some(total) } } + +pub struct ProgressSender { + job_id: i64, + execution_state_sync: ExecutionStateSynchronizer, + pub inner: Sender, +} + +impl ProgressSender { + pub fn new(job_id: i64, sender: 
Sender) -> Self { + Self { + job_id, + inner: sender, + execution_state_sync: ExecutionStateSynchronizer::default(), + } + } + + pub fn send_progress(&self, progress: u64, total: u64) { + let _ = self.inner.send(JobProgressUpdate { + id: self.job_id, + state: JobExecutionState::Running, + progress: Some(progress), + total: Some(total), + }); + } + + pub fn send_progress_percent(&self, percent: f64) { + self.send_progress((percent * 100.0) as u64, 100); + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs index c10d632..3303d97 100644 --- a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs +++ b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs @@ -1,11 +1,13 @@ -use crate::jobs::{JobExecutionState, ScheduledJob}; +use crate::execution_state::JobExecutionState; +use crate::jobs::{ScheduledJob, VacuumJob}; use crate::jobs_table::JobsTable; -use crate::progress::JobProgressUpdate; +use crate::progress::{JobProgressUpdate, ProgressSender}; use crate::state_data::StateData; use mediarepo_core::error::RepoResult; use mediarepo_core::futures::select; use mediarepo_core::settings::LogLevel::Debug; use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; +use mediarepo_database::entities::job::JobType; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; use mediarepo_logic::dto::JobDto; @@ -90,9 +92,11 @@ impl Scheduler { progress.set_state(JobExecutionState::Running); let _ = sender.send(progress.clone()).await; - if let Err(e) = scheduled_job.run(&mut sender, repo).await { + let progress_sender = ProgressSender::new(progress.id(), sender); + if let Err(e) = scheduled_job.run(&progress_sender, repo).await { tracing::error!("error occurred during job execution: {}", e); } + let sender = progress_sender.inner; progress.set_state(JobExecutionState::Finished); let _ = sender.send(progress).await; }); @@ -160,5 +164,19 @@ impl Scheduler { } fn create_job(dto: JobDto) -> Box { 
- todo!("implement") + match dto.job_type() { + JobType::MigrateCDs => { + todo!() + } + JobType::CalculateSizes => { + todo!() + } + JobType::GenerateThumbs => { + todo!() + } + JobType::CheckIntegrity => { + todo!() + } + JobType::Vacuum => Box::new(VacuumJob), + } } From be3e9bbce3bd407532a47df082fbee1d193dd863 Mon Sep 17 00:00:00 2001 From: trivernis Date: Wed, 9 Mar 2022 18:39:12 +0100 Subject: [PATCH 05/15] Fix migration sql Signed-off-by: trivernis --- .../migrations/20220225183244_add-jobs-tables.sql | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql index fa24024..b7438e9 100644 --- a/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql +++ b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql @@ -7,8 +7,9 @@ CREATE TABLE jobs ( ); CREATE TABLE job_states ( - job_id INTEGER FOREIGN KEY REFERENCES jobs (id), + job_id INTEGER, key VARCHAR(128) NOT NULL DEFAULT 'default', value BLOB, - PRIMARY KEY (job_id, key) + PRIMARY KEY (job_id, key), + FOREIGN KEY (job_id) REFERENCES jobs (id) ); \ No newline at end of file From 1735d08c1deeb4cfd8a1ffa920d4ac69e9a52ede Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 12 Mar 2022 13:08:59 +0100 Subject: [PATCH 06/15] Update api dependencies Signed-off-by: trivernis --- mediarepo-api/Cargo.toml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mediarepo-api/Cargo.toml b/mediarepo-api/Cargo.toml index 4510d07..62a9c17 100644 --- a/mediarepo-api/Cargo.toml +++ b/mediarepo-api/Cargo.toml @@ -7,13 +7,13 @@ license = "gpl-3" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tracing = "0.1.30" +tracing = "0.1.32" thiserror = "1.0.30" async-trait = { version = "0.1.52", optional = true } 
parking_lot = { version = "0.12.0", optional = true } -serde_json = { version = "1.0.78", optional = true } +serde_json = { version = "1.0.79", optional = true } directories = { version = "4.0.1", optional = true } -mime_guess = { version = "2.0.3", optional = true } +mime_guess = { version = "2.0.4", optional = true } serde_piecewise_default = "0.2.0" futures = { version = "0.3.21", optional = true } url = { version = "2.2.2", optional = true } @@ -22,6 +22,7 @@ pathsearch = { version = "0.2.0", optional = true } [dependencies.bromine] version = "0.18.1" optional = true +git = "https://github.com/Trivernis/bromine" features = ["serialize_bincode"] [dependencies.serde] @@ -34,12 +35,12 @@ features = ["serde"] [dependencies.tauri] version = "1.0.0-rc.4" -optional=true +optional = true default-features = false features = [] [dependencies.tokio] -version = "1.16.1" +version = "1.17.0" optional = true features = ["sync", "fs", "net", "io-util", "io-std", "time", "rt", "process"] From a145d604d90a4571520e61b8a3ced2b31abd7ac8 Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 12 Mar 2022 17:18:49 +0100 Subject: [PATCH 07/15] Simplify job implementation Signed-off-by: trivernis --- .../mediarepo-core/src/type_list.rs | 0 .../mediarepo-database/src/entities/job.rs | 42 ---- .../mediarepo-logic/src/dto/job.rs | 34 ---- .../mediarepo-worker/src/execution_state.rs | 58 ------ .../mediarepo-worker/src/job_dispatcher.rs | 0 .../mediarepo-worker/src/jobs_table.rs | 13 -- .../mediarepo-worker/src/progress.rs | 78 -------- .../mediarepo-worker/src/scheduler.rs | 182 ------------------ .../mediarepo-worker/src/state_data.rs | 88 --------- 9 files changed, 495 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-core/src/type_list.rs delete mode 100644 mediarepo-daemon/mediarepo-database/src/entities/job.rs delete mode 100644 mediarepo-daemon/mediarepo-logic/src/dto/job.rs delete mode 100644 mediarepo-daemon/mediarepo-worker/src/execution_state.rs create mode 100644 
mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs delete mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs_table.rs delete mode 100644 mediarepo-daemon/mediarepo-worker/src/progress.rs delete mode 100644 mediarepo-daemon/mediarepo-worker/src/scheduler.rs delete mode 100644 mediarepo-daemon/mediarepo-worker/src/state_data.rs diff --git a/mediarepo-daemon/mediarepo-core/src/type_list.rs b/mediarepo-daemon/mediarepo-core/src/type_list.rs new file mode 100644 index 0000000..e69de29 diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job.rs b/mediarepo-daemon/mediarepo-database/src/entities/job.rs deleted file mode 100644 index dddf46c..0000000 --- a/mediarepo-daemon/mediarepo-database/src/entities/job.rs +++ /dev/null @@ -1,42 +0,0 @@ -use chrono::NaiveDateTime; -use sea_orm::prelude::*; - -#[derive(Clone, Debug, PartialEq, DeriveEntityModel)] -#[sea_orm(table_name = "namespaces")] -pub struct Model { - #[sea_orm(primary_key)] - pub id: i64, - pub job_type: JobType, - pub name: Option, - pub next_run: Option, - pub interval: Option, -} - -#[derive(Clone, Copy, Debug, PartialEq, EnumIter, DeriveActiveEnum)] -#[sea_orm(rs_type = "u32", db_type = "Integer")] -pub enum JobType { - #[sea_orm(num_value = 10)] - MigrateCDs, - #[sea_orm(num_value = 20)] - CalculateSizes, - #[sea_orm(num_value = 30)] - GenerateThumbs, - #[sea_orm(num_value = 40)] - CheckIntegrity, - #[sea_orm(num_value = 50)] - Vacuum, -} - -#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] -pub enum Relation { - #[sea_orm(has_many = "super::job_state::Entity")] - JobState, -} - -impl Related for Entity { - fn to() -> RelationDef { - Relation::JobState.def() - } -} - -impl ActiveModelBehavior for ActiveModel {} diff --git a/mediarepo-daemon/mediarepo-logic/src/dto/job.rs b/mediarepo-daemon/mediarepo-logic/src/dto/job.rs deleted file mode 100644 index f4c186e..0000000 --- a/mediarepo-daemon/mediarepo-logic/src/dto/job.rs +++ /dev/null @@ -1,34 +0,0 @@ -use chrono::NaiveDateTime; -use 
mediarepo_database::entities::job; -use mediarepo_database::entities::job::JobType; - -#[derive(Clone, Debug)] -pub struct JobDto { - model: job::Model, -} - -impl JobDto { - pub(crate) fn new(model: job::Model) -> Self { - Self { model } - } - - pub fn id(&self) -> i64 { - self.model.id - } - - pub fn job_type(&self) -> JobType { - self.model.job_type - } - - pub fn name(&self) -> Option<&String> { - self.model.name.as_ref() - } - - pub fn next_run(&self) -> Option { - self.model.next_run - } - - pub fn interval(&self) -> Option { - self.model.interval - } -} diff --git a/mediarepo-daemon/mediarepo-worker/src/execution_state.rs b/mediarepo-daemon/mediarepo-worker/src/execution_state.rs deleted file mode 100644 index 82a2959..0000000 --- a/mediarepo-daemon/mediarepo-worker/src/execution_state.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::sync::atomic::{AtomicU8, Ordering}; -use std::sync::Arc; - -#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum JobExecutionState { - Scheduled, - Running, - Finished, -} - -pub struct ExecutionStateSynchronizer { - state: Arc, -} - -impl Default for ExecutionStateSynchronizer { - fn default() -> Self { - Self { - state: Arc::new(AtomicU8::new(0)), - } - } -} - -impl ExecutionStateSynchronizer { - pub fn set_scheduled(&self) { - self.state.store(0, Ordering::Relaxed); - } - - #[must_use] - pub fn set_running(&self) -> RunningHandle { - self.state.store(1, Ordering::Relaxed); - RunningHandle { - state: Arc::clone(&self.state), - } - } - - pub fn set_finished(&self) { - self.state.store(2, Ordering::SeqCst) - } - - pub fn state(&self) -> JobExecutionState { - match self.state.load(Ordering::SeqCst) { - 0 => JobExecutionState::Scheduled, - 1 => JobExecutionState::Running, - 2 => JobExecutionState::Scheduled, - _ => JobExecutionState::Finished, - } - } -} - -pub struct RunningHandle { - state: Arc, -} - -impl Drop for RunningHandle { - fn drop(&mut self) { - self.state.store(2, Ordering::SeqCst); - } -} diff --git 
a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs new file mode 100644 index 0000000..e69de29 diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs b/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs deleted file mode 100644 index d9c30f7..0000000 --- a/mediarepo-daemon/mediarepo-worker/src/jobs_table.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::progress::JobProgressUpdate; -use mediarepo_logic::dto::JobDto; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; - -pub type JobsTable = Arc>>; - -#[derive(Clone, Debug)] -pub struct JobEntry { - pub dto: JobDto, - pub last_update: Option, -} diff --git a/mediarepo-daemon/mediarepo-worker/src/progress.rs b/mediarepo-daemon/mediarepo-worker/src/progress.rs deleted file mode 100644 index f643089..0000000 --- a/mediarepo-daemon/mediarepo-worker/src/progress.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::execution_state::{ExecutionStateSynchronizer, JobExecutionState, RunningHandle}; -use tokio::sync::mpsc::Sender; - -#[derive(Clone, Debug)] -pub struct JobProgressUpdate { - id: i64, - state: JobExecutionState, - progress: Option, - total: Option, -} - -impl JobProgressUpdate { - pub fn new(id: i64) -> Self { - Self { - id, - state: JobExecutionState::Scheduled, - progress: None, - total: None, - } - } - - pub fn id(&self) -> i64 { - self.id - } - - pub fn state(&self) -> JobExecutionState { - self.state - } - - pub fn set_state(&mut self, state: JobExecutionState) { - self.state = state; - } - - pub fn progress(&self) -> Option { - self.progress - } - - pub fn set_progress(&mut self, progress: u64) { - self.progress = Some(progress); - } - - pub fn total(&self) -> Option { - self.total - } - - pub fn set_total(&mut self, total: u64) { - self.total = Some(total) - } -} - -pub struct ProgressSender { - job_id: i64, - execution_state_sync: ExecutionStateSynchronizer, - pub inner: Sender, -} - -impl ProgressSender { - pub 
fn new(job_id: i64, sender: Sender) -> Self { - Self { - job_id, - inner: sender, - execution_state_sync: ExecutionStateSynchronizer::default(), - } - } - - pub fn send_progress(&self, progress: u64, total: u64) { - let _ = self.inner.send(JobProgressUpdate { - id: self.job_id, - state: JobExecutionState::Running, - progress: Some(progress), - total: Some(total), - }); - } - - pub fn send_progress_percent(&self, percent: f64) { - self.send_progress((percent * 100.0) as u64, 100); - } -} diff --git a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs b/mediarepo-daemon/mediarepo-worker/src/scheduler.rs deleted file mode 100644 index 3303d97..0000000 --- a/mediarepo-daemon/mediarepo-worker/src/scheduler.rs +++ /dev/null @@ -1,182 +0,0 @@ -use crate::execution_state::JobExecutionState; -use crate::jobs::{ScheduledJob, VacuumJob}; -use crate::jobs_table::JobsTable; -use crate::progress::{JobProgressUpdate, ProgressSender}; -use crate::state_data::StateData; -use mediarepo_core::error::RepoResult; -use mediarepo_core::futures::select; -use mediarepo_core::settings::LogLevel::Debug; -use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; -use mediarepo_database::entities::job::JobType; -use mediarepo_logic::dao::repo::Repo; -use mediarepo_logic::dao::DaoProvider; -use mediarepo_logic::dto::JobDto; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::mpsc::{channel, Receiver, Sender}; -use tokio::sync::RwLock; - -#[derive(Clone)] -pub struct Scheduler { - repo: Repo, - state_data: Arc>>, - jobs_table: JobsTable, -} - -impl Scheduler { - pub fn new(repo: Repo) -> Self { - Self { - repo, - state_data: Default::default(), - jobs_table: Default::default(), - } - } - - pub fn run(self, subsystem: SubsystemHandle) -> JobsTable { - tokio::task::spawn({ - let subsystem = subsystem.clone(); - let scheduler = self.clone(); - async move { - scheduler.loop_save_states(subsystem).await; - } - }); - - let (tx, rx) = channel(32); - - tokio::task::spawn({ - let 
scheduler = self.clone(); - async move { - scheduler.loop_schedule(subsystem, tx).await; - } - }); - - let jobs_table = self.jobs_table.clone(); - - tokio::task::spawn(async move { self.update_on_progress(rx).await }); - - jobs_table - } - - async fn loop_schedule( - self, - subsystem: SubsystemHandle, - job_progress_sender: Sender, - ) { - loop { - if let Err(e) = self.schedule(&job_progress_sender).await { - tracing::error!("failed to schedule jobs: {}", e); - } - tokio::select! { - _ = tokio::time::sleep(Duration::from_secs(1)) => {}, - _ = subsystem.on_shutdown_requested() => { - break; - } - } - } - } - - async fn schedule(&self, job_progress_sender: &Sender) -> RepoResult<()> { - let mut scheduled_jobs = self.repo.job().scheduled_for_now().await?; - let running_jobs = self.running_jobs().await; - - scheduled_jobs.retain(|j| !running_jobs.contains(&j.id())); - - for job in scheduled_jobs { - let mut sender = job_progress_sender.clone(); - let mut progress = JobProgressUpdate::new(job.id()); - let scheduled_job = create_job(job); - let _ = sender.send(progress.clone()).await; - let repo = self.repo.clone(); - - tokio::task::spawn(async move { - progress.set_state(JobExecutionState::Running); - let _ = sender.send(progress.clone()).await; - - let progress_sender = ProgressSender::new(progress.id(), sender); - if let Err(e) = scheduled_job.run(&progress_sender, repo).await { - tracing::error!("error occurred during job execution: {}", e); - } - let sender = progress_sender.inner; - progress.set_state(JobExecutionState::Finished); - let _ = sender.send(progress).await; - }); - } - - Ok(()) - } - - async fn loop_save_states(self, subsystem: SubsystemHandle) { - loop { - if let Err(e) = self.save_states().await { - tracing::error!("failed to save job state {}", e); - } - - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_secs(1)) => {}, - _ = subsystem.on_shutdown_requested() => { - let _ = self.save_states().await; - break; - } - } - } - } - - async fn save_states(&self) -> RepoResult<()> { - let mut changed_states = Vec::new(); - { - let states = self.state_data.read().await; - for state in &*states { - changed_states.append(&mut state.changed_states().await); - } - } - self.repo - .job() - .upsert_multiple_states(changed_states) - .await?; - - Ok(()) - } - - async fn update_on_progress(mut self, mut rx: Receiver) { - while let Some(progress) = rx.recv().await { - let mut jobs_table = self.jobs_table.write().await; - - if let JobExecutionState::Finished = progress.state() { - let mut state_data = self.state_data.write().await; - state_data.retain(|s| s.job_id() != progress.id()); - } - - if let Some(entry) = jobs_table.get_mut(&progress.id()) { - entry.last_update = Some(progress); - } - } - } - - async fn running_jobs(&self) -> Vec { - let jobs_table = self.jobs_table.read().await; - jobs_table - .values() - .filter_map(|v| v.last_update.as_ref()) - .filter(|u| u.state() != JobExecutionState::Finished) - .map(|u| u.id()) - .collect() - } -} - -fn create_job(dto: JobDto) -> Box { - match dto.job_type() { - JobType::MigrateCDs => { - todo!() - } - JobType::CalculateSizes => { - todo!() - } - JobType::GenerateThumbs => { - todo!() - } - JobType::CheckIntegrity => { - todo!() - } - JobType::Vacuum => Box::new(VacuumJob), - } -} diff --git a/mediarepo-daemon/mediarepo-worker/src/state_data.rs b/mediarepo-daemon/mediarepo-worker/src/state_data.rs deleted file mode 100644 index 7a9f5e7..0000000 --- a/mediarepo-daemon/mediarepo-worker/src/state_data.rs +++ /dev/null @@ -1,88 +0,0 @@ -use mediarepo_core::bincode; -use mediarepo_core::error::RepoResult; -use mediarepo_logic::dao::job::JobDao; -use mediarepo_logic::dto::UpsertJobStateDto; -use serde::de::DeserializeOwned; -use serde::Serialize; -use std::collections::{HashMap, HashSet}; -use 
std::sync::Arc; -use tokio::sync::RwLock; - -#[derive(Debug)] -pub struct StateData { - job_id: i64, - inner: Arc>>>, - changed_keys: Arc>>, -} - -impl StateData { - pub fn job_id(&self) -> i64 { - self.job_id - } - - /// Loads the state from the database - pub async fn load(job_dao: JobDao, job_id: i64) -> RepoResult { - let states = job_dao.states_for_job_id(job_id).await?; - let states_map = states - .into_iter() - .map(|s| (s.key().to_owned(), s.into_value())) - .collect::>>(); - - Ok(Self { - job_id, - inner: Arc::new(RwLock::new(states_map)), - changed_keys: Default::default(), - }) - } - - /// Returns the deserialized copy of a state object from the inner map - pub async fn entry, T: DeserializeOwned>(&self, key: S) -> RepoResult> { - let entries = self.inner.read().await; - let entry = entries.get(key.as_ref()); - - if let Some(bytes) = entry { - let value = bincode::deserialize(bytes)?; - - Ok(Some(value)) - } else { - Ok(None) - } - } - - /// Stores an entry in inner map - pub async fn store_entry(&self, key: String, value: &T) -> RepoResult<()> { - let entry_bytes = bincode::serialize(value)?; - let mut entries = self.inner.write().await; - entries.insert(key.clone(), entry_bytes); - let mut changed_entries = self.changed_keys.write().await; - changed_entries.insert(key); - - Ok(()) - } - - /// Returns a list of all changed state objects as an upsert list - pub async fn changed_states(&self) -> Vec { - let mut upsert_list = Vec::new(); - - { - let changed_keys = self.changed_keys.read().await; - let entries = self.inner.read().await; - - for key in &*changed_keys { - if let Some(value) = entries.get(key) { - upsert_list.push(UpsertJobStateDto { - job_id: self.job_id, - key: key.to_owned(), - value: value.clone(), - }); - } - } - } - { - let mut changed_keys = self.changed_keys.write().await; - changed_keys.clear(); - } - - upsert_list - } -} From 056166ee6051b25e1390df253118c3886b2f81ce Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 12 Mar 2022 
17:43:58 +0100 Subject: [PATCH 08/15] Integrate worker into main application Signed-off-by: trivernis --- mediarepo-daemon/Cargo.lock | 88 ++++++++++--------- mediarepo-daemon/Cargo.toml | 4 +- mediarepo-daemon/mediarepo-core/Cargo.toml | 7 +- mediarepo-daemon/mediarepo-core/src/lib.rs | 2 + .../mediarepo-core/src/type_list.rs | 46 ++++++++++ .../mediarepo-database/Cargo.toml | 2 +- .../20220225183244_add-jobs-tables.sql | 14 +-- .../src/entities/job_state.rs | 44 +++++++--- .../mediarepo-database/src/entities/mod.rs | 1 - mediarepo-daemon/mediarepo-logic/Cargo.toml | 3 +- .../mediarepo-logic/src/dao/job/mod.rs | 21 ----- .../mediarepo-logic/src/dao/job/state.rs | 14 ++- .../mediarepo-logic/src/dto/job_state.rs | 12 +-- .../mediarepo-logic/src/dto/mod.rs | 2 - .../mediarepo-logic/src/type_keys.rs | 3 +- mediarepo-daemon/mediarepo-socket/Cargo.toml | 2 +- mediarepo-daemon/mediarepo-socket/src/lib.rs | 26 ++---- mediarepo-daemon/mediarepo-worker/Cargo.toml | 4 +- .../mediarepo-worker/src/job_dispatcher.rs | 79 +++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 30 +++++-- .../mediarepo-worker/src/jobs/vacuum.rs | 25 +++--- mediarepo-daemon/mediarepo-worker/src/lib.rs | 31 +++++-- mediarepo-daemon/src/main.rs | 35 ++++---- 23 files changed, 317 insertions(+), 178 deletions(-) diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index cf12abc..3f7b0bc 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -63,9 +63,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.55" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "159bb86af3a200e19a068f4224eae4c8bb2d0fa054c7e5d1cacd5cef95e684cd" +checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" [[package]] name = "arrayref" @@ -270,9 +270,8 @@ dependencies = [ [[package]] name = "bromine" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dd7887995490657bf3ec578f39e747ef7b5355a8dc6c99b3d5be59ca70dc4d5" +version = "0.18.3" +source = "git+https://github.com/Trivernis/bromine#c2728a44ead210e1535bce5fc2d3979530700b96" dependencies = [ "async-trait", "bincode", @@ -301,9 +300,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" +checksum = "0e851ca7c24871e7336801608a4797d7376545b6928a10d32d75685687141ead" [[package]] name = "byteorder" @@ -709,9 +708,9 @@ dependencies = [ [[package]] name = "flume" -version = "0.10.11" +version = "0.10.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b279436a715a9de95dcd26b151db590a71961cc06e54918b24fe0dd5b7d3fc4" +checksum = "843c03199d0c0ca54bc1ea90ac0d507274c28abcc4f691ae8b4eaa375087c76a" dependencies = [ "futures-core", "futures-sink", @@ -885,7 +884,7 @@ dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -907,9 +906,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" +checksum = "62eeb471aa3e3c9197aa4bfeabfe02982f6dc96f750486c0bb0009ac58b26d2b" dependencies = [ "bytes", "fnv", @@ -1373,7 +1372,6 @@ dependencies = [ "serde", "tokio", "tracing", - "typemap_rev", ] [[package]] @@ -1465,14 +1463,15 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" +checksum = 
"7ba42135c6a5917b9db9cd7b293e5409e1c6b041e6f9825e92e55a894c63b6f8" dependencies = [ "libc", "log", "miow", "ntapi", + "wasi 0.11.0+wasi-snapshot-preview1", "winapi", ] @@ -1535,9 +1534,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "nanorand" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729eb334247daa1803e0a094d0a5c55711b85571179f5ec6e53eccfdf7008958" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ "getrandom", ] @@ -1644,18 +1643,18 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "720d3ea1055e4e4574c0c0b0f8c3fd4f24c4cdaf465948206dea090b57b526ad" +checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d992b768490d7fe0d8586d9b5745f6c49f557da6d81dc982b1d167ad4edbb21" +checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.36", @@ -1665,18 +1664,18 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ba99ba6393e2c3734791401b66902d981cb03bf190af674ca69949b6d5fb15" +checksum = "c539a50b93a303167eded6e8dff5220cd39447409fb659f4cd24b1f72fe4f133" dependencies = [ "libc", ] [[package]] name = "once_cell" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" [[package]] name = 
"opaque-debug" @@ -1947,9 +1946,9 @@ checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "png" -version = "0.17.3" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8f1882177b17c98ec33a51f5910ecbf4db92ca0def706781a1f8d0c661f393" +checksum = "dc38c0ad57efb786dd57b9864e5b18bae478c00c824dc55a38bbc9da95dde3ba" dependencies = [ "bitflags", "crc32fast", @@ -2149,18 +2148,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -2809,7 +2808,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] @@ -3039,9 +3038,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6c650a8ef0cd2dd93736f033d21cbd1224c5a967aa0c258d00fcf7dafef9b9f" +checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" dependencies = [ "cfg-if 1.0.0", "log", @@ -3052,9 +3051,9 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.0" +version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "94571df2eae3ed4353815ea5a90974a594a1792d8782ff2cbcc9392d1101f366" +checksum = "9ab026b18a46ac429e5c98bec10ca06424a97b3ad7b3949d9b4a102fff6623c4" dependencies = [ "crossbeam-channel", "time 0.3.7", @@ -3063,9 +3062,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8276d9a4a3a558d7b7ad5303ad50b53d58264641b82914b7ada36bd762e7a716" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" dependencies = [ "proc-macro2 1.0.36", "quote 1.0.15", @@ -3074,9 +3073,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" +checksum = "aa31669fa42c09c34d94d8165dd2012e8ff3c66aca50f3bb226b68f216f2706c" dependencies = [ "lazy_static", "valuable", @@ -3168,8 +3167,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typemap_rev" version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed5b74f0a24b5454580a79abb6994393b09adf0ab8070f15827cb666255de155" +source = "git+https://github.com/Trivernis/typemap_rev?rev=750c67bffe8024d2a47725daa473f068ad653fc4#750c67bffe8024d2a47725daa473f068ad653fc4" [[package]] name = "typenum" @@ -3305,6 +3303,12 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.79" diff 
--git a/mediarepo-daemon/Cargo.toml b/mediarepo-daemon/Cargo.toml index fb92df1..cf21780 100644 --- a/mediarepo-daemon/Cargo.toml +++ b/mediarepo-daemon/Cargo.toml @@ -16,12 +16,12 @@ name = "mediarepo-daemon" path = "src/main.rs" [dependencies] -tracing = "0.1.31" +tracing = "0.1.32" toml = "0.5.8" structopt = "0.3.26" glob = "0.3.0" tracing-flame = "0.2.0" -tracing-appender = "0.2.0" +tracing-appender = "0.2.1" tracing-log = "0.1.2" rolling-file = "0.1.0" num-integer = "0.1.44" diff --git a/mediarepo-daemon/mediarepo-core/Cargo.toml b/mediarepo-daemon/mediarepo-core/Cargo.toml index 3016aee..d7af792 100644 --- a/mediarepo-daemon/mediarepo-core/Cargo.toml +++ b/mediarepo-daemon/mediarepo-core/Cargo.toml @@ -13,17 +13,20 @@ multibase = "0.9.1" base64 = "0.13.0" toml = "0.5.8" serde = "1.0.136" -typemap_rev = "0.1.5" futures = "0.3.21" itertools = "0.10.3" glob = "0.3.0" -tracing = "0.1.31" +tracing = "0.1.32" data-encoding = "2.3.2" tokio-graceful-shutdown = "0.4.3" thumbnailer = "0.4.0" bincode = "1.3.3" tracing-subscriber = "0.3.9" +[dependencies.typemap_rev] +git = "https://github.com/Trivernis/typemap_rev" +rev = "750c67bffe8024d2a47725daa473f068ad653fc4" + [dependencies.sea-orm] version = "0.6.0" default-features = false diff --git a/mediarepo-daemon/mediarepo-core/src/lib.rs b/mediarepo-daemon/mediarepo-core/src/lib.rs index e9a928c..d98957f 100644 --- a/mediarepo-daemon/mediarepo-core/src/lib.rs +++ b/mediarepo-daemon/mediarepo-core/src/lib.rs @@ -5,6 +5,7 @@ pub use mediarepo_api; pub use mediarepo_api::bromine; pub use thumbnailer; pub use tokio_graceful_shutdown; +pub use typemap_rev; pub mod content_descriptor; pub mod context; @@ -13,4 +14,5 @@ pub mod fs; pub mod settings; pub mod tracing_layer_list; pub mod type_keys; +pub mod type_list; pub mod utils; diff --git a/mediarepo-daemon/mediarepo-core/src/type_list.rs b/mediarepo-daemon/mediarepo-core/src/type_list.rs index e69de29..fbe913f 100644 --- a/mediarepo-daemon/mediarepo-core/src/type_list.rs +++ 
b/mediarepo-daemon/mediarepo-core/src/type_list.rs @@ -0,0 +1,46 @@ +use std::any::{Any, TypeId}; +use std::vec::IntoIter; +use typemap_rev::TypeMapKey; + +pub trait CloneAny: Any + Send + Sync { + fn clone_any(&self) -> Box; +} + +impl CloneAny for T { + fn clone_any(&self) -> Box { + Box::new(self.clone()) + } +} + +impl Clone for Box { + fn clone(&self) -> Self { + (**self).clone_any() + } +} + +#[derive(Default, Clone)] +pub struct TypeList(Vec<(TypeId, Box)>); + +impl TypeList { + pub fn add, C: CloneAny>(&mut self, value: T::Value) { + self.0.push((TypeId::of::(), Box::new(value))) + } +} + +impl IntoIterator for TypeList { + type Item = (TypeId, Box); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0 + .into_iter() + .map(|(t, v)| { + (t, unsafe { + // SAFETY: CloneAny requires types to be Any + Send + Sync (+ Clone) + std::mem::transmute::, Box>(v) + }) + }) + .collect::)>>() + .into_iter() + } +} diff --git a/mediarepo-daemon/mediarepo-database/Cargo.toml b/mediarepo-daemon/mediarepo-database/Cargo.toml index 6ed1944..df9387a 100644 --- a/mediarepo-daemon/mediarepo-database/Cargo.toml +++ b/mediarepo-daemon/mediarepo-database/Cargo.toml @@ -8,7 +8,7 @@ workspace = ".." 
[dependencies] chrono = "0.4.19" -tracing = "0.1.31" +tracing = "0.1.32" [dependencies.mediarepo-core] path = "../mediarepo-core" diff --git a/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql index b7438e9..2a65989 100644 --- a/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql +++ b/mediarepo-daemon/mediarepo-database/migrations/20220225183244_add-jobs-tables.sql @@ -1,15 +1,5 @@ -CREATE TABLE jobs ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - job_type INTEGER NOT NULL, - name VARCHAR(255), - next_run DATETIME, - interval INTEGER -); - CREATE TABLE job_states ( - job_id INTEGER, - key VARCHAR(128) NOT NULL DEFAULT 'default', + job_type INTEGER NOT NULL, value BLOB, - PRIMARY KEY (job_id, key), - FOREIGN KEY (job_id) REFERENCES jobs (id) + PRIMARY KEY (job_type) ); \ No newline at end of file diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs index 8180a6c..01f3360 100644 --- a/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs +++ b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs @@ -1,29 +1,45 @@ use sea_orm::prelude::*; +use sea_orm::TryFromU64; #[derive(Clone, Debug, PartialEq, DeriveEntityModel)] #[sea_orm(table_name = "namespaces")] pub struct Model { #[sea_orm(primary_key)] - pub job_id: i64, - #[sea_orm(primary_key)] - pub key: String, + pub job_type: JobType, pub value: Vec, } -#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] -pub enum Relation { - #[sea_orm( - belongs_to = "super::job::Entity", - from = "Column::JobId", - to = "super::job::Column::Id" - )] - Job, +#[derive(Clone, Copy, Debug, PartialEq, EnumIter, DeriveActiveEnum)] +#[sea_orm(rs_type = "u32", db_type = "Integer")] +pub enum JobType { + #[sea_orm(num_value = 10)] + MigrateCDs, + #[sea_orm(num_value = 20)] + CalculateSizes, + 
#[sea_orm(num_value = 30)] + GenerateThumbs, + #[sea_orm(num_value = 40)] + CheckIntegrity, + #[sea_orm(num_value = 50)] + Vacuum, } -impl Related for Entity { - fn to() -> RelationDef { - Relation::Job.def() +impl TryFromU64 for JobType { + fn try_from_u64(n: u64) -> Result { + let value = match n { + 10 => Self::MigrateCDs, + 20 => Self::CalculateSizes, + 30 => Self::GenerateThumbs, + 40 => Self::CheckIntegrity, + 50 => Self::Vacuum, + _ => return Err(DbErr::Custom(String::from("Invalid job type"))), + }; + + Ok(value) } } +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + impl ActiveModelBehavior for ActiveModel {} diff --git a/mediarepo-daemon/mediarepo-database/src/entities/mod.rs b/mediarepo-daemon/mediarepo-database/src/entities/mod.rs index a29c7b5..ca4af27 100644 --- a/mediarepo-daemon/mediarepo-database/src/entities/mod.rs +++ b/mediarepo-daemon/mediarepo-database/src/entities/mod.rs @@ -3,7 +3,6 @@ pub mod content_descriptor_source; pub mod content_descriptor_tag; pub mod file; pub mod file_metadata; -pub mod job; pub mod job_state; pub mod namespace; pub mod sort_key; diff --git a/mediarepo-daemon/mediarepo-logic/Cargo.toml b/mediarepo-daemon/mediarepo-logic/Cargo.toml index 018bcd4..7880acd 100644 --- a/mediarepo-daemon/mediarepo-logic/Cargo.toml +++ b/mediarepo-daemon/mediarepo-logic/Cargo.toml @@ -8,11 +8,10 @@ workspace = ".." 
[dependencies] chrono = "0.4.19" -typemap_rev = "0.1.5" serde = "1.0.136" mime_guess = "2.0.4" mime = "0.3.16" -tracing = "0.1.31" +tracing = "0.1.32" async-trait = "0.1.52" [dependencies.mediarepo-core] diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs index 1e17ca9..b2077bc 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/mod.rs @@ -1,9 +1,4 @@ use crate::dao_provider; -use crate::dto::JobDto; -use chrono::Local; -use mediarepo_core::error::RepoResult; -use mediarepo_database::entities::job; -use sea_orm::prelude::*; pub mod generate_missing_thumbnails; pub mod migrate_content_descriptors; @@ -11,19 +6,3 @@ pub mod sqlite_operations; pub mod state; dao_provider!(JobDao); - -impl JobDao { - /// Returns a list of all jobs that are scheduled (have a next_run date) - pub async fn scheduled_for_now(&self) -> RepoResult> { - let jobs = job::Entity::find() - .filter(job::Column::NextRun.is_not_null()) - .filter(job::Column::NextRun.lt(Local::now().naive_local())) - .all(&self.ctx.db) - .await? 
- .into_iter() - .map(JobDto::new) - .collect(); - - Ok(jobs) - } -} diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs index 8370305..f125d5e 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs @@ -2,15 +2,16 @@ use crate::dao::job::JobDao; use crate::dto::{JobStateDto, UpsertJobStateDto}; use mediarepo_core::error::RepoResult; use mediarepo_database::entities::job_state; +use mediarepo_database::entities::job_state::JobType; use sea_orm::prelude::*; use sea_orm::ActiveValue::Set; use sea_orm::{Condition, TransactionTrait}; impl JobDao { /// Returns all job states for a given job id - pub async fn states_for_job_id(&self, job_id: i64) -> RepoResult> { + pub async fn states_for_job_type(&self, job_type: JobType) -> RepoResult> { let states = job_state::Entity::find() - .filter(job_state::Column::JobId.eq(job_id)) + .filter(job_state::Column::JobType.eq(job_type)) .all(&self.ctx.db) .await? 
.into_iter() @@ -40,11 +41,7 @@ impl JobDao { fn build_state_filters(states: &Vec) -> Condition { states .iter() - .map(|s| { - Condition::all() - .add(job_state::Column::JobId.eq(s.job_id)) - .add(job_state::Column::Key.eq(s.key.to_owned())) - }) + .map(|s| Condition::all().add(job_state::Column::JobType.eq(s.job_type))) .fold(Condition::any(), |acc, cond| acc.add(cond)) } @@ -52,8 +49,7 @@ fn build_active_state_models(states: Vec) -> Vec i64 { - self.model.job_id - } - - pub fn key(&self) -> &String { - &self.model.key + pub fn job_type(&self) -> JobType { + self.model.job_type } pub fn value(&self) -> &[u8] { @@ -29,7 +26,6 @@ impl JobStateDto { #[derive(Clone, Debug)] pub struct UpsertJobStateDto { - pub job_id: i64, - pub key: String, + pub job_type: JobType, pub value: Vec, } diff --git a/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs b/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs index c1285fa..638fe32 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dto/mod.rs @@ -1,6 +1,5 @@ pub use file::*; pub use file_metadata::*; -pub use job::*; pub use job_state::*; pub use namespace::*; pub use sorting_preset::*; @@ -9,7 +8,6 @@ pub use thumbnail::*; mod file; mod file_metadata; -mod job; mod job_state; mod namespace; mod sorting_preset; diff --git a/mediarepo-daemon/mediarepo-logic/src/type_keys.rs b/mediarepo-daemon/mediarepo-logic/src/type_keys.rs index bd12d0c..649405e 100644 --- a/mediarepo-daemon/mediarepo-logic/src/type_keys.rs +++ b/mediarepo-daemon/mediarepo-logic/src/type_keys.rs @@ -1,8 +1,7 @@ use std::sync::Arc; -use typemap_rev::TypeMapKey; - use crate::dao::repo::Repo; +use mediarepo_core::typemap_rev::TypeMapKey; pub struct RepoKey; diff --git a/mediarepo-daemon/mediarepo-socket/Cargo.toml b/mediarepo-daemon/mediarepo-socket/Cargo.toml index 3911bc4..5f5b75a 100644 --- a/mediarepo-daemon/mediarepo-socket/Cargo.toml +++ b/mediarepo-daemon/mediarepo-socket/Cargo.toml @@ -8,7 +8,7 @@ workspace = 
".." [dependencies] serde = "1.0.136" -tracing = "0.1.31" +tracing = "0.1.32" compare = "0.1.0" port_check = "0.1.5" rayon = "1.5.1" diff --git a/mediarepo-daemon/mediarepo-socket/src/lib.rs b/mediarepo-daemon/mediarepo-socket/src/lib.rs index b8efdfe..de4847a 100644 --- a/mediarepo-daemon/mediarepo-socket/src/lib.rs +++ b/mediarepo-daemon/mediarepo-socket/src/lib.rs @@ -1,6 +1,4 @@ use std::net::SocketAddr; -use std::path::PathBuf; -use std::sync::Arc; use tokio::net::TcpListener; use tokio::task::JoinHandle; @@ -10,20 +8,18 @@ use mediarepo_core::error::{RepoError, RepoResult}; use mediarepo_core::mediarepo_api::types::misc::InfoResponse; use mediarepo_core::settings::{PortSetting, Settings}; use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; -use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey, SubsystemKey}; -use mediarepo_logic::dao::repo::Repo; -use mediarepo_logic::type_keys::RepoKey; +use mediarepo_core::type_keys::{SizeMetadataKey, SubsystemKey}; +use mediarepo_core::type_list::TypeList; mod from_model; mod namespaces; mod utils; -#[tracing::instrument(skip(subsystem, settings, repo))] +#[tracing::instrument(skip_all)] pub fn start_tcp_server( subsystem: SubsystemHandle, - repo_path: PathBuf, settings: Settings, - repo: Repo, + shared_data: TypeList, ) -> RepoResult<(String, JoinHandle<()>)> { let port = match &settings.server.tcp.port { PortSetting::Fixed(p) => { @@ -45,9 +41,7 @@ pub fn start_tcp_server( .spawn(async move { get_builder::(address) .insert::(subsystem) - .insert::(Arc::new(repo)) - .insert::(settings) - .insert::(repo_path) + .insert_all(shared_data) .insert::(Default::default()) .build_server() .await @@ -58,13 +52,11 @@ pub fn start_tcp_server( } #[cfg(unix)] -#[tracing::instrument(skip(subsystem, settings, repo))] +#[tracing::instrument(skip_all)] pub fn create_unix_socket( subsystem: SubsystemHandle, path: std::path::PathBuf, - repo_path: PathBuf, - settings: Settings, - repo: Repo, + shared_data: 
TypeList, ) -> RepoResult> { use std::fs; use tokio::net::UnixListener; @@ -77,9 +69,7 @@ pub fn create_unix_socket( .spawn(async move { get_builder::(path) .insert::(subsystem) - .insert::(Arc::new(repo)) - .insert::(settings) - .insert::(repo_path) + .insert_all(shared_data) .insert::(Default::default()) .build_server() .await diff --git a/mediarepo-daemon/mediarepo-worker/Cargo.toml b/mediarepo-daemon/mediarepo-worker/Cargo.toml index d448ff5..abebc5e 100644 --- a/mediarepo-daemon/mediarepo-worker/Cargo.toml +++ b/mediarepo-daemon/mediarepo-worker/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] async-trait = "0.1.52" -tracing = "0.1.31" +tracing = "0.1.32" [dependencies.mediarepo-core] path = "../mediarepo-core" @@ -28,4 +28,4 @@ features = ["serde"] [dependencies.serde] version = "1.0.136" -features = ["derive"] \ No newline at end of file +features = ["derive"] diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index e69de29..41c1c22 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -0,0 +1,79 @@ +use crate::jobs::{Job, JobTypeKey}; +use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; +use mediarepo_core::typemap_rev::{TypeMap, TypeMapKey}; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use std::cell::UnsafeCell; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Clone)] +pub struct JobDispatcher { + subsystem: Arc>, + job_status_map: Arc>, + repo: Arc, +} + +impl JobDispatcher { + pub fn new(subsystem: SubsystemHandle, repo: Repo) -> Self { + Self { + job_status_map: Default::default(), + subsystem: Arc::new(UnsafeCell::new(subsystem)), + repo: Arc::new(repo), + } + } + + #[tracing::instrument(level = "debug", skip_all)] + pub async fn dispatch(&self, job: T) -> Arc> { + let status = job.status(); + self.add_status::>(status.clone()).await; 
+ + let subsystem = unsafe { + // SAFETY: the subsystem requires a mutable borrow for the start method + // the implementation of start doesn't need that mutability. So until that's + // changed we have to do some trickery. + &mut *self.subsystem.get() + }; + + let repo = self.repo.clone(); + + subsystem.start("worker-job", |subsystem| async move { + let job_2 = job.clone(); + let result = tokio::select! { + _ = subsystem.on_shutdown_requested() => { + job_2.save_status(repo.job()).await + } + r = job.run(repo.clone()) => { + + if let Err(e) = r { + Err(e) + } else { + job.save_status(repo.job()).await + } + } + }; + if let Err(e) = result { + tracing::error!("job failed with error: {}", e); + } + + Ok(()) + }); + + status + } + + #[inline] + async fn add_status(&self, status: T::Value) { + let mut status_map = self.job_status_map.write().await; + status_map.insert::(status); + } +} + +pub struct DispatcherKey; + +impl TypeMapKey for DispatcherKey { + type Value = JobDispatcher; +} + +unsafe impl Send for JobDispatcher {} +unsafe impl Sync for JobDispatcher {} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index 4f42740..8b34449 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,18 +1,34 @@ mod vacuum; +use std::marker::PhantomData; +use std::sync::Arc; pub use vacuum::*; -use crate::execution_state::JobExecutionState; -use crate::progress::{JobProgressUpdate, ProgressSender}; -use crate::state_data::StateData; use async_trait::async_trait; use mediarepo_core::error::RepoResult; +use mediarepo_core::typemap_rev::TypeMapKey; +use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; -use tokio::sync::mpsc::Sender; +use tokio::sync::RwLock; + +type EmptyStatus = Arc>; #[async_trait] -pub trait ScheduledJob { - async fn set_state(&self, state: StateData) -> RepoResult<()>; +pub trait Job: Clone + Send + Sync { 
+ type JobStatus: Send + Sync; + + fn status(&self) -> Arc>; + + async fn run(&self, repo: Arc) -> RepoResult<()>; + + async fn save_status(&self, job_dao: JobDao) -> RepoResult<()>; +} + +pub struct JobTypeKey(PhantomData); - async fn run(&self, sender: &ProgressSender, repo: Repo) -> RepoResult<()>; +impl TypeMapKey for JobTypeKey +where + T: Job, +{ + type Value = Arc>; } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs index 7527d85..0be7d76 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs @@ -1,29 +1,32 @@ -use crate::execution_state::{ExecutionStateSynchronizer, JobExecutionState}; -use crate::jobs::ScheduledJob; -use crate::progress::{JobProgressUpdate, ProgressSender}; -use crate::state_data::StateData; +use crate::jobs::{EmptyStatus, Job}; use async_trait::async_trait; use mediarepo_core::error::RepoResult; +use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; use std::sync::Arc; -use tokio::sync::mpsc::Sender; use tokio::sync::RwLock; #[derive(Default, Clone)] pub struct VacuumJob; #[async_trait] -impl ScheduledJob for VacuumJob { - async fn set_state(&self, _: StateData) -> RepoResult<()> { - Ok(()) +impl Job for VacuumJob { + type JobStatus = (); + + fn status(&self) -> Arc> { + EmptyStatus::default() } - async fn run(&self, sender: &ProgressSender, repo: Repo) -> RepoResult<()> { - sender.send_progress_percent(0.0); + #[tracing::instrument(level = "debug", skip_all)] + async fn run(&self, repo: Arc) -> RepoResult<()> { repo.job().vacuum().await?; - sender.send_progress_percent(1.0); Ok(()) } + + #[tracing::instrument(level = "debug", skip_all)] + async fn save_status(&self, _: JobDao) -> RepoResult<()> { + Ok(()) + } } diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index 8044986..b72a922 
100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -1,6 +1,27 @@ -pub mod execution_state; +use crate::job_dispatcher::JobDispatcher; +use crate::jobs::VacuumJob; +use mediarepo_core::error::RepoError; +use mediarepo_core::tokio_graceful_shutdown::Toplevel; +use mediarepo_logic::dao::repo::Repo; +use tokio::sync::oneshot::channel; + +pub mod job_dispatcher; pub mod jobs; -pub mod jobs_table; -pub mod progress; -pub mod scheduler; -pub mod state_data; + +pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) { + let (tx, rx) = channel(); + + let top_level = top_level.start("mediarepo-worker", |subsystem| async move { + let dispatcher = JobDispatcher::new(subsystem, repo); + tx.send(dispatcher.clone()) + .map_err(|_| RepoError::from("failed to send dispatcher"))?; + dispatcher.dispatch(VacuumJob::default()).await; + + Ok(()) + }); + let receiver = rx + .await + .expect("failed to create background job dispatcher"); + + (top_level, receiver) +} diff --git a/mediarepo-daemon/src/main.rs b/mediarepo-daemon/src/main.rs index f633bb4..dc4b950 100644 --- a/mediarepo-daemon/src/main.rs +++ b/mediarepo-daemon/src/main.rs @@ -1,5 +1,6 @@ use std::env; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; use structopt::StructOpt; @@ -10,8 +11,12 @@ use mediarepo_core::error::RepoResult; use mediarepo_core::fs::drop_file::DropFile; use mediarepo_core::settings::{PathSettings, Settings}; use mediarepo_core::tokio_graceful_shutdown::{SubsystemHandle, Toplevel}; +use mediarepo_core::type_keys::{RepoPathKey, SettingsKey}; +use mediarepo_core::type_list::TypeList; use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::type_keys::RepoKey; use mediarepo_socket::start_tcp_server; +use mediarepo_worker::job_dispatcher::DispatcherKey; use crate::utils::{create_paths_for_repo, get_repo, load_settings}; @@ -99,18 +104,23 @@ async fn init_repo(opt: &Opt, paths: &PathSettings) -> 
RepoResult { /// Starts the server async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> { let repo = init_repo(&opt, &settings.paths).await?; - let mut top_level = Toplevel::new(); + let (mut top_level, dispatcher) = mediarepo_worker::start(Toplevel::new(), repo.clone()).await; + + let mut shared_data = TypeList::default(); + shared_data.add::(Arc::new(repo)); + shared_data.add::(settings.clone()); + shared_data.add::(opt.repo.clone()); + shared_data.add::(dispatcher); #[cfg(unix)] { if settings.server.unix_socket.enabled { - let settings = settings.clone(); let repo_path = opt.repo.clone(); - let repo = repo.clone(); + let shared_data = shared_data.clone(); top_level = top_level.start("mediarepo-unix-socket", |subsystem| { Box::pin(async move { - start_and_await_unix_socket(subsystem, repo_path, settings, repo).await?; + start_and_await_unix_socket(subsystem, repo_path, shared_data).await?; Ok(()) }) }) @@ -120,7 +130,7 @@ async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> { if settings.server.tcp.enabled { top_level = top_level.start("mediarepo-tcp", move |subsystem| { Box::pin(async move { - start_and_await_tcp_server(subsystem, opt.repo, settings, repo).await?; + start_and_await_tcp_server(subsystem, opt.repo, settings, shared_data).await?; Ok(()) }) @@ -147,9 +157,9 @@ async fn start_and_await_tcp_server( subsystem: SubsystemHandle, repo_path: PathBuf, settings: Settings, - repo: Repo, + shared_data: TypeList, ) -> RepoResult<()> { - let (address, handle) = start_tcp_server(subsystem.clone(), repo_path.clone(), settings, repo)?; + let (address, handle) = start_tcp_server(subsystem.clone(), settings, shared_data)?; let (mut file, _guard) = DropFile::new(repo_path.join("repo.tcp")).await?; file.write_all(&address.into_bytes()).await?; @@ -172,17 +182,10 @@ async fn start_and_await_tcp_server( async fn start_and_await_unix_socket( subsystem: SubsystemHandle, repo_path: PathBuf, - settings: Settings, - repo: Repo, + shared_data: 
TypeList, ) -> RepoResult<()> { let socket_path = repo_path.join("repo.sock"); - let handle = mediarepo_socket::create_unix_socket( - subsystem.clone(), - socket_path, - repo_path.clone(), - settings, - repo, - )?; + let handle = mediarepo_socket::create_unix_socket(subsystem.clone(), socket_path, shared_data)?; let _guard = DropFile::from_path(repo_path.join("repo.sock")); tokio::select! { From a57a6f32c495cd45f57536ae740b7a3bb2fdf785 Mon Sep 17 00:00:00 2001 From: trivernis Date: Sat, 12 Mar 2022 17:54:59 +0100 Subject: [PATCH 09/15] Implement periodic job dispatching Signed-off-by: trivernis --- .../mediarepo-worker/src/job_dispatcher.rs | 60 ++++++++++++++----- mediarepo-daemon/mediarepo-worker/src/lib.rs | 5 +- 2 files changed, 49 insertions(+), 16 deletions(-) diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index 41c1c22..03f4c61 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -5,7 +5,9 @@ use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; use std::cell::UnsafeCell; use std::sync::Arc; +use std::time::Duration; use tokio::sync::RwLock; +use tokio::time::Instant; #[derive(Clone)] pub struct JobDispatcher { @@ -23,8 +25,24 @@ impl JobDispatcher { } } - #[tracing::instrument(level = "debug", skip_all)] pub async fn dispatch(&self, job: T) -> Arc> { + self._dispatch(job, None).await + } + + pub async fn dispatch_periodically( + &self, + job: T, + interval: Duration, + ) -> Arc> { + self._dispatch(job, Some(interval)).await + } + + #[tracing::instrument(level = "debug", skip_all)] + async fn _dispatch( + &self, + job: T, + interval: Option, + ) -> Arc> { let status = job.status(); self.add_status::>(status.clone()).await; @@ -37,23 +55,35 @@ impl JobDispatcher { let repo = self.repo.clone(); - subsystem.start("worker-job", |subsystem| async move { - let job_2 = job.clone(); - 
let result = tokio::select! { - _ = subsystem.on_shutdown_requested() => { - job_2.save_status(repo.job()).await - } - r = job.run(repo.clone()) => { + subsystem.start("worker-job", move |subsystem| async move { + loop { + let start = Instant::now(); + let job_2 = job.clone(); + let result = tokio::select! { + _ = subsystem.on_shutdown_requested() => { + job_2.save_status(repo.job()).await + } + r = job.run(repo.clone()) => { - if let Err(e) = r { - Err(e) - } else { - job.save_status(repo.job()).await + if let Err(e) = r { + Err(e) + } else { + job.save_status(repo.job()).await + } + } + }; + if let Err(e) = result { + tracing::error!("job failed with error: {}", e); + } + if let Some(interval) = interval { + let sleep_duration = interval - start.elapsed(); + tokio::select! { + _ = tokio::time::sleep(sleep_duration) => {}, + _ = subsystem.on_shutdown_requested() => {break} } + } else { + break; } - }; - if let Err(e) = result { - tracing::error!("job failed with error: {}", e); } Ok(()) diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index b72a922..a362087 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -3,6 +3,7 @@ use crate::jobs::VacuumJob; use mediarepo_core::error::RepoError; use mediarepo_core::tokio_graceful_shutdown::Toplevel; use mediarepo_logic::dao::repo::Repo; +use std::time::Duration; use tokio::sync::oneshot::channel; pub mod job_dispatcher; @@ -15,7 +16,9 @@ pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) let dispatcher = JobDispatcher::new(subsystem, repo); tx.send(dispatcher.clone()) .map_err(|_| RepoError::from("failed to send dispatcher"))?; - dispatcher.dispatch(VacuumJob::default()).await; + dispatcher + .dispatch_periodically(VacuumJob::default(), Duration::from_secs(60 * 30)) + .await; Ok(()) }); From 2f11c873951b369f20c74406aba2892f519a95c7 Mon Sep 17 00:00:00 2001 From: trivernis Date: 
Sat, 12 Mar 2022 18:32:22 +0100 Subject: [PATCH 10/15] Add calculate_sizes implementation as dispatchable job Signed-off-by: trivernis --- mediarepo-daemon/Cargo.lock | 1 + mediarepo-daemon/mediarepo-socket/Cargo.toml | 3 + .../mediarepo-socket/src/namespaces/jobs.rs | 35 +++++--- .../mediarepo-socket/src/namespaces/repo.rs | 4 +- .../mediarepo-socket/src/utils.rs | 37 ++------ .../mediarepo-worker/src/job_dispatcher.rs | 12 +-- .../src/jobs/calculate_sizes.rs | 90 +++++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 13 ++- .../mediarepo-worker/src/jobs/vacuum.rs | 10 +-- mediarepo-daemon/mediarepo-worker/src/lib.rs | 1 + .../mediarepo-worker/src/status_utils.rs | 34 +++++++ 11 files changed, 178 insertions(+), 62 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs create mode 100644 mediarepo-daemon/mediarepo-worker/src/status_utils.rs diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index 3f7b0bc..4596f26 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -1383,6 +1383,7 @@ dependencies = [ "mediarepo-core", "mediarepo-database", "mediarepo-logic", + "mediarepo-worker", "port_check", "rayon", "serde", diff --git a/mediarepo-daemon/mediarepo-socket/Cargo.toml b/mediarepo-daemon/mediarepo-socket/Cargo.toml index 5f5b75a..b498f81 100644 --- a/mediarepo-daemon/mediarepo-socket/Cargo.toml +++ b/mediarepo-daemon/mediarepo-socket/Cargo.toml @@ -22,6 +22,9 @@ path = "../mediarepo-database" [dependencies.mediarepo-logic] path = "../mediarepo-logic" +[dependencies.mediarepo-worker] +path = "../mediarepo-worker" + [dependencies.tokio] version = "1.17.0" features = ["net"] diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index c912b70..0fe150c 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -1,11 +1,11 @@ use 
mediarepo_core::bromine::prelude::*; use mediarepo_core::error::RepoResult; use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest}; -use mediarepo_core::mediarepo_api::types::repo::SizeType; -use mediarepo_core::type_keys::SizeMetadataKey; +use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey}; use mediarepo_logic::dao::DaoProvider; +use mediarepo_worker::jobs::{CalculateSizesJob, VacuumJob}; -use crate::utils::{calculate_size, get_repo_from_context}; +use crate::utils::{get_job_dispatcher_from_context, get_repo_from_context}; pub struct JobsNamespace; @@ -26,6 +26,7 @@ impl JobsNamespace { pub async fn run_job(ctx: &Context, event: Event) -> IPCResult { let run_request = event.payload::()?; let job_dao = get_repo_from_context(ctx).await.job(); + let dispatcher = get_job_dispatcher_from_context(ctx).await; if !run_request.sync { // early response to indicate that the job will be run @@ -36,7 +37,9 @@ impl JobsNamespace { JobType::MigrateContentDescriptors => job_dao.migrate_content_descriptors().await?, JobType::CalculateSizes => calculate_all_sizes(ctx).await?, JobType::CheckIntegrity => job_dao.check_integrity().await?, - JobType::Vacuum => job_dao.vacuum().await?, + JobType::Vacuum => { + dispatcher.dispatch(VacuumJob::default()).await; + } JobType::GenerateThumbnails => job_dao.generate_missing_thumbnails().await?, } @@ -45,14 +48,22 @@ impl JobsNamespace { } async fn calculate_all_sizes(ctx: &Context) -> RepoResult<()> { - let size_types = vec![ - SizeType::Total, - SizeType::FileFolder, - SizeType::ThumbFolder, - SizeType::DatabaseFile, - ]; - for size_type in size_types { - let size = calculate_size(&size_type, ctx).await?; + let (repo_path, settings) = { + let data = ctx.data.read().await; + ( + data.get::().unwrap().clone(), + data.get::().unwrap().clone(), + ) + }; + let job = CalculateSizesJob::new(repo_path, settings); + let dispatcher = get_job_dispatcher_from_context(ctx).await; + let state = 
dispatcher.dispatch(job).await; + let mut rx = { + let state = state.read().await; + state.sizes_channel.subscribe() + }; + + while let Ok((size_type, size)) = rx.recv().await { let mut data = ctx.data.write().await; let size_map = data.get_mut::().unwrap(); size_map.insert(size_type, size); diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs index 953f0a3..d1516c2 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs @@ -8,7 +8,7 @@ use mediarepo_core::mediarepo_api::types::repo::{ }; use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey}; -use crate::utils::{calculate_size, get_repo_from_context}; +use crate::utils::get_repo_from_context; pub struct RepoNamespace; @@ -56,7 +56,7 @@ impl RepoNamespace { let size = if let Some(size) = size_cache.get(&size_type) { *size } else { - calculate_size(&size_type, ctx).await? 
+ 0 }; ctx.response(SizeMetadata { size, size_type }) diff --git a/mediarepo-daemon/mediarepo-socket/src/utils.rs b/mediarepo-daemon/mediarepo-socket/src/utils.rs index 5e48cdc..e61681a 100644 --- a/mediarepo-daemon/mediarepo-socket/src/utils.rs +++ b/mediarepo-daemon/mediarepo-socket/src/utils.rs @@ -1,18 +1,14 @@ use std::sync::Arc; -use tokio::fs; - use mediarepo_core::bromine::ipc::context::Context; use mediarepo_core::content_descriptor::decode_content_descriptor; use mediarepo_core::error::{RepoError, RepoResult}; use mediarepo_core::mediarepo_api::types::identifier::FileIdentifier; -use mediarepo_core::mediarepo_api::types::repo::SizeType; -use mediarepo_core::type_keys::{RepoPathKey, SettingsKey}; -use mediarepo_core::utils::get_folder_size; -use mediarepo_logic::dao::DaoProvider; use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; use mediarepo_logic::dto::FileDto; use mediarepo_logic::type_keys::RepoKey; +use mediarepo_worker::job_dispatcher::{DispatcherKey, JobDispatcher}; pub async fn get_repo_from_context(ctx: &Context) -> Arc { let data = ctx.data.read().await; @@ -20,6 +16,11 @@ pub async fn get_repo_from_context(ctx: &Context) -> Arc { Arc::clone(repo) } +pub async fn get_job_dispatcher_from_context(ctx: &Context) -> JobDispatcher { + let data = ctx.data.read().await; + data.get::().unwrap().clone() +} + pub async fn file_by_identifier(identifier: FileIdentifier, repo: &Repo) -> RepoResult { let file = match identifier { FileIdentifier::ID(id) => repo.file().by_id(id).await, @@ -41,27 +42,3 @@ pub async fn cd_by_identifier(identifier: FileIdentifier, repo: &Repo) -> RepoRe FileIdentifier::CD(cd) => decode_content_descriptor(cd), } } - -pub async fn calculate_size(size_type: &SizeType, ctx: &Context) -> RepoResult { - let repo = get_repo_from_context(ctx).await; - let (repo_path, settings) = { - let data = ctx.data.read().await; - ( - data.get::().unwrap().clone(), - data.get::().unwrap().clone(), - ) - }; - let size = match 
&size_type { - SizeType::Total => get_folder_size(repo_path).await?, - SizeType::FileFolder => repo.get_main_store_size().await?, - SizeType::ThumbFolder => repo.get_thumb_store_size().await?, - SizeType::DatabaseFile => { - let db_path = settings.paths.db_file_path(&repo_path); - - let database_metadata = fs::metadata(db_path).await?; - database_metadata.len() - } - }; - - Ok(size) -} diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index 03f4c61..869ea77 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -25,7 +25,7 @@ impl JobDispatcher { } } - pub async fn dispatch(&self, job: T) -> Arc> { + pub async fn dispatch(&self, job: T) -> Arc> { self._dispatch(job, None).await } @@ -33,7 +33,7 @@ impl JobDispatcher { &self, job: T, interval: Duration, - ) -> Arc> { + ) -> Arc> { self._dispatch(job, Some(interval)).await } @@ -42,8 +42,8 @@ impl JobDispatcher { &self, job: T, interval: Option, - ) -> Arc> { - let status = job.status(); + ) -> Arc> { + let status = job.state(); self.add_status::>(status.clone()).await; let subsystem = unsafe { @@ -61,14 +61,14 @@ impl JobDispatcher { let job_2 = job.clone(); let result = tokio::select! 
{ _ = subsystem.on_shutdown_requested() => { - job_2.save_status(repo.job()).await + job_2.save_state(repo.job()).await } r = job.run(repo.clone()) => { if let Err(e) = r { Err(e) } else { - job.save_status(repo.job()).await + job.save_state(repo.job()).await } } }; diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs new file mode 100644 index 0000000..6f13a60 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs @@ -0,0 +1,90 @@ +use crate::jobs::Job; +use crate::status_utils::SimpleProgress; +use async_trait::async_trait; +use mediarepo_core::error::{RepoError, RepoResult}; +use mediarepo_core::mediarepo_api::types::repo::SizeType; +use mediarepo_core::settings::Settings; +use mediarepo_core::utils::get_folder_size; +use mediarepo_logic::dao::repo::Repo; +use std::path::PathBuf; +use std::sync::Arc; +use tokio::fs; +use tokio::sync::broadcast::{self, Sender}; +use tokio::sync::RwLock; + +pub struct CalculateSizesState { + pub progress: SimpleProgress, + pub sizes_channel: Sender<(SizeType, u64)>, +} + +#[derive(Clone)] +pub struct CalculateSizesJob { + repo_path: PathBuf, + settings: Settings, + state: Arc>, +} + +impl CalculateSizesJob { + pub fn new(repo_path: PathBuf, settings: Settings) -> Self { + let (tx, _) = broadcast::channel(4); + Self { + repo_path, + settings, + state: Arc::new(RwLock::new(CalculateSizesState { + sizes_channel: tx, + progress: SimpleProgress::new(4), + })), + } + } +} + +#[async_trait] +impl Job for CalculateSizesJob { + type JobState = CalculateSizesState; + + fn state(&self) -> Arc> { + self.state.clone() + } + + #[tracing::instrument(level = "debug", skip_all)] + async fn run(&self, repo: Arc) -> RepoResult<()> { + let size_types = vec![ + SizeType::Total, + SizeType::FileFolder, + SizeType::ThumbFolder, + SizeType::DatabaseFile, + ]; + for size_type in size_types { + let size = calculate_size(&size_type, &repo, 
&self.repo_path, &self.settings).await?; + let mut state = self.state.write().await; + state + .sizes_channel + .send((size_type, size)) + .map_err(|_| RepoError::from("failed to broadcast new size"))?; + state.progress.tick(); + } + + Ok(()) + } +} + +async fn calculate_size( + size_type: &SizeType, + repo: &Repo, + repo_path: &PathBuf, + settings: &Settings, +) -> RepoResult { + let size = match &size_type { + SizeType::Total => get_folder_size(repo_path.clone()).await?, + SizeType::FileFolder => repo.get_main_store_size().await?, + SizeType::ThumbFolder => repo.get_thumb_store_size().await?, + SizeType::DatabaseFile => { + let db_path = settings.paths.db_file_path(repo_path); + + let database_metadata = fs::metadata(db_path).await?; + database_metadata.len() + } + }; + + Ok(size) +} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index 8b34449..607ea62 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,5 +1,7 @@ +mod calculate_sizes; mod vacuum; +pub use calculate_sizes::*; use std::marker::PhantomData; use std::sync::Arc; pub use vacuum::*; @@ -15,13 +17,16 @@ type EmptyStatus = Arc>; #[async_trait] pub trait Job: Clone + Send + Sync { - type JobStatus: Send + Sync; + type JobState: Send + Sync; - fn status(&self) -> Arc>; + fn state(&self) -> Arc>; async fn run(&self, repo: Arc) -> RepoResult<()>; - async fn save_status(&self, job_dao: JobDao) -> RepoResult<()>; + #[tracing::instrument(level = "debug", skip_all)] + async fn save_state(&self, _job_dao: JobDao) -> RepoResult<()> { + Ok(()) + } } pub struct JobTypeKey(PhantomData); @@ -30,5 +35,5 @@ impl TypeMapKey for JobTypeKey where T: Job, { - type Value = Arc>; + type Value = Arc>; } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs index 0be7d76..5335de6 100644 --- 
a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs @@ -1,7 +1,6 @@ use crate::jobs::{EmptyStatus, Job}; use async_trait::async_trait; use mediarepo_core::error::RepoResult; -use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; use std::sync::Arc; @@ -12,9 +11,9 @@ pub struct VacuumJob; #[async_trait] impl Job for VacuumJob { - type JobStatus = (); + type JobState = (); - fn status(&self) -> Arc> { + fn state(&self) -> Arc> { EmptyStatus::default() } @@ -24,9 +23,4 @@ impl Job for VacuumJob { Ok(()) } - - #[tracing::instrument(level = "debug", skip_all)] - async fn save_status(&self, _: JobDao) -> RepoResult<()> { - Ok(()) - } } diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index a362087..617eb4f 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -8,6 +8,7 @@ use tokio::sync::oneshot::channel; pub mod job_dispatcher; pub mod jobs; +pub mod status_utils; pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) { let (tx, rx) = channel(); diff --git a/mediarepo-daemon/mediarepo-worker/src/status_utils.rs b/mediarepo-daemon/mediarepo-worker/src/status_utils.rs new file mode 100644 index 0000000..2cdafc4 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/status_utils.rs @@ -0,0 +1,34 @@ +pub struct SimpleProgress { + pub current: u64, + pub total: u64, +} + +impl Default for SimpleProgress { + fn default() -> Self { + Self { + total: 100, + current: 0, + } + } +} + +impl SimpleProgress { + pub fn new(total: u64) -> Self { + Self { total, current: 0 } + } + + /// Increments the current progress by 1 + pub fn tick(&mut self) { + self.current += 1; + } + + /// Sets the current progress to a defined value + pub fn set_current(&mut self, current: u64) { + self.current = current; + } + + /// Returns the 
total progress in percent + pub fn percent(&self) -> f64 { + (self.current as f64) / (self.total as f64) + } +} From 22f28a79d105e26040a015abc4538fa75579c511 Mon Sep 17 00:00:00 2001 From: trivernis Date: Wed, 16 Mar 2022 21:40:13 +0100 Subject: [PATCH 11/15] Update to latest bromine Signed-off-by: trivernis --- mediarepo-api/Cargo.toml | 3 +- mediarepo-daemon/Cargo.lock | 39 ++++++---- mediarepo-daemon/mediarepo-core/Cargo.toml | 7 +- mediarepo-daemon/mediarepo-core/src/lib.rs | 3 +- .../mediarepo-core/src/type_keys.rs | 2 +- .../mediarepo-core/src/type_list.rs | 46 ----------- .../mediarepo-logic/src/type_keys.rs | 2 +- mediarepo-daemon/mediarepo-socket/src/lib.rs | 6 +- .../mediarepo-socket/src/namespaces/jobs.rs | 1 + .../mediarepo-socket/src/namespaces/repo.rs | 1 + .../mediarepo-socket/src/utils.rs | 1 + .../mediarepo-worker/src/job_dispatcher.rs | 78 +++++++++---------- .../mediarepo-worker/src/jobs/mod.rs | 2 +- mediarepo-daemon/src/main.rs | 32 +++++--- 14 files changed, 97 insertions(+), 126 deletions(-) delete mode 100644 mediarepo-daemon/mediarepo-core/src/type_list.rs diff --git a/mediarepo-api/Cargo.toml b/mediarepo-api/Cargo.toml index 62a9c17..ba7b42b 100644 --- a/mediarepo-api/Cargo.toml +++ b/mediarepo-api/Cargo.toml @@ -20,9 +20,8 @@ url = { version = "2.2.2", optional = true } pathsearch = { version = "0.2.0", optional = true } [dependencies.bromine] -version = "0.18.1" +version = "0.19.0" optional = true -git = "https://github.com/Trivernis/bromine" features = ["serialize_bincode"] [dependencies.serde] diff --git a/mediarepo-daemon/Cargo.lock b/mediarepo-daemon/Cargo.lock index 4596f26..5c4ebb6 100644 --- a/mediarepo-daemon/Cargo.lock +++ b/mediarepo-daemon/Cargo.lock @@ -81,9 +81,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-recursion" -version = "0.3.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" +checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" dependencies = [ "proc-macro2 1.0.36", "quote 1.0.15", @@ -270,8 +270,9 @@ dependencies = [ [[package]] name = "bromine" -version = "0.18.3" -source = "git+https://github.com/Trivernis/bromine#c2728a44ead210e1535bce5fc2d3979530700b96" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a05cd0cd5646e705df88816dcc36eaf4e21b940cea66f1e027970cd58e3dc897" dependencies = [ "async-trait", "bincode", @@ -283,7 +284,7 @@ dependencies = [ "thiserror", "tokio", "tracing", - "typemap_rev", + "trait-bound-typemap", ] [[package]] @@ -1318,7 +1319,7 @@ dependencies = [ "toml", "tracing", "tracing-subscriber", - "typemap_rev", + "trait-bound-typemap", ] [[package]] @@ -1485,6 +1486,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "multi-trait-object" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a54c9ed2b86c7927b63e7d51f8d7ed4e1f8513c8672828ca1a850ff9d32ab1c" + [[package]] name = "multibase" version = "0.9.1" @@ -2871,16 +2878,16 @@ dependencies = [ [[package]] name = "tokio-graceful-shutdown" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08ebea7dc6b22273290d8ece2ca448f979f836e38ba629b650595c64204b4f2" +checksum = "4f21c36e43c82d5f32302aff8ac9efb79e10db9538b0940ef69cce38a01614ae" dependencies = [ "anyhow", "async-recursion", "futures 0.3.21", "log", "tokio", - "tokio-util 0.6.9", + "tokio-util 0.7.0", ] [[package]] @@ -3159,17 +3166,21 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "trait-bound-typemap" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3631df5ba73c0e41b1aa337df3bcca7f15219f042f8fec1100857bc1eb60c767" +dependencies = [ + "multi-trait-object", +] + [[package]] name = "try-lock" version = 
"0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "typemap_rev" -version = "0.1.5" -source = "git+https://github.com/Trivernis/typemap_rev?rev=750c67bffe8024d2a47725daa473f068ad653fc4#750c67bffe8024d2a47725daa473f068ad653fc4" - [[package]] name = "typenum" version = "1.15.0" diff --git a/mediarepo-daemon/mediarepo-core/Cargo.toml b/mediarepo-daemon/mediarepo-core/Cargo.toml index d7af792..48d2bbc 100644 --- a/mediarepo-daemon/mediarepo-core/Cargo.toml +++ b/mediarepo-daemon/mediarepo-core/Cargo.toml @@ -18,14 +18,11 @@ itertools = "0.10.3" glob = "0.3.0" tracing = "0.1.32" data-encoding = "2.3.2" -tokio-graceful-shutdown = "0.4.3" +tokio-graceful-shutdown = "0.4.4" thumbnailer = "0.4.0" bincode = "1.3.3" tracing-subscriber = "0.3.9" - -[dependencies.typemap_rev] -git = "https://github.com/Trivernis/typemap_rev" -rev = "750c67bffe8024d2a47725daa473f068ad653fc4" +trait-bound-typemap = "0.3.3" [dependencies.sea-orm] version = "0.6.0" diff --git a/mediarepo-daemon/mediarepo-core/src/lib.rs b/mediarepo-daemon/mediarepo-core/src/lib.rs index d98957f..1edc42d 100644 --- a/mediarepo-daemon/mediarepo-core/src/lib.rs +++ b/mediarepo-daemon/mediarepo-core/src/lib.rs @@ -5,7 +5,7 @@ pub use mediarepo_api; pub use mediarepo_api::bromine; pub use thumbnailer; pub use tokio_graceful_shutdown; -pub use typemap_rev; +pub use trait_bound_typemap; pub mod content_descriptor; pub mod context; @@ -14,5 +14,4 @@ pub mod fs; pub mod settings; pub mod tracing_layer_list; pub mod type_keys; -pub mod type_list; pub mod utils; diff --git a/mediarepo-daemon/mediarepo-core/src/type_keys.rs b/mediarepo-daemon/mediarepo-core/src/type_keys.rs index 8cebd2c..778bcb8 100644 --- a/mediarepo-daemon/mediarepo-core/src/type_keys.rs +++ b/mediarepo-daemon/mediarepo-core/src/type_keys.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use mediarepo_api::types::repo::SizeType; use 
tokio_graceful_shutdown::SubsystemHandle; -use typemap_rev::TypeMapKey; +use trait_bound_typemap::TypeMapKey; use crate::settings::Settings; diff --git a/mediarepo-daemon/mediarepo-core/src/type_list.rs b/mediarepo-daemon/mediarepo-core/src/type_list.rs deleted file mode 100644 index fbe913f..0000000 --- a/mediarepo-daemon/mediarepo-core/src/type_list.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::any::{Any, TypeId}; -use std::vec::IntoIter; -use typemap_rev::TypeMapKey; - -pub trait CloneAny: Any + Send + Sync { - fn clone_any(&self) -> Box; -} - -impl CloneAny for T { - fn clone_any(&self) -> Box { - Box::new(self.clone()) - } -} - -impl Clone for Box { - fn clone(&self) -> Self { - (**self).clone_any() - } -} - -#[derive(Default, Clone)] -pub struct TypeList(Vec<(TypeId, Box)>); - -impl TypeList { - pub fn add, C: CloneAny>(&mut self, value: T::Value) { - self.0.push((TypeId::of::(), Box::new(value))) - } -} - -impl IntoIterator for TypeList { - type Item = (TypeId, Box); - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0 - .into_iter() - .map(|(t, v)| { - (t, unsafe { - // SAFETY: CloneAny requires types to be Any + Send + Sync (+ Clone) - std::mem::transmute::, Box>(v) - }) - }) - .collect::)>>() - .into_iter() - } -} diff --git a/mediarepo-daemon/mediarepo-logic/src/type_keys.rs b/mediarepo-daemon/mediarepo-logic/src/type_keys.rs index 649405e..b19a866 100644 --- a/mediarepo-daemon/mediarepo-logic/src/type_keys.rs +++ b/mediarepo-daemon/mediarepo-logic/src/type_keys.rs @@ -1,7 +1,7 @@ +use mediarepo_core::trait_bound_typemap::TypeMapKey; use std::sync::Arc; use crate::dao::repo::Repo; -use mediarepo_core::typemap_rev::TypeMapKey; pub struct RepoKey; diff --git a/mediarepo-daemon/mediarepo-socket/src/lib.rs b/mediarepo-daemon/mediarepo-socket/src/lib.rs index de4847a..5818419 100644 --- a/mediarepo-daemon/mediarepo-socket/src/lib.rs +++ b/mediarepo-daemon/mediarepo-socket/src/lib.rs @@ -8,8 +8,8 @@ use 
mediarepo_core::error::{RepoError, RepoResult}; use mediarepo_core::mediarepo_api::types::misc::InfoResponse; use mediarepo_core::settings::{PortSetting, Settings}; use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; +use mediarepo_core::trait_bound_typemap::{SendSyncTypeMap, TypeMap}; use mediarepo_core::type_keys::{SizeMetadataKey, SubsystemKey}; -use mediarepo_core::type_list::TypeList; mod from_model; mod namespaces; @@ -19,7 +19,7 @@ mod utils; pub fn start_tcp_server( subsystem: SubsystemHandle, settings: Settings, - shared_data: TypeList, + shared_data: SendSyncTypeMap, ) -> RepoResult<(String, JoinHandle<()>)> { let port = match &settings.server.tcp.port { PortSetting::Fixed(p) => { @@ -56,7 +56,7 @@ pub fn start_tcp_server( pub fn create_unix_socket( subsystem: SubsystemHandle, path: std::path::PathBuf, - shared_data: TypeList, + shared_data: SendSyncTypeMap, ) -> RepoResult> { use std::fs; use tokio::net::UnixListener; diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index 0fe150c..998d279 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -1,3 +1,4 @@ +use crate::TypeMap; use mediarepo_core::bromine::prelude::*; use mediarepo_core::error::RepoResult; use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest}; diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs index d1516c2..05d605f 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/repo.rs @@ -2,6 +2,7 @@ use std::path::PathBuf; use tokio::fs; +use crate::TypeMap; use mediarepo_core::bromine::prelude::*; use mediarepo_core::mediarepo_api::types::repo::{ FrontendState, RepositoryMetadata, SizeMetadata, SizeType, diff --git a/mediarepo-daemon/mediarepo-socket/src/utils.rs 
b/mediarepo-daemon/mediarepo-socket/src/utils.rs index e61681a..2c3fc57 100644 --- a/mediarepo-daemon/mediarepo-socket/src/utils.rs +++ b/mediarepo-daemon/mediarepo-socket/src/utils.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use crate::TypeMap; use mediarepo_core::bromine::ipc::context::Context; use mediarepo_core::content_descriptor::decode_content_descriptor; use mediarepo_core::error::{RepoError, RepoResult}; diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index 869ea77..5d17e05 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -1,9 +1,8 @@ use crate::jobs::{Job, JobTypeKey}; use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; -use mediarepo_core::typemap_rev::{TypeMap, TypeMapKey}; +use mediarepo_core::trait_bound_typemap::{SendSyncTypeMap, TypeMap, TypeMapKey}; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; -use std::cell::UnsafeCell; use std::sync::Arc; use std::time::Duration; use tokio::sync::RwLock; @@ -11,16 +10,16 @@ use tokio::time::Instant; #[derive(Clone)] pub struct JobDispatcher { - subsystem: Arc>, - job_status_map: Arc>, + subsystem: SubsystemHandle, + job_status_map: Arc>, repo: Arc, } impl JobDispatcher { pub fn new(subsystem: SubsystemHandle, repo: Repo) -> Self { Self { - job_status_map: Default::default(), - subsystem: Arc::new(UnsafeCell::new(subsystem)), + job_status_map: Arc::new(RwLock::new(SendSyncTypeMap::new())), + subsystem, repo: Arc::new(repo), } } @@ -46,54 +45,51 @@ impl JobDispatcher { let status = job.state(); self.add_status::>(status.clone()).await; - let subsystem = unsafe { - // SAFETY: the subsystem requires a mutable borrow for the start method - // the implementation of start doesn't need that mutability. So until that's - // changed we have to do some trickery. 
- &mut *self.subsystem.get() - }; - let repo = self.repo.clone(); - subsystem.start("worker-job", move |subsystem| async move { - loop { - let start = Instant::now(); - let job_2 = job.clone(); - let result = tokio::select! { - _ = subsystem.on_shutdown_requested() => { - job_2.save_state(repo.job()).await - } - r = job.run(repo.clone()) => { + self.subsystem + .start("worker-job", move |subsystem| async move { + loop { + let start = Instant::now(); + let job_2 = job.clone(); + let result = tokio::select! { + _ = subsystem.on_shutdown_requested() => { + job_2.save_state(repo.job()).await + } + r = job.run(repo.clone()) => { - if let Err(e) = r { - Err(e) - } else { - job.save_state(repo.job()).await + if let Err(e) = r { + Err(e) + } else { + job.save_state(repo.job()).await + } } + }; + if let Err(e) = result { + tracing::error!("job failed with error: {}", e); } - }; - if let Err(e) = result { - tracing::error!("job failed with error: {}", e); - } - if let Some(interval) = interval { - let sleep_duration = interval - start.elapsed(); - tokio::select! { - _ = tokio::time::sleep(sleep_duration) => {}, - _ = subsystem.on_shutdown_requested() => {break} + if let Some(interval) = interval { + let sleep_duration = interval - start.elapsed(); + tokio::select! 
{ + _ = tokio::time::sleep(sleep_duration) => {}, + _ = subsystem.on_shutdown_requested() => {break} + } + } else { + break; } - } else { - break; } - } - Ok(()) - }); + Ok(()) + }); status } #[inline] - async fn add_status(&self, status: T::Value) { + async fn add_status(&self, status: T::Value) + where + ::Value: Send + Sync, + { let mut status_map = self.job_status_map.write().await; status_map.insert::(status); } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index 607ea62..afb8075 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -8,7 +8,7 @@ pub use vacuum::*; use async_trait::async_trait; use mediarepo_core::error::RepoResult; -use mediarepo_core::typemap_rev::TypeMapKey; +use mediarepo_core::trait_bound_typemap::TypeMapKey; use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; use tokio::sync::RwLock; diff --git a/mediarepo-daemon/src/main.rs b/mediarepo-daemon/src/main.rs index dc4b950..d28a49c 100644 --- a/mediarepo-daemon/src/main.rs +++ b/mediarepo-daemon/src/main.rs @@ -1,4 +1,5 @@ use std::env; +use std::iter::FromIterator; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -11,8 +12,8 @@ use mediarepo_core::error::RepoResult; use mediarepo_core::fs::drop_file::DropFile; use mediarepo_core::settings::{PathSettings, Settings}; use mediarepo_core::tokio_graceful_shutdown::{SubsystemHandle, Toplevel}; +use mediarepo_core::trait_bound_typemap::{CloneSendSyncTypeMap, SendSyncTypeMap, TypeMap}; use mediarepo_core::type_keys::{RepoPathKey, SettingsKey}; -use mediarepo_core::type_list::TypeList; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::type_keys::RepoKey; use mediarepo_socket::start_tcp_server; @@ -106,11 +107,11 @@ async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> { let repo = init_repo(&opt, &settings.paths).await?; let (mut top_level, 
dispatcher) = mediarepo_worker::start(Toplevel::new(), repo.clone()).await; - let mut shared_data = TypeList::default(); - shared_data.add::(Arc::new(repo)); - shared_data.add::(settings.clone()); - shared_data.add::(opt.repo.clone()); - shared_data.add::(dispatcher); + let mut shared_data = CloneSendSyncTypeMap::new(); + shared_data.insert::(Arc::new(repo)); + shared_data.insert::(settings.clone()); + shared_data.insert::(opt.repo.clone()); + shared_data.insert::(dispatcher); #[cfg(unix)] { @@ -120,7 +121,12 @@ async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> { top_level = top_level.start("mediarepo-unix-socket", |subsystem| { Box::pin(async move { - start_and_await_unix_socket(subsystem, repo_path, shared_data).await?; + start_and_await_unix_socket( + subsystem, + repo_path, + SendSyncTypeMap::from_iter(shared_data), + ) + .await?; Ok(()) }) }) @@ -130,7 +136,13 @@ async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> { if settings.server.tcp.enabled { top_level = top_level.start("mediarepo-tcp", move |subsystem| { Box::pin(async move { - start_and_await_tcp_server(subsystem, opt.repo, settings, shared_data).await?; + start_and_await_tcp_server( + subsystem, + opt.repo, + settings, + SendSyncTypeMap::from_iter(shared_data), + ) + .await?; Ok(()) }) @@ -157,7 +169,7 @@ async fn start_and_await_tcp_server( subsystem: SubsystemHandle, repo_path: PathBuf, settings: Settings, - shared_data: TypeList, + shared_data: SendSyncTypeMap, ) -> RepoResult<()> { let (address, handle) = start_tcp_server(subsystem.clone(), settings, shared_data)?; let (mut file, _guard) = DropFile::new(repo_path.join("repo.tcp")).await?; @@ -182,7 +194,7 @@ async fn start_and_await_tcp_server( async fn start_and_await_unix_socket( subsystem: SubsystemHandle, repo_path: PathBuf, - shared_data: TypeList, + shared_data: SendSyncTypeMap, ) -> RepoResult<()> { let socket_path = repo_path.join("repo.sock"); let handle = 
mediarepo_socket::create_unix_socket(subsystem.clone(), socket_path, shared_data)?; From a87c341867ab2c878c280e302cbcbcf5cc9fb6aa Mon Sep 17 00:00:00 2001 From: trivernis Date: Wed, 23 Mar 2022 19:48:25 +0100 Subject: [PATCH 12/15] Add job implementation to generate missing thumbnails Signed-off-by: trivernis --- .../mediarepo-socket/src/namespaces/jobs.rs | 8 +++- .../src/jobs/generate_missing_thumbnails.rs | 46 +++++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 2 + .../mediarepo-worker/src/status_utils.rs | 5 ++ 4 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index 998d279..568c283 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -4,7 +4,7 @@ use mediarepo_core::error::RepoResult; use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest}; use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey}; use mediarepo_logic::dao::DaoProvider; -use mediarepo_worker::jobs::{CalculateSizesJob, VacuumJob}; +use mediarepo_worker::jobs::{CalculateSizesJob, GenerateMissingThumbsJob, VacuumJob}; use crate::utils::{get_job_dispatcher_from_context, get_repo_from_context}; @@ -41,7 +41,11 @@ impl JobsNamespace { JobType::Vacuum => { dispatcher.dispatch(VacuumJob::default()).await; } - JobType::GenerateThumbnails => job_dao.generate_missing_thumbnails().await?, + JobType::GenerateThumbnails => { + dispatcher + .dispatch(GenerateMissingThumbsJob::default()) + .await; + } } Ok(Response::empty()) diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs new file mode 100644 index 0000000..9483fd4 --- /dev/null +++ 
b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs @@ -0,0 +1,46 @@ +use crate::jobs::Job; +use crate::status_utils::SimpleProgress; +use async_trait::async_trait; +use mediarepo_core::error::RepoResult; +use mediarepo_core::thumbnailer::ThumbnailSize; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Clone, Default)] +pub struct GenerateMissingThumbsJob { + state: Arc>, +} + +#[async_trait] +impl Job for GenerateMissingThumbsJob { + type JobState = SimpleProgress; + + fn state(&self) -> Arc> { + self.state.clone() + } + + async fn run(&self, repo: Arc) -> RepoResult<()> { + let file_dao = repo.file(); + let all_files = file_dao.all().await?; + { + let mut progress = self.state.write().await; + progress.set_total(all_files.len() as u64); + } + + for file in all_files { + if file_dao.thumbnails(file.encoded_cd()).await?.is_empty() { + let _ = file_dao + .create_thumbnails(&file, vec![ThumbnailSize::Medium]) + .await; + } + { + let mut progress = self.state.write().await; + progress.tick(); + } + } + + Ok(()) + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index afb8075..73f1887 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,7 +1,9 @@ mod calculate_sizes; +mod generate_missing_thumbnails; mod vacuum; pub use calculate_sizes::*; +pub use generate_missing_thumbnails::*; use std::marker::PhantomData; use std::sync::Arc; pub use vacuum::*; diff --git a/mediarepo-daemon/mediarepo-worker/src/status_utils.rs b/mediarepo-daemon/mediarepo-worker/src/status_utils.rs index 2cdafc4..0045cc7 100644 --- a/mediarepo-daemon/mediarepo-worker/src/status_utils.rs +++ b/mediarepo-daemon/mediarepo-worker/src/status_utils.rs @@ -17,6 +17,11 @@ impl SimpleProgress { Self { total, current: 0 } } + /// Sets the total count + pub fn 
set_total(&mut self, total: u64) { + self.total = total; + } + /// Increments the current progress by 1 pub fn tick(&mut self) { self.current += 1; From e9dbbd1bd5855cbb39860fb03817166b4fed801b Mon Sep 17 00:00:00 2001 From: trivernis Date: Sun, 27 Mar 2022 12:45:59 +0200 Subject: [PATCH 13/15] Add job handle to jobs to wait for results Signed-off-by: trivernis --- .../mediarepo-socket/src/namespaces/jobs.rs | 16 ++- .../mediarepo-worker/src/handle.rs | 102 ++++++++++++++++++ .../mediarepo-worker/src/job_dispatcher.rs | 55 +++++++--- .../src/jobs/calculate_sizes.rs | 5 +- .../src/jobs/generate_missing_thumbnails.rs | 5 +- .../mediarepo-worker/src/jobs/mod.rs | 10 +- .../mediarepo-worker/src/jobs/vacuum.rs | 5 +- mediarepo-daemon/mediarepo-worker/src/lib.rs | 1 + 8 files changed, 169 insertions(+), 30 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/handle.rs diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index 568c283..273dd11 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -39,12 +39,18 @@ impl JobsNamespace { JobType::CalculateSizes => calculate_all_sizes(ctx).await?, JobType::CheckIntegrity => job_dao.check_integrity().await?, JobType::Vacuum => { - dispatcher.dispatch(VacuumJob::default()).await; + let mut handle = dispatcher.dispatch(VacuumJob::default()).await; + if run_request.sync { + handle.try_result().await?; + } } JobType::GenerateThumbnails => { - dispatcher + let mut handle = dispatcher .dispatch(GenerateMissingThumbsJob::default()) .await; + if run_request.sync { + handle.try_result().await?; + } } } @@ -62,10 +68,10 @@ async fn calculate_all_sizes(ctx: &Context) -> RepoResult<()> { }; let job = CalculateSizesJob::new(repo_path, settings); let dispatcher = get_job_dispatcher_from_context(ctx).await; - let state = dispatcher.dispatch(job).await; + let handle = 
dispatcher.dispatch(job).await; let mut rx = { - let state = state.read().await; - state.sizes_channel.subscribe() + let status = handle.status().read().await; + status.sizes_channel.subscribe() }; while let Ok((size_type, size)) = rx.recv().await { diff --git a/mediarepo-daemon/mediarepo-worker/src/handle.rs b/mediarepo-daemon/mediarepo-worker/src/handle.rs new file mode 100644 index 0000000..8c9a603 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/handle.rs @@ -0,0 +1,102 @@ +use mediarepo_core::error::{RepoError, RepoResult}; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use tokio::sync::broadcast::{Receiver, Sender}; +use tokio::sync::RwLock; + +pub struct JobHandle { + status: Arc>, + state: Arc>, + result_receiver: CloneableReceiver>>>, +} + +impl Clone for JobHandle { + fn clone(&self) -> Self { + Self { + status: self.status.clone(), + state: self.state.clone(), + result_receiver: self.result_receiver.clone(), + } + } +} + +impl JobHandle { + pub fn new( + status: Arc>, + state: Arc>, + result_receiver: CloneableReceiver>>>, + ) -> Self { + Self { + status, + state, + result_receiver, + } + } + + pub async fn state(&self) -> JobState { + *self.state.read().await + } + + pub fn status(&self) -> &Arc> { + &self.status + } + + pub async fn result(&mut self) -> Arc>> { + match self.result_receiver.recv().await { + Ok(v) => v, + Err(e) => Arc::new(RwLock::new(Err(RepoError::from(&*e.to_string())))), + } + } + + pub async fn try_result(&mut self) -> RepoResult { + let shared_result = self.result().await; + let mut result = shared_result.write().await; + mem::replace(&mut *result, Err(RepoError::from("result taken"))) + } +} + +#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum JobState { + Queued, + Scheduled, + Running, + Finished, +} + +pub struct CloneableReceiver { + receiver: Receiver, + sender: Sender, +} + +impl CloneableReceiver { + pub fn new(sender: Sender) -> Self { + Self { + receiver: 
sender.subscribe(), + sender, + } + } +} + +impl Clone for CloneableReceiver { + fn clone(&self) -> Self { + Self { + sender: self.sender.clone(), + receiver: self.sender.subscribe(), + } + } +} + +impl Deref for CloneableReceiver { + type Target = Receiver; + + fn deref(&self) -> &Self::Target { + &self.receiver + } +} + +impl DerefMut for CloneableReceiver { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.receiver + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index 5d17e05..1cf58c2 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -1,3 +1,4 @@ +use crate::handle::{CloneableReceiver, JobHandle, JobState}; use crate::jobs::{Job, JobTypeKey}; use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle; use mediarepo_core::trait_bound_typemap::{SendSyncTypeMap, TypeMap, TypeMapKey}; @@ -5,26 +6,27 @@ use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; use std::sync::Arc; use std::time::Duration; +use tokio::sync::broadcast::channel; use tokio::sync::RwLock; use tokio::time::Instant; #[derive(Clone)] pub struct JobDispatcher { subsystem: SubsystemHandle, - job_status_map: Arc>, + job_handle_map: Arc>, repo: Arc, } impl JobDispatcher { pub fn new(subsystem: SubsystemHandle, repo: Repo) -> Self { Self { - job_status_map: Arc::new(RwLock::new(SendSyncTypeMap::new())), + job_handle_map: Arc::new(RwLock::new(SendSyncTypeMap::new())), subsystem, repo: Arc::new(repo), } } - pub async fn dispatch(&self, job: T) -> Arc> { + pub async fn dispatch(&self, job: T) -> JobHandle { self._dispatch(job, None).await } @@ -32,7 +34,7 @@ impl JobDispatcher { &self, job: T, interval: Duration, - ) -> Arc> { + ) -> JobHandle { self._dispatch(job, Some(interval)).await } @@ -41,9 +43,21 @@ impl JobDispatcher { &self, job: T, interval: Option, - ) -> Arc> { - let status = job.state(); 
- self.add_status::>(status.clone()).await; + ) -> JobHandle { + let status = job.status(); + let state = Arc::new(RwLock::new(JobState::Queued)); + let (sender, mut receiver) = channel(1); + self.subsystem + .start("channel-consumer", move |subsystem| async move { + tokio::select! { + _ = receiver.recv() => (), + _ = subsystem.on_shutdown_requested() => (), + } + Ok(()) + }); + let receiver = CloneableReceiver::new(sender.clone()); + let handle = JobHandle::new(status.clone(), state.clone(), receiver); + self.add_handle::>(handle.clone()).await; let repo = self.repo.clone(); @@ -52,16 +66,21 @@ impl JobDispatcher { loop { let start = Instant::now(); let job_2 = job.clone(); + { + let mut state = state.write().await; + *state = JobState::Running; + } let result = tokio::select! { _ = subsystem.on_shutdown_requested() => { job_2.save_state(repo.job()).await } r = job.run(repo.clone()) => { - - if let Err(e) = r { - Err(e) - } else { - job.save_state(repo.job()).await + match r { + Err(e) => Err(e), + Ok(v) => { + let _ = sender.send(Arc::new(RwLock::new(Ok(v)))); + job.save_state(repo.job()).await + } } } }; @@ -69,12 +88,18 @@ impl JobDispatcher { tracing::error!("job failed with error: {}", e); } if let Some(interval) = interval { + { + let mut state = state.write().await; + *state = JobState::Scheduled; + } let sleep_duration = interval - start.elapsed(); tokio::select! 
{ _ = tokio::time::sleep(sleep_duration) => {}, _ = subsystem.on_shutdown_requested() => {break} } } else { + let mut state = state.write().await; + *state = JobState::Finished; break; } } @@ -82,15 +107,15 @@ impl JobDispatcher { Ok(()) }); - status + handle } #[inline] - async fn add_status(&self, status: T::Value) + async fn add_handle(&self, status: T::Value) where ::Value: Send + Sync, { - let mut status_map = self.job_status_map.write().await; + let mut status_map = self.job_handle_map.write().await; status_map.insert::(status); } } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs index 6f13a60..d9ce393 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/calculate_sizes.rs @@ -40,9 +40,10 @@ impl CalculateSizesJob { #[async_trait] impl Job for CalculateSizesJob { - type JobState = CalculateSizesState; + type JobStatus = CalculateSizesState; + type Result = (); - fn state(&self) -> Arc> { + fn status(&self) -> Arc> { self.state.clone() } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs index 9483fd4..05cf9e7 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs @@ -15,9 +15,10 @@ pub struct GenerateMissingThumbsJob { #[async_trait] impl Job for GenerateMissingThumbsJob { - type JobState = SimpleProgress; + type JobStatus = SimpleProgress; + type Result = (); - fn state(&self) -> Arc> { + fn status(&self) -> Arc> { self.state.clone() } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index 73f1887..b56b4ed 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ 
b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -8,6 +8,7 @@ use std::marker::PhantomData; use std::sync::Arc; pub use vacuum::*; +use crate::handle::JobHandle; use async_trait::async_trait; use mediarepo_core::error::RepoResult; use mediarepo_core::trait_bound_typemap::TypeMapKey; @@ -19,11 +20,12 @@ type EmptyStatus = Arc>; #[async_trait] pub trait Job: Clone + Send + Sync { - type JobState: Send + Sync; + type JobStatus: Send + Sync; + type Result: Send + Sync; - fn state(&self) -> Arc>; + fn status(&self) -> Arc>; - async fn run(&self, repo: Arc) -> RepoResult<()>; + async fn run(&self, repo: Arc) -> RepoResult; #[tracing::instrument(level = "debug", skip_all)] async fn save_state(&self, _job_dao: JobDao) -> RepoResult<()> { @@ -37,5 +39,5 @@ impl TypeMapKey for JobTypeKey where T: Job, { - type Value = Arc>; + type Value = JobHandle; } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs index 5335de6..60bc275 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/vacuum.rs @@ -11,9 +11,10 @@ pub struct VacuumJob; #[async_trait] impl Job for VacuumJob { - type JobState = (); + type JobStatus = (); + type Result = (); - fn state(&self) -> Arc> { + fn status(&self) -> Arc> { EmptyStatus::default() } diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index 617eb4f..1d6dde8 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -6,6 +6,7 @@ use mediarepo_logic::dao::repo::Repo; use std::time::Duration; use tokio::sync::oneshot::channel; +pub mod handle; pub mod job_dispatcher; pub mod jobs; pub mod status_utils; From fb869dabb179f89d468c52e596386971ffd331e9 Mon Sep 17 00:00:00 2001 From: trivernis Date: Sun, 27 Mar 2022 16:59:44 +0200 Subject: [PATCH 14/15] Add job for checking the integrity Signed-off-by: trivernis --- 
.../mediarepo-socket/src/namespaces/jobs.rs | 38 +++++++++++++------ .../src/jobs/check_integrity.rs | 32 ++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 2 + mediarepo-daemon/mediarepo-worker/src/lib.rs | 8 +++- 4 files changed, 67 insertions(+), 13 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/check_integrity.rs diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index 273dd11..5025786 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -4,7 +4,10 @@ use mediarepo_core::error::RepoResult; use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest}; use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey}; use mediarepo_logic::dao::DaoProvider; -use mediarepo_worker::jobs::{CalculateSizesJob, GenerateMissingThumbsJob, VacuumJob}; +use mediarepo_worker::job_dispatcher::JobDispatcher; +use mediarepo_worker::jobs::{ + CalculateSizesJob, CheckIntegrityJob, GenerateMissingThumbsJob, Job, VacuumJob, +}; use crate::utils::{get_job_dispatcher_from_context, get_repo_from_context}; @@ -37,20 +40,19 @@ impl JobsNamespace { match run_request.job_type { JobType::MigrateContentDescriptors => job_dao.migrate_content_descriptors().await?, JobType::CalculateSizes => calculate_all_sizes(ctx).await?, - JobType::CheckIntegrity => job_dao.check_integrity().await?, + JobType::CheckIntegrity => { + dispatch_job(&dispatcher, CheckIntegrityJob::default(), run_request.sync).await? + } JobType::Vacuum => { - let mut handle = dispatcher.dispatch(VacuumJob::default()).await; - if run_request.sync { - handle.try_result().await?; - } + dispatch_job(&dispatcher, VacuumJob::default(), run_request.sync).await? 
} JobType::GenerateThumbnails => { - let mut handle = dispatcher - .dispatch(GenerateMissingThumbsJob::default()) - .await; - if run_request.sync { - handle.try_result().await?; - } + dispatch_job( + &dispatcher, + GenerateMissingThumbsJob::default(), + run_request.sync, + ) + .await? } } @@ -58,6 +60,18 @@ impl JobsNamespace { } } +async fn dispatch_job( + dispatcher: &JobDispatcher, + job: J, + sync: bool, +) -> RepoResult<()> { + let mut handle = dispatcher.dispatch(job).await; + if sync { + handle.try_result().await?; + } + Ok(()) +} + async fn calculate_all_sizes(ctx: &Context) -> RepoResult<()> { let (repo_path, settings) = { let data = ctx.data.read().await; diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/check_integrity.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/check_integrity.rs new file mode 100644 index 0000000..8ef6d65 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/check_integrity.rs @@ -0,0 +1,32 @@ +use crate::jobs::Job; +use crate::status_utils::SimpleProgress; +use async_trait::async_trait; +use mediarepo_core::error::RepoResult; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Clone, Default)] +pub struct CheckIntegrityJob { + progress: Arc>, +} + +#[async_trait] +impl Job for CheckIntegrityJob { + type JobStatus = SimpleProgress; + type Result = (); + + fn status(&self) -> Arc> { + self.progress.clone() + } + + async fn run(&self, repo: Arc) -> RepoResult { + repo.job().check_integrity().await?; + { + let mut progress = self.progress.write().await; + progress.set_total(100); + } + Ok(()) + } +} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index b56b4ed..f2d652f 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,8 +1,10 @@ mod calculate_sizes; +mod check_integrity; mod 
generate_missing_thumbnails; mod vacuum; pub use calculate_sizes::*; +pub use check_integrity::*; pub use generate_missing_thumbnails::*; use std::marker::PhantomData; use std::sync::Arc; diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index 1d6dde8..9dd35d8 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -1,5 +1,5 @@ use crate::job_dispatcher::JobDispatcher; -use crate::jobs::VacuumJob; +use crate::jobs::{CheckIntegrityJob, VacuumJob}; use mediarepo_core::error::RepoError; use mediarepo_core::tokio_graceful_shutdown::Toplevel; use mediarepo_logic::dao::repo::Repo; @@ -21,6 +21,12 @@ pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) dispatcher .dispatch_periodically(VacuumJob::default(), Duration::from_secs(60 * 30)) .await; + dispatcher + .dispatch_periodically( + CheckIntegrityJob::default(), + Duration::from_secs(60 * 60 * 24), + ) + .await; Ok(()) }); From 0eda0d2c2226f4a5d70e7c287beaaf31a141a09f Mon Sep 17 00:00:00 2001 From: trivernis Date: Sun, 27 Mar 2022 18:01:43 +0200 Subject: [PATCH 15/15] Improve job state loading and storing Signed-off-by: trivernis --- .../src/entities/job_state.rs | 2 +- .../mediarepo-logic/src/dao/job/state.rs | 16 +++-- .../mediarepo-socket/src/namespaces/jobs.rs | 25 +++++-- .../mediarepo-worker/src/handle.rs | 13 ++-- .../mediarepo-worker/src/job_dispatcher.rs | 21 ++++-- .../src/jobs/generate_missing_thumbnails.rs | 64 +++++++++++++++++- .../src/jobs/migrate_content_descriptors.rs | 65 +++++++++++++++++++ .../mediarepo-worker/src/jobs/mod.rs | 30 ++++++++- mediarepo-daemon/mediarepo-worker/src/lib.rs | 3 +- 9 files changed, 206 insertions(+), 33 deletions(-) create mode 100644 mediarepo-daemon/mediarepo-worker/src/jobs/migrate_content_descriptors.rs diff --git a/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs 
b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs index 01f3360..2c60e59 100644 --- a/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs +++ b/mediarepo-daemon/mediarepo-database/src/entities/job_state.rs @@ -2,7 +2,7 @@ use sea_orm::prelude::*; use sea_orm::TryFromU64; #[derive(Clone, Debug, PartialEq, DeriveEntityModel)] -#[sea_orm(table_name = "namespaces")] +#[sea_orm(table_name = "job_states")] pub struct Model { #[sea_orm(primary_key)] pub job_type: JobType, diff --git a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs index f125d5e..a8aab72 100644 --- a/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs +++ b/mediarepo-daemon/mediarepo-logic/src/dao/job/state.rs @@ -9,16 +9,18 @@ use sea_orm::{Condition, TransactionTrait}; impl JobDao { /// Returns all job states for a given job id - pub async fn states_for_job_type(&self, job_type: JobType) -> RepoResult> { - let states = job_state::Entity::find() + pub async fn state_for_job_type(&self, job_type: JobType) -> RepoResult> { + let state = job_state::Entity::find() .filter(job_state::Column::JobType.eq(job_type)) - .all(&self.ctx.db) + .one(&self.ctx.db) .await? 
- .into_iter() - .map(JobStateDto::new) - .collect(); + .map(JobStateDto::new); - Ok(states) + Ok(state) + } + + pub async fn upsert_state(&self, state: UpsertJobStateDto) -> RepoResult<()> { + self.upsert_multiple_states(vec![state]).await } pub async fn upsert_multiple_states(&self, states: Vec) -> RepoResult<()> { diff --git a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs index 5025786..d86b3f1 100644 --- a/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs +++ b/mediarepo-daemon/mediarepo-socket/src/namespaces/jobs.rs @@ -3,13 +3,13 @@ use mediarepo_core::bromine::prelude::*; use mediarepo_core::error::RepoResult; use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest}; use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey}; -use mediarepo_logic::dao::DaoProvider; +use mediarepo_worker::handle::JobState; use mediarepo_worker::job_dispatcher::JobDispatcher; use mediarepo_worker::jobs::{ - CalculateSizesJob, CheckIntegrityJob, GenerateMissingThumbsJob, Job, VacuumJob, + CalculateSizesJob, CheckIntegrityJob, GenerateMissingThumbsJob, Job, MigrateCDsJob, VacuumJob, }; -use crate::utils::{get_job_dispatcher_from_context, get_repo_from_context}; +use crate::utils::get_job_dispatcher_from_context; pub struct JobsNamespace; @@ -29,7 +29,6 @@ impl JobsNamespace { #[tracing::instrument(skip_all)] pub async fn run_job(ctx: &Context, event: Event) -> IPCResult { let run_request = event.payload::()?; - let job_dao = get_repo_from_context(ctx).await.job(); let dispatcher = get_job_dispatcher_from_context(ctx).await; if !run_request.sync { @@ -38,7 +37,9 @@ impl JobsNamespace { } match run_request.job_type { - JobType::MigrateContentDescriptors => job_dao.migrate_content_descriptors().await?, + JobType::MigrateContentDescriptors => { + dispatch_job(&dispatcher, MigrateCDsJob::default(), run_request.sync).await? 
+ } JobType::CalculateSizes => calculate_all_sizes(ctx).await?, JobType::CheckIntegrity => { dispatch_job(&dispatcher, CheckIntegrityJob::default(), run_request.sync).await? @@ -65,9 +66,19 @@ async fn dispatch_job( job: J, sync: bool, ) -> RepoResult<()> { - let mut handle = dispatcher.dispatch(job).await; + let mut handle = if let Some(handle) = dispatcher.get_handle::().await { + if handle.state().await == JobState::Running { + handle + } else { + dispatcher.dispatch(job).await + } + } else { + dispatcher.dispatch(job).await + }; if sync { - handle.try_result().await?; + if let Some(result) = handle.take_result().await { + result?; + } } Ok(()) } diff --git a/mediarepo-daemon/mediarepo-worker/src/handle.rs b/mediarepo-daemon/mediarepo-worker/src/handle.rs index 8c9a603..b9929ea 100644 --- a/mediarepo-daemon/mediarepo-worker/src/handle.rs +++ b/mediarepo-daemon/mediarepo-worker/src/handle.rs @@ -1,5 +1,4 @@ use mediarepo_core::error::{RepoError, RepoResult}; -use std::mem; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use tokio::sync::broadcast::{Receiver, Sender}; @@ -8,7 +7,7 @@ use tokio::sync::RwLock; pub struct JobHandle { status: Arc>, state: Arc>, - result_receiver: CloneableReceiver>>>, + result_receiver: CloneableReceiver>>>>, } impl Clone for JobHandle { @@ -25,7 +24,7 @@ impl JobHandle { pub fn new( status: Arc>, state: Arc>, - result_receiver: CloneableReceiver>>>, + result_receiver: CloneableReceiver>>>>, ) -> Self { Self { status, @@ -42,17 +41,17 @@ impl JobHandle { &self.status } - pub async fn result(&mut self) -> Arc>> { + pub async fn result(&mut self) -> Arc>>> { match self.result_receiver.recv().await { Ok(v) => v, - Err(e) => Arc::new(RwLock::new(Err(RepoError::from(&*e.to_string())))), + Err(e) => Arc::new(RwLock::new(Some(Err(RepoError::from(&*e.to_string()))))), } } - pub async fn try_result(&mut self) -> RepoResult { + pub async fn take_result(&mut self) -> Option> { let shared_result = self.result().await; let mut result = 
shared_result.write().await; - mem::replace(&mut *result, Err(RepoError::from("result taken"))) + result.take() } } diff --git a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs index 1cf58c2..2c74c02 100644 --- a/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs +++ b/mediarepo-daemon/mediarepo-worker/src/job_dispatcher.rs @@ -57,7 +57,7 @@ impl JobDispatcher { }); let receiver = CloneableReceiver::new(sender.clone()); let handle = JobHandle::new(status.clone(), state.clone(), receiver); - self.add_handle::>(handle.clone()).await; + self.add_handle::(handle.clone()).await; let repo = self.repo.clone(); @@ -70,6 +70,9 @@ impl JobDispatcher { let mut state = state.write().await; *state = JobState::Running; } + if let Err(e) = job.load_state(repo.job()).await { + tracing::error!("failed to load the jobs state: {}", e); + } let result = tokio::select! { _ = subsystem.on_shutdown_requested() => { job_2.save_state(repo.job()).await @@ -78,7 +81,7 @@ impl JobDispatcher { match r { Err(e) => Err(e), Ok(v) => { - let _ = sender.send(Arc::new(RwLock::new(Ok(v)))); + let _ = sender.send(Arc::new(RwLock::new(Some(Ok(v))))); job.save_state(repo.job()).await } } @@ -86,6 +89,7 @@ impl JobDispatcher { }; if let Err(e) = result { tracing::error!("job failed with error: {}", e); + let _ = sender.send(Arc::new(RwLock::new(Some(Err(e))))); } if let Some(interval) = interval { { @@ -111,12 +115,15 @@ impl JobDispatcher { } #[inline] - async fn add_handle(&self, status: T::Value) - where - ::Value: Send + Sync, - { + async fn add_handle(&self, handle: JobHandle) { let mut status_map = self.job_handle_map.write().await; - status_map.insert::(status); + status_map.insert::>(handle); + } + + #[inline] + pub async fn get_handle(&self) -> Option> { + let map = self.job_handle_map.read().await; + map.get::>().cloned() } } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs 
b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs index 05cf9e7..9eee427 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/generate_missing_thumbnails.rs @@ -1,16 +1,22 @@ -use crate::jobs::Job; +use crate::jobs::{deserialize_state, serialize_state, Job}; use crate::status_utils::SimpleProgress; use async_trait::async_trait; use mediarepo_core::error::RepoResult; use mediarepo_core::thumbnailer::ThumbnailSize; +use mediarepo_database::entities::job_state::JobType; +use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; use mediarepo_logic::dao::DaoProvider; +use serde::{Deserialize, Serialize}; +use std::mem; use std::sync::Arc; +use std::time::{Duration, SystemTime}; use tokio::sync::RwLock; #[derive(Clone, Default)] pub struct GenerateMissingThumbsJob { state: Arc>, + inner_state: Arc>, } #[async_trait] @@ -22,7 +28,20 @@ impl Job for GenerateMissingThumbsJob { self.state.clone() } + async fn load_state(&self, job_dao: JobDao) -> RepoResult<()> { + if let Some(state) = job_dao.state_for_job_type(JobType::GenerateThumbs).await? { + let mut inner_state = self.inner_state.write().await; + let state = deserialize_state::(state)?; + let _ = mem::replace(&mut *inner_state, state); + } + + Ok(()) + } + async fn run(&self, repo: Arc) -> RepoResult<()> { + if !self.needs_generation(&repo).await? 
{ + return Ok(()); + } let file_dao = repo.file(); let all_files = file_dao.all().await?; { @@ -42,6 +61,49 @@ impl Job for GenerateMissingThumbsJob { } } + self.refresh_state(&repo).await?; + Ok(()) } + + async fn save_state(&self, job_dao: JobDao) -> RepoResult<()> { + let state = self.inner_state.read().await; + let state = serialize_state(JobType::GenerateThumbs, &*state)?; + job_dao.upsert_state(state).await + } +} + +impl GenerateMissingThumbsJob { + async fn needs_generation(&self, repo: &Repo) -> RepoResult { + let repo_counts = repo.get_counts().await?; + let file_count = repo_counts.file_count as u64; + let state = self.inner_state.read().await; + + Ok(state.file_count != file_count + || state.last_run.elapsed().unwrap() > Duration::from_secs(60 * 60)) + } + + async fn refresh_state(&self, repo: &Repo) -> RepoResult<()> { + let repo_counts = repo.get_counts().await?; + let mut state = self.inner_state.write().await; + state.last_run = SystemTime::now(); + state.file_count = repo_counts.file_count as u64; + + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +struct GenerateThumbsState { + file_count: u64, + last_run: SystemTime, +} + +impl Default for GenerateThumbsState { + fn default() -> Self { + Self { + file_count: 0, + last_run: SystemTime::now(), + } + } } diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/migrate_content_descriptors.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/migrate_content_descriptors.rs new file mode 100644 index 0000000..e320371 --- /dev/null +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/migrate_content_descriptors.rs @@ -0,0 +1,65 @@ +use crate::jobs::{deserialize_state, serialize_state, Job}; +use crate::status_utils::SimpleProgress; +use async_trait::async_trait; +use mediarepo_core::error::RepoResult; +use mediarepo_database::entities::job_state::JobType; +use mediarepo_logic::dao::job::JobDao; +use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dao::DaoProvider; +use serde::{Deserialize, Serialize}; 
+use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Clone, Default)] +pub struct MigrateCDsJob { + progress: Arc>, + migrated: Arc, +} + +#[async_trait] +impl Job for MigrateCDsJob { + type JobStatus = SimpleProgress; + type Result = (); + + fn status(&self) -> Arc> { + self.progress.clone() + } + + async fn load_state(&self, job_dao: JobDao) -> RepoResult<()> { + if let Some(state) = job_dao.state_for_job_type(JobType::MigrateCDs).await? { + let state = deserialize_state::(state)?; + self.migrated.store(state.migrated, Ordering::SeqCst); + } + + Ok(()) + } + + async fn run(&self, repo: Arc) -> RepoResult { + if self.migrated.load(Ordering::SeqCst) { + return Ok(()); + } + let job_dao = repo.job(); + + job_dao.migrate_content_descriptors().await?; + self.migrated.store(true, Ordering::Relaxed); + { + let mut progress = self.progress.write().await; + progress.set_total(100); + } + Ok(()) + } + + async fn save_state(&self, job_dao: JobDao) -> RepoResult<()> { + if self.migrated.load(Ordering::Relaxed) { + let state = serialize_state(JobType::MigrateCDs, &MigrationStatus { migrated: true })?; + job_dao.upsert_state(state).await?; + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +struct MigrationStatus { + pub migrated: bool, +} diff --git a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs index f2d652f..846a39e 100644 --- a/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs +++ b/mediarepo-daemon/mediarepo-worker/src/jobs/mod.rs @@ -1,21 +1,28 @@ mod calculate_sizes; mod check_integrity; mod generate_missing_thumbnails; +mod migrate_content_descriptors; mod vacuum; pub use calculate_sizes::*; pub use check_integrity::*; pub use generate_missing_thumbnails::*; +pub use migrate_content_descriptors::*; use std::marker::PhantomData; use std::sync::Arc; pub use vacuum::*; use crate::handle::JobHandle; use async_trait::async_trait; -use 
mediarepo_core::error::RepoResult; +use mediarepo_core::bincode; +use mediarepo_core::error::{RepoError, RepoResult}; use mediarepo_core::trait_bound_typemap::TypeMapKey; +use mediarepo_database::entities::job_state::JobType; use mediarepo_logic::dao::job::JobDao; use mediarepo_logic::dao::repo::Repo; +use mediarepo_logic::dto::{JobStateDto, UpsertJobStateDto}; +use serde::de::DeserializeOwned; +use serde::Serialize; use tokio::sync::RwLock; type EmptyStatus = Arc>; @@ -27,9 +34,12 @@ pub trait Job: Clone + Send + Sync { fn status(&self) -> Arc>; + async fn load_state(&self, _job_dao: JobDao) -> RepoResult<()> { + Ok(()) + } + async fn run(&self, repo: Arc) -> RepoResult; - #[tracing::instrument(level = "debug", skip_all)] async fn save_state(&self, _job_dao: JobDao) -> RepoResult<()> { Ok(()) } @@ -43,3 +53,19 @@ where { type Value = JobHandle; } + +pub fn deserialize_state(dto: JobStateDto) -> RepoResult { + bincode::deserialize(dto.value()).map_err(RepoError::from) +} + +pub fn serialize_state( + job_type: JobType, + state: &T, +) -> RepoResult { + let dto = UpsertJobStateDto { + value: bincode::serialize(state)?, + job_type, + }; + + Ok(dto) +} diff --git a/mediarepo-daemon/mediarepo-worker/src/lib.rs b/mediarepo-daemon/mediarepo-worker/src/lib.rs index 9dd35d8..1b696f5 100644 --- a/mediarepo-daemon/mediarepo-worker/src/lib.rs +++ b/mediarepo-daemon/mediarepo-worker/src/lib.rs @@ -1,5 +1,5 @@ use crate::job_dispatcher::JobDispatcher; -use crate::jobs::{CheckIntegrityJob, VacuumJob}; +use crate::jobs::{CheckIntegrityJob, MigrateCDsJob, VacuumJob}; use mediarepo_core::error::RepoError; use mediarepo_core::tokio_graceful_shutdown::Toplevel; use mediarepo_logic::dao::repo::Repo; @@ -27,6 +27,7 @@ pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) Duration::from_secs(60 * 60 * 24), ) .await; + dispatcher.dispatch(MigrateCDsJob::default()).await; Ok(()) });