Merge pull request #22 from Trivernis/develop

Version 1.0.1
Julius Riegel committed via GitHub
commit 16cb977e45

@@ -0,0 +1,42 @@
ARG BASE_IMAGE=docker.io/alpine:latest
FROM ${BASE_IMAGE} AS base
RUN apk update
RUN apk add --no-cache \
build-base \
openssl3-dev \
gtk+3.0-dev \
libappindicator-dev \
patchelf \
librsvg-dev \
curl \
wget \
clang \
nodejs \
npm \
libsoup-dev \
webkit2gtk-dev \
file \
python3 \
bash \
protoc
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
RUN rm -rf /var/lib/{cache,log}/ /var/cache
FROM base AS sources
WORKDIR /usr/src
COPY mediarepo-api ./mediarepo-api
COPY mediarepo-daemon ./mediarepo-daemon
COPY mediarepo-ui ./mediarepo-ui
COPY scripts ./scripts
RUN python3 scripts/clean.py
RUN python3 scripts/check.py --install
FROM sources AS build_daemon
WORKDIR /usr/src
RUN python3 scripts/build.py daemon --verbose
FROM sources AS build_ui
WORKDIR /usr/src
RUN python3 scripts/build.py ui --verbose --bundles deb

@@ -1,37 +0,0 @@
ARG DEBIAN_RELEASE=bullseye
FROM bitnami/minideb:${DEBIAN_RELEASE} AS builder
WORKDIR /usr/src
COPY mediarepo-api ./mediarepo-api
COPY mediarepo-daemon ./mediarepo-daemon
COPY mediarepo-ui ./mediarepo-ui
COPY scripts ./scripts
RUN apt-get update
RUN apt-get install -y \
build-essential \
libssl-dev \
libgtk-3-dev \
libappindicator3-0.1-cil-dev \
patchelf \
librsvg2-dev \
curl \
wget \
pkg-config \
clang \
nodejs \
npm \
libsoup2.4-dev \
libwebkit2gtk-4.0-dev \
file \
python
RUN apt remove cmdtest -y
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
RUN python3 scripts/clean.py
RUN python3 scripts/check.py --install
RUN python3 scripts/build.py all --verbose

@@ -99,7 +99,7 @@ After building, the `out` directory contains all the built binaries and bundles.
### Test Builds
For test builds, the `Dockerfile` in this repository can be used. This way, no build dependencies need to be installed on the system. The Dockerfile doesn't provide any artifacts and can only be used for validation.
For test builds, the `Containerfile` in this repository can be used. This way, no build dependencies need to be installed on the system. The Containerfile doesn't provide any artifacts and can only be used for validation.
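For example, a test build can be run entirely inside a container (a minimal sketch, assuming `podman` is installed; `docker build` accepts the same flags, and the stage names come from the Containerfile above):

```sh
# Build up to the daemon stage; success means the daemon sources compile
podman build -f Containerfile --target build_daemon .

# Build up to the UI stage; success means the UI and its deb bundle build
podman build -f Containerfile --target build_ui .
```

Since the final stages only run the build scripts, a successful image build is the validation result itself; nothing needs to be copied out of the container.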
## Usage and Further Information

@@ -1,28 +1,28 @@
[package]
name = "mediarepo-api"
version = "0.31.0"
version = "0.32.0"
edition = "2018"
license = "gpl-3"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tracing = "0.1.30"
tracing = "0.1.32"
thiserror = "1.0.30"
async-trait = { version = "0.1.52", optional = true }
async-trait = { version = "0.1.53", optional = true }
parking_lot = { version = "0.12.0", optional = true }
serde_json = { version = "1.0.78", optional = true }
serde_json = { version = "1.0.79", optional = true }
directories = { version = "4.0.1", optional = true }
mime_guess = { version = "2.0.3", optional = true }
mime_guess = { version = "2.0.4", optional = true }
serde_piecewise_default = "0.2.0"
futures = { version = "0.3.21", optional = true }
url = { version = "2.2.2", optional = true }
pathsearch = { version = "0.2.0", optional = true }
[dependencies.bromine]
version = "0.18.1"
version = "0.20.1"
optional = true
features = ["serialize_bincode"]
features = ["serialize_bincode", "encryption_layer"]
[dependencies.serde]
version = "1.0.136"
@@ -33,13 +33,13 @@ version = "0.4.19"
features = ["serde"]
[dependencies.tauri]
version = "1.0.0-beta.8"
optional=true
version = "1.0.0-rc.4"
optional = true
default-features = false
features = []
[dependencies.tokio]
version = "1.16.1"
version = "1.17.0"
optional = true
features = ["sync", "fs", "net", "io-util", "io-std", "time", "rt", "process"]

@@ -1,6 +1,8 @@
use async_trait::async_trait;
use bromine::error::Result;
use bromine::prelude::encrypted::{EncryptedStream, EncryptionOptions};
use bromine::prelude::IPCResult;
use bromine::protocol::encrypted::EncryptedListener;
use bromine::protocol::*;
use std::io::Error;
use std::net::ToSocketAddrs;
@@ -9,12 +11,11 @@ use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::{TcpListener, TcpStream};
#[derive(Debug)]
pub enum ApiProtocolListener {
#[cfg(unix)]
UnixSocket(tokio::net::UnixListener),
Tcp(TcpListener),
Tcp(EncryptedListener<TcpListener>),
}
unsafe impl Send for ApiProtocolListener {}
@@ -25,12 +26,14 @@ impl AsyncStreamProtocolListener for ApiProtocolListener {
type AddressType = String;
type RemoteAddressType = String;
type Stream = ApiProtocolStream;
type ListenerOptions = ();
#[tracing::instrument]
async fn protocol_bind(address: Self::AddressType) -> Result<Self> {
async fn protocol_bind(address: Self::AddressType, _: Self::ListenerOptions) -> Result<Self> {
if let Some(addr) = address.to_socket_addrs().ok().and_then(|mut a| a.next()) {
let listener = TcpListener::bind(addr).await?;
tracing::info!("Connecting via TCP");
let listener =
EncryptedListener::protocol_bind(addr, EncryptionOptions::default()).await?;
tracing::info!("Connecting via encrypted TCP");
Ok(Self::Tcp(listener))
} else {
@@ -67,19 +70,18 @@ impl AsyncStreamProtocolListener for ApiProtocolListener {
))
}
ApiProtocolListener::Tcp(listener) => {
let (stream, addr) = listener.accept().await?;
let (stream, addr) = listener.protocol_accept().await?;
Ok((ApiProtocolStream::Tcp(stream), addr.to_string()))
}
}
}
}
#[derive(Debug)]
pub enum ApiProtocolStream {
#[cfg(unix)]
UnixSocket(tokio::net::UnixStream),
Tcp(TcpStream),
Tcp(EncryptedStream<TcpStream>),
}
unsafe impl Send for ApiProtocolStream {}
@@ -97,7 +99,7 @@ impl AsyncProtocolStreamSplit for ApiProtocolStream {
(Box::new(read), Box::new(write))
}
ApiProtocolStream::Tcp(stream) => {
let (read, write) = stream.into_split();
let (read, write) = stream.protocol_into_split();
(Box::new(read), Box::new(write))
}
}
@@ -107,10 +109,15 @@ impl AsyncProtocolStreamSplit for ApiProtocolStream {
#[async_trait]
impl AsyncProtocolStream for ApiProtocolStream {
type AddressType = String;
type StreamOptions = ();
async fn protocol_connect(address: Self::AddressType) -> IPCResult<Self> {
async fn protocol_connect(
address: Self::AddressType,
_: Self::StreamOptions,
) -> IPCResult<Self> {
if let Some(addr) = address.to_socket_addrs().ok().and_then(|mut a| a.next()) {
let stream = TcpStream::connect(addr).await?;
let stream =
EncryptedStream::protocol_connect(addr, EncryptionOptions::default()).await?;
Ok(Self::Tcp(stream))
} else {
#[cfg(unix)]

@@ -2,6 +2,7 @@ use crate::daemon_management::find_daemon_executable;
use crate::tauri_plugin::commands::AppAccess;
use crate::tauri_plugin::error::PluginResult;
use crate::tauri_plugin::settings::save_settings;
use bromine::prelude::encrypted::EncryptedListener;
use bromine::prelude::{IPCError, IPCResult};
use bromine::IPCBuilder;
use std::io::ErrorKind;
@@ -53,7 +54,7 @@ pub async fn check_daemon_running(address: String) -> PluginResult<bool> {
async fn try_connect_daemon(address: String) -> IPCResult<()> {
let address = get_socket_address(address)?;
let ctx = IPCBuilder::<TcpListener>::new()
let ctx = IPCBuilder::<EncryptedListener<TcpListener>>::new()
.address(address)
.build_client()
.await?;

File diff suppressed because it is too large

@@ -1,10 +1,10 @@
[workspace]
members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "."]
default-members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "."]
members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "mediarepo-worker", "."]
default-members = ["mediarepo-core", "mediarepo-database", "mediarepo-logic", "mediarepo-socket", "mediarepo-worker", "."]
[package]
name = "mediarepo-daemon"
version = "1.0.0"
version = "1.0.1"
edition = "2018"
license = "gpl-3"
repository = "https://github.com/Trivernis/mediarepo-daemon"
@@ -16,17 +16,20 @@ name = "mediarepo-daemon"
path = "src/main.rs"
[dependencies]
tracing = "0.1.31"
tracing = "0.1.32"
toml = "0.5.8"
structopt = "0.3.26"
glob = "0.3.0"
tracing-flame = "0.2.0"
tracing-appender = "0.2.0"
tracing-appender = "0.2.2"
tracing-log = "0.1.2"
rolling-file = "0.1.0"
num-integer = "0.1.44"
console-subscriber = "0.1.3"
log = "0.4.14"
log = "0.4.16"
opentelemetry = { version = "0.17.0", features = ["rt-tokio"] }
opentelemetry-jaeger = { version = "0.16.0", features = ["rt-tokio"] }
tracing-opentelemetry = "0.17.2"
[dependencies.mediarepo-core]
path = "./mediarepo-core"
@@ -37,10 +40,13 @@ path = "mediarepo-logic"
[dependencies.mediarepo-socket]
path = "./mediarepo-socket"
[dependencies.mediarepo-worker]
path = "./mediarepo-worker"
[dependencies.tokio]
version = "1.17.0"
features = ["macros", "rt-multi-thread", "io-std", "io-util"]
[dependencies.tracing-subscriber]
version= "0.3.9"
version = "0.3.9"
features = ["env-filter", "ansi", "json"]

@@ -13,17 +13,19 @@ multibase = "0.9.1"
base64 = "0.13.0"
toml = "0.5.8"
serde = "1.0.136"
typemap_rev = "0.1.5"
futures = "0.3.21"
itertools = "0.10.3"
glob = "0.3.0"
tracing = "0.1.31"
tracing = "0.1.32"
data-encoding = "2.3.2"
tokio-graceful-shutdown = "0.4.3"
tokio-graceful-shutdown = "0.5.0"
thumbnailer = "0.4.0"
bincode = "1.3.3"
tracing-subscriber = "0.3.9"
trait-bound-typemap = "0.3.3"
[dependencies.sea-orm]
version = "0.6.0"
version = "0.7.1"
default-features = false
[dependencies.sqlx]
@@ -41,4 +43,4 @@ features = ["toml"]
[dependencies.mediarepo-api]
path = "../../mediarepo-api"
features = ["bromine"]
features = ["bromine"]

@@ -43,6 +43,9 @@ pub enum RepoError {
#[error("the database file is corrupted {0}")]
Corrupted(String),
#[error("bincode de-/serialization failed {0}")]
Bincode(#[from] bincode::Error),
}
#[derive(Error, Debug)]

@@ -1,14 +1,17 @@
pub use bincode;
pub use futures;
pub use itertools;
pub use mediarepo_api;
pub use mediarepo_api::bromine;
pub use thumbnailer;
pub use tokio_graceful_shutdown;
pub use trait_bound_typemap;
pub mod content_descriptor;
pub mod context;
pub mod error;
pub mod fs;
pub mod settings;
pub mod tracing_layer_list;
pub mod type_keys;
pub mod utils;

@@ -1,11 +1,15 @@
use serde::{Deserialize, Serialize};
use tracing::Level;
const DEFAULT_TELEMETRY_ENDPOINT: &str = "telemetry.trivernis.net:6831";
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct LoggingSettings {
pub level: LogLevel,
pub trace_sql: bool,
pub trace_api_calls: bool,
pub telemetry: bool,
pub telemetry_endpoint: String,
}
impl Default for LoggingSettings {
@@ -14,6 +18,8 @@ impl Default for LoggingSettings {
level: LogLevel::Info,
trace_sql: false,
trace_api_calls: false,
telemetry: false,
telemetry_endpoint: String::from(DEFAULT_TELEMETRY_ENDPOINT),
}
}
}

@@ -0,0 +1,118 @@
use std::slice::{Iter, IterMut};
use tracing::level_filters::LevelFilter;
use tracing::span::{Attributes, Record};
use tracing::subscriber::Interest;
use tracing::{Event, Id, Metadata, Subscriber};
use tracing_subscriber::Layer;
pub struct DynLayerList<S>(Vec<Box<dyn Layer<S> + Send + Sync + 'static>>);
impl<S> DynLayerList<S> {
pub fn new() -> Self {
Self(Vec::new())
}
pub fn iter(&self) -> Iter<'_, Box<dyn Layer<S> + Send + Sync>> {
self.0.iter()
}
pub fn iter_mut(&mut self) -> IterMut<'_, Box<dyn Layer<S> + Send + Sync>> {
self.0.iter_mut()
}
}
impl<S> DynLayerList<S>
where
S: Subscriber,
{
pub fn add<L: Layer<S> + Send + Sync>(&mut self, layer: L) {
self.0.push(Box::new(layer));
}
}
impl<S> Layer<S> for DynLayerList<S>
where
S: Subscriber,
{
fn on_layer(&mut self, subscriber: &mut S) {
self.iter_mut().for_each(|l| l.on_layer(subscriber));
}
fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest {
// Return highest level of interest.
let mut interest = Interest::never();
for layer in &self.0 {
let new_interest = layer.register_callsite(metadata);
if (interest.is_sometimes() && new_interest.is_always())
|| (interest.is_never() && !new_interest.is_never())
{
interest = new_interest;
}
}
interest
}
fn enabled(
&self,
metadata: &Metadata<'_>,
ctx: tracing_subscriber::layer::Context<'_, S>,
) -> bool {
self.iter().any(|l| l.enabled(metadata, ctx.clone()))
}
fn on_new_span(
&self,
attrs: &Attributes<'_>,
id: &Id,
ctx: tracing_subscriber::layer::Context<'_, S>,
) {
self.iter()
.for_each(|l| l.on_new_span(attrs, id, ctx.clone()));
}
fn max_level_hint(&self) -> Option<LevelFilter> {
self.iter().filter_map(|l| l.max_level_hint()).max()
}
fn on_record(
&self,
span: &Id,
values: &Record<'_>,
ctx: tracing_subscriber::layer::Context<'_, S>,
) {
self.iter()
.for_each(|l| l.on_record(span, values, ctx.clone()));
}
fn on_follows_from(
&self,
span: &Id,
follows: &Id,
ctx: tracing_subscriber::layer::Context<'_, S>,
) {
self.iter()
.for_each(|l| l.on_follows_from(span, follows, ctx.clone()));
}
fn on_event(&self, event: &Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
self.iter().for_each(|l| l.on_event(event, ctx.clone()));
}
fn on_enter(&self, id: &Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
self.iter().for_each(|l| l.on_enter(id, ctx.clone()));
}
fn on_exit(&self, id: &Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
self.iter().for_each(|l| l.on_exit(id, ctx.clone()));
}
fn on_close(&self, id: Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
self.iter()
.for_each(|l| l.on_close(id.clone(), ctx.clone()));
}
fn on_id_change(&self, old: &Id, new: &Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
self.iter()
.for_each(|l| l.on_id_change(old, new, ctx.clone()));
}
}

@@ -3,7 +3,7 @@ use std::path::PathBuf;
use mediarepo_api::types::repo::SizeType;
use tokio_graceful_shutdown::SubsystemHandle;
use typemap_rev::TypeMapKey;
use trait_bound_typemap::TypeMapKey;
use crate::settings::Settings;

@@ -8,7 +8,7 @@ workspace = ".."
[dependencies]
chrono = "0.4.19"
tracing = "0.1.31"
tracing = "0.1.32"
[dependencies.mediarepo-core]
path = "../mediarepo-core"
@@ -18,6 +18,6 @@ version = "0.5.11"
features = ["migrate"]
[dependencies.sea-orm]
version = "0.6.0"
features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros", "debug-print"]
version = "0.7.1"
features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros"]
default-features = false

@@ -0,0 +1,5 @@
CREATE TABLE job_states (
job_type INTEGER NOT NULL,
value BLOB,
PRIMARY KEY (job_type)
);

@@ -0,0 +1,45 @@
use sea_orm::prelude::*;
use sea_orm::TryFromU64;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "job_states")]
pub struct Model {
#[sea_orm(primary_key)]
pub job_type: JobType,
pub value: Vec<u8>,
}
#[derive(Clone, Copy, Debug, PartialEq, EnumIter, DeriveActiveEnum)]
#[sea_orm(rs_type = "u32", db_type = "Integer")]
pub enum JobType {
#[sea_orm(num_value = 10)]
MigrateCDs,
#[sea_orm(num_value = 20)]
CalculateSizes,
#[sea_orm(num_value = 30)]
GenerateThumbs,
#[sea_orm(num_value = 40)]
CheckIntegrity,
#[sea_orm(num_value = 50)]
Vacuum,
}
impl TryFromU64 for JobType {
fn try_from_u64(n: u64) -> Result<Self, DbErr> {
let value = match n {
10 => Self::MigrateCDs,
20 => Self::CalculateSizes,
30 => Self::GenerateThumbs,
40 => Self::CheckIntegrity,
50 => Self::Vacuum,
_ => return Err(DbErr::Custom(String::from("Invalid job type"))),
};
Ok(value)
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

@@ -3,6 +3,7 @@ pub mod content_descriptor_source;
pub mod content_descriptor_tag;
pub mod file;
pub mod file_metadata;
pub mod job_state;
pub mod namespace;
pub mod sort_key;
pub mod sorting_preset;

@@ -13,7 +13,8 @@ pub async fn get_database<S: AsRef<str>>(uri: S) -> RepoDatabaseResult<DatabaseC
migrate(uri.as_ref()).await?;
let mut opt = ConnectOptions::new(uri.as_ref().to_string());
opt.connect_timeout(Duration::from_secs(10))
.idle_timeout(Duration::from_secs(10));
.idle_timeout(Duration::from_secs(10))
.sqlx_logging(false);
let conn = Database::connect(opt).await?;

@@ -8,12 +8,11 @@ workspace = ".."
[dependencies]
chrono = "0.4.19"
typemap_rev = "0.1.5"
serde = "1.0.136"
mime_guess = "2.0.4"
mime = "0.3.16"
tracing = "0.1.31"
async-trait = "0.1.52"
tracing = "0.1.32"
async-trait = "0.1.53"
[dependencies.mediarepo-core]
path = "../mediarepo-core"
@@ -22,7 +21,7 @@ path = "../mediarepo-core"
path = "../mediarepo-database"
[dependencies.sea-orm]
version = "0.6.0"
version = "0.7.1"
features = ["runtime-tokio-native-tls", "macros"]
default-features = false

@@ -3,5 +3,6 @@ use crate::dao_provider;
pub mod generate_missing_thumbnails;
pub mod migrate_content_descriptors;
pub mod sqlite_operations;
pub mod state;
dao_provider!(JobDao);

@@ -0,0 +1,58 @@
use crate::dao::job::JobDao;
use crate::dto::{JobStateDto, UpsertJobStateDto};
use mediarepo_core::error::RepoResult;
use mediarepo_database::entities::job_state;
use mediarepo_database::entities::job_state::JobType;
use sea_orm::prelude::*;
use sea_orm::ActiveValue::Set;
use sea_orm::{Condition, TransactionTrait};
impl JobDao {
/// Returns the stored state for the given job type
pub async fn state_for_job_type(&self, job_type: JobType) -> RepoResult<Option<JobStateDto>> {
let state = job_state::Entity::find()
.filter(job_state::Column::JobType.eq(job_type))
.one(&self.ctx.db)
.await?
.map(JobStateDto::new);
Ok(state)
}
pub async fn upsert_state(&self, state: UpsertJobStateDto) -> RepoResult<()> {
self.upsert_multiple_states(vec![state]).await
}
pub async fn upsert_multiple_states(&self, states: Vec<UpsertJobStateDto>) -> RepoResult<()> {
let trx = self.ctx.db.begin().await?;
job_state::Entity::delete_many()
.filter(build_state_filters(&states))
.exec(&trx)
.await?;
job_state::Entity::insert_many(build_active_state_models(states))
.exec(&trx)
.await?;
trx.commit().await?;
Ok(())
}
}
fn build_state_filters(states: &Vec<UpsertJobStateDto>) -> Condition {
states
.iter()
.map(|s| Condition::all().add(job_state::Column::JobType.eq(s.job_type)))
.fold(Condition::any(), |acc, cond| acc.add(cond))
}
fn build_active_state_models(states: Vec<UpsertJobStateDto>) -> Vec<job_state::ActiveModel> {
states
.into_iter()
.map(|s| job_state::ActiveModel {
job_type: Set(s.job_type),
value: Set(s.value),
})
.collect()
}

@@ -0,0 +1,31 @@
use mediarepo_database::entities::job_state;
use mediarepo_database::entities::job_state::JobType;
#[derive(Clone, Debug)]
pub struct JobStateDto {
model: job_state::Model,
}
impl JobStateDto {
pub(crate) fn new(model: job_state::Model) -> Self {
Self { model }
}
pub fn job_type(&self) -> JobType {
self.model.job_type
}
pub fn value(&self) -> &[u8] {
&self.model.value
}
pub fn into_value(self) -> Vec<u8> {
self.model.value
}
}
#[derive(Clone, Debug)]
pub struct UpsertJobStateDto {
pub job_type: JobType,
pub value: Vec<u8>,
}

@@ -1,5 +1,6 @@
pub use file::*;
pub use file_metadata::*;
pub use job_state::*;
pub use namespace::*;
pub use sorting_preset::*;
pub use tag::*;
@@ -7,6 +8,7 @@ pub use thumbnail::*;
mod file;
mod file_metadata;
mod job_state;
mod namespace;
mod sorting_preset;
mod tag;

@@ -1,7 +1,6 @@
use mediarepo_core::trait_bound_typemap::TypeMapKey;
use std::sync::Arc;
use typemap_rev::TypeMapKey;
use crate::dao::repo::Repo;
pub struct RepoKey;

@@ -8,7 +8,7 @@ workspace = ".."
[dependencies]
serde = "1.0.136"
tracing = "0.1.31"
tracing = "0.1.32"
compare = "0.1.0"
port_check = "0.1.5"
rayon = "1.5.1"
@@ -22,6 +22,9 @@ path = "../mediarepo-database"
[dependencies.mediarepo-logic]
path = "../mediarepo-logic"
[dependencies.mediarepo-worker]
path = "../mediarepo-worker"
[dependencies.tokio]
version = "1.17.0"
features = ["net"]

@@ -1,29 +1,26 @@
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use crate::encrypted::EncryptedListener;
use mediarepo_core::bromine::prelude::*;
use mediarepo_core::error::{RepoError, RepoResult};
use mediarepo_core::mediarepo_api::types::misc::InfoResponse;
use mediarepo_core::settings::{PortSetting, Settings};
use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle;
use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey, SubsystemKey};
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::type_keys::RepoKey;
use mediarepo_core::trait_bound_typemap::{SendSyncTypeMap, TypeMap};
use mediarepo_core::type_keys::{SizeMetadataKey, SubsystemKey};
mod from_model;
mod namespaces;
mod utils;
#[tracing::instrument(skip(subsystem, settings, repo))]
#[tracing::instrument(skip_all)]
pub fn start_tcp_server(
subsystem: SubsystemHandle,
repo_path: PathBuf,
settings: Settings,
repo: Repo,
shared_data: SendSyncTypeMap,
) -> RepoResult<(String, JoinHandle<()>)> {
let port = match &settings.server.tcp.port {
PortSetting::Fixed(p) => {
@@ -43,11 +40,9 @@ pub fn start_tcp_server(
let join_handle = tokio::task::Builder::new()
.name("mediarepo_tcp::listen")
.spawn(async move {
get_builder::<TcpListener>(address)
get_builder::<EncryptedListener<TcpListener>>(address)
.insert::<SubsystemKey>(subsystem)
.insert::<RepoKey>(Arc::new(repo))
.insert::<SettingsKey>(settings)
.insert::<RepoPathKey>(repo_path)
.insert_all(shared_data)
.insert::<SizeMetadataKey>(Default::default())
.build_server()
.await
@@ -58,13 +53,11 @@ }
}
#[cfg(unix)]
#[tracing::instrument(skip(subsystem, settings, repo))]
#[tracing::instrument(skip_all)]
pub fn create_unix_socket(
subsystem: SubsystemHandle,
path: std::path::PathBuf,
repo_path: PathBuf,
settings: Settings,
repo: Repo,
shared_data: SendSyncTypeMap,
) -> RepoResult<JoinHandle<()>> {
use std::fs;
use tokio::net::UnixListener;
@@ -77,9 +70,7 @@ .spawn(async move {
.spawn(async move {
get_builder::<UnixListener>(path)
.insert::<SubsystemKey>(subsystem)
.insert::<RepoKey>(Arc::new(repo))
.insert::<SettingsKey>(settings)
.insert::<RepoPathKey>(repo_path)
.insert_all(shared_data)
.insert::<SizeMetadataKey>(Default::default())
.build_server()
.await

@@ -1,11 +1,15 @@
use crate::TypeMap;
use mediarepo_core::bromine::prelude::*;
use mediarepo_core::error::RepoResult;
use mediarepo_core::mediarepo_api::types::jobs::{JobType, RunJobRequest};
use mediarepo_core::mediarepo_api::types::repo::SizeType;
use mediarepo_core::type_keys::SizeMetadataKey;
use mediarepo_logic::dao::DaoProvider;
use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey};
use mediarepo_worker::handle::JobState;
use mediarepo_worker::job_dispatcher::JobDispatcher;
use mediarepo_worker::jobs::{
CalculateSizesJob, CheckIntegrityJob, GenerateMissingThumbsJob, Job, MigrateCDsJob, VacuumJob,
};
use crate::utils::{calculate_size, get_repo_from_context};
use crate::utils::get_job_dispatcher_from_context;
pub struct JobsNamespace;
@@ -25,7 +29,7 @@ impl JobsNamespace {
#[tracing::instrument(skip_all)]
pub async fn run_job(ctx: &Context, event: Event) -> IPCResult<Response> {
let run_request = event.payload::<RunJobRequest>()?;
let job_dao = get_repo_from_context(ctx).await.job();
let dispatcher = get_job_dispatcher_from_context(ctx).await;
if !run_request.sync {
// early response to indicate that the job will be run
@@ -33,26 +37,69 @@ }
}
match run_request.job_type {
JobType::MigrateContentDescriptors => job_dao.migrate_content_descriptors().await?,
JobType::MigrateContentDescriptors => {
dispatch_job(&dispatcher, MigrateCDsJob::default(), run_request.sync).await?
}
JobType::CalculateSizes => calculate_all_sizes(ctx).await?,
JobType::CheckIntegrity => job_dao.check_integrity().await?,
JobType::Vacuum => job_dao.vacuum().await?,
JobType::GenerateThumbnails => job_dao.generate_missing_thumbnails().await?,
JobType::CheckIntegrity => {
dispatch_job(&dispatcher, CheckIntegrityJob::default(), run_request.sync).await?
}
JobType::Vacuum => {
dispatch_job(&dispatcher, VacuumJob::default(), run_request.sync).await?
}
JobType::GenerateThumbnails => {
dispatch_job(
&dispatcher,
GenerateMissingThumbsJob::default(),
run_request.sync,
)
.await?
}
}
Ok(Response::empty())
}
}
async fn dispatch_job<J: 'static + Job>(
dispatcher: &JobDispatcher,
job: J,
sync: bool,
) -> RepoResult<()> {
let mut handle = if let Some(handle) = dispatcher.get_handle::<J>().await {
if handle.state().await == JobState::Running {
handle
} else {
dispatcher.dispatch(job).await
}
} else {
dispatcher.dispatch(job).await
};
if sync {
if let Some(result) = handle.take_result().await {
result?;
}
}
Ok(())
}
async fn calculate_all_sizes(ctx: &Context) -> RepoResult<()> {
let size_types = vec![
SizeType::Total,
SizeType::FileFolder,
SizeType::ThumbFolder,
SizeType::DatabaseFile,
];
for size_type in size_types {
let size = calculate_size(&size_type, ctx).await?;
let (repo_path, settings) = {
let data = ctx.data.read().await;
(
data.get::<RepoPathKey>().unwrap().clone(),
data.get::<SettingsKey>().unwrap().clone(),
)
};
let job = CalculateSizesJob::new(repo_path, settings);
let dispatcher = get_job_dispatcher_from_context(ctx).await;
let handle = dispatcher.dispatch(job).await;
let mut rx = {
let status = handle.status().read().await;
status.sizes_channel.subscribe()
};
while let Ok((size_type, size)) = rx.recv().await {
let mut data = ctx.data.write().await;
let size_map = data.get_mut::<SizeMetadataKey>().unwrap();
size_map.insert(size_type, size);

@@ -2,13 +2,14 @@ use std::path::PathBuf;
use tokio::fs;
use crate::TypeMap;
use mediarepo_core::bromine::prelude::*;
use mediarepo_core::mediarepo_api::types::repo::{
FrontendState, RepositoryMetadata, SizeMetadata, SizeType,
};
use mediarepo_core::type_keys::{RepoPathKey, SettingsKey, SizeMetadataKey};
use crate::utils::{calculate_size, get_repo_from_context};
use crate::utils::get_repo_from_context;
pub struct RepoNamespace;
@@ -56,7 +57,7 @@ impl RepoNamespace {
let size = if let Some(size) = size_cache.get(&size_type) {
*size
} else {
calculate_size(&size_type, ctx).await?
0
};
ctx.response(SizeMetadata { size, size_type })

@@ -1,18 +1,15 @@
use std::sync::Arc;
use tokio::fs;
use crate::TypeMap;
use mediarepo_core::bromine::ipc::context::Context;
use mediarepo_core::content_descriptor::decode_content_descriptor;
use mediarepo_core::error::{RepoError, RepoResult};
use mediarepo_core::mediarepo_api::types::identifier::FileIdentifier;
use mediarepo_core::mediarepo_api::types::repo::SizeType;
use mediarepo_core::type_keys::{RepoPathKey, SettingsKey};
use mediarepo_core::utils::get_folder_size;
use mediarepo_logic::dao::DaoProvider;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use mediarepo_logic::dto::FileDto;
use mediarepo_logic::type_keys::RepoKey;
use mediarepo_worker::job_dispatcher::{DispatcherKey, JobDispatcher};
pub async fn get_repo_from_context(ctx: &Context) -> Arc<Repo> {
let data = ctx.data.read().await;
@@ -20,6 +17,11 @@ pub async fn get_repo_from_context(ctx: &Context) -> Arc<Repo> {
Arc::clone(repo)
}
pub async fn get_job_dispatcher_from_context(ctx: &Context) -> JobDispatcher {
let data = ctx.data.read().await;
data.get::<DispatcherKey>().unwrap().clone()
}
pub async fn file_by_identifier(identifier: FileIdentifier, repo: &Repo) -> RepoResult<FileDto> {
let file = match identifier {
FileIdentifier::ID(id) => repo.file().by_id(id).await,
@@ -41,27 +43,3 @@ pub async fn cd_by_identifier(identifier: FileIdentifier, repo: &Repo) -> RepoRe
FileIdentifier::CD(cd) => decode_content_descriptor(cd),
}
}
pub async fn calculate_size(size_type: &SizeType, ctx: &Context) -> RepoResult<u64> {
let repo = get_repo_from_context(ctx).await;
let (repo_path, settings) = {
let data = ctx.data.read().await;
(
data.get::<RepoPathKey>().unwrap().clone(),
data.get::<SettingsKey>().unwrap().clone(),
)
};
let size = match &size_type {
SizeType::Total => get_folder_size(repo_path).await?,
SizeType::FileFolder => repo.get_main_store_size().await?,
SizeType::ThumbFolder => repo.get_thumb_store_size().await?,
SizeType::DatabaseFile => {
let db_path = settings.paths.db_file_path(&repo_path);
let database_metadata = fs::metadata(db_path).await?;
database_metadata.len()
}
};
Ok(size)
}

@@ -0,0 +1,31 @@
[package]
name = "mediarepo-worker"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-trait = "0.1.53"
tracing = "0.1.32"
[dependencies.mediarepo-core]
path = "../mediarepo-core"
[dependencies.mediarepo-logic]
path = "../mediarepo-logic"
[dependencies.mediarepo-database]
path = "../mediarepo-database"
[dependencies.tokio]
version = "1.17.0"
features = ["macros"]
[dependencies.chrono]
version = "0.4.19"
features = ["serde"]
[dependencies.serde]
version = "1.0.136"
features = ["derive"]

@@ -0,0 +1,101 @@
use mediarepo_core::error::{RepoError, RepoResult};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use tokio::sync::broadcast::{Receiver, Sender};
use tokio::sync::RwLock;
pub struct JobHandle<T: Send + Sync, R: Send + Sync> {
status: Arc<RwLock<T>>,
state: Arc<RwLock<JobState>>,
result_receiver: CloneableReceiver<Arc<RwLock<Option<RepoResult<R>>>>>,
}
impl<T: Send + Sync, R: Send + Sync> Clone for JobHandle<T, R> {
fn clone(&self) -> Self {
Self {
status: self.status.clone(),
state: self.state.clone(),
result_receiver: self.result_receiver.clone(),
}
}
}
impl<T: Send + Sync, R: Send + Sync> JobHandle<T, R> {
pub fn new(
status: Arc<RwLock<T>>,
state: Arc<RwLock<JobState>>,
result_receiver: CloneableReceiver<Arc<RwLock<Option<RepoResult<R>>>>>,
) -> Self {
Self {
status,
state,
result_receiver,
}
}
pub async fn state(&self) -> JobState {
*self.state.read().await
}
pub fn status(&self) -> &Arc<RwLock<T>> {
&self.status
}
pub async fn result(&mut self) -> Arc<RwLock<Option<RepoResult<R>>>> {
match self.result_receiver.recv().await {
Ok(v) => v,
Err(e) => Arc::new(RwLock::new(Some(Err(RepoError::from(&*e.to_string()))))),
}
}
pub async fn take_result(&mut self) -> Option<RepoResult<R>> {
let shared_result = self.result().await;
let mut result = shared_result.write().await;
result.take()
}
}
#[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub enum JobState {
Queued,
Scheduled,
Running,
Finished,
}
pub struct CloneableReceiver<T: Clone> {
receiver: Receiver<T>,
sender: Sender<T>,
}
impl<T: Clone> CloneableReceiver<T> {
pub fn new(sender: Sender<T>) -> Self {
Self {
receiver: sender.subscribe(),
sender,
}
}
}
impl<T: Clone> Clone for CloneableReceiver<T> {
fn clone(&self) -> Self {
Self {
sender: self.sender.clone(),
receiver: self.sender.subscribe(),
}
}
}
impl<T: Clone> Deref for CloneableReceiver<T> {
type Target = Receiver<T>;
fn deref(&self) -> &Self::Target {
&self.receiver
}
}
impl<T: Clone> DerefMut for CloneableReceiver<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.receiver
}
}

@@ -0,0 +1,139 @@
use crate::handle::{CloneableReceiver, JobHandle, JobState};
use crate::jobs::{Job, JobTypeKey};
use mediarepo_core::error::RepoError;
use mediarepo_core::tokio_graceful_shutdown::SubsystemHandle;
use mediarepo_core::trait_bound_typemap::{SendSyncTypeMap, TypeMap, TypeMapKey};
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast::channel;
use tokio::sync::RwLock;
use tokio::time::Instant;
#[derive(Clone)]
pub struct JobDispatcher {
subsystem: SubsystemHandle,
job_handle_map: Arc<RwLock<SendSyncTypeMap>>,
repo: Arc<Repo>,
}
impl JobDispatcher {
pub fn new(subsystem: SubsystemHandle, repo: Repo) -> Self {
Self {
job_handle_map: Arc::new(RwLock::new(SendSyncTypeMap::new())),
subsystem,
repo: Arc::new(repo),
}
}
pub async fn dispatch<T: 'static + Job>(&self, job: T) -> JobHandle<T::JobStatus, T::Result> {
self._dispatch(job, None).await
}
pub async fn dispatch_periodically<T: 'static + Job>(
&self,
job: T,
interval: Duration,
) -> JobHandle<T::JobStatus, T::Result> {
self._dispatch(job, Some(interval)).await
}
#[tracing::instrument(level = "debug", skip_all)]
async fn _dispatch<T: 'static + Job>(
&self,
job: T,
interval: Option<Duration>,
) -> JobHandle<T::JobStatus, T::Result> {
let status = job.status();
let state = Arc::new(RwLock::new(JobState::Queued));
let (sender, mut receiver) = channel(1);
self.subsystem
.start::<RepoError, _, _>("channel-consumer", move |subsystem| async move {
tokio::select! {
_ = receiver.recv() => (),
_ = subsystem.on_shutdown_requested() => (),
}
Ok(())
});
let receiver = CloneableReceiver::new(sender.clone());
let handle = JobHandle::new(status.clone(), state.clone(), receiver);
self.add_handle::<T>(handle.clone()).await;
let repo = self.repo.clone();
self.subsystem
.start::<RepoError, _, _>("worker-job", move |subsystem| async move {
loop {
let start = Instant::now();
let job_2 = job.clone();
{
let mut state = state.write().await;
*state = JobState::Running;
}
if let Err(e) = job.load_state(repo.job()).await {
tracing::error!("failed to load the jobs state: {}", e);
}
let result = tokio::select! {
_ = subsystem.on_shutdown_requested() => {
job_2.save_state(repo.job()).await
}
r = job.run(repo.clone()) => {
match r {
Err(e) => Err(e),
Ok(v) => {
let _ = sender.send(Arc::new(RwLock::new(Some(Ok(v)))));
job.save_state(repo.job()).await
}
}
}
};
if let Err(e) = result {
tracing::error!("job failed with error: {}", e);
let _ = sender.send(Arc::new(RwLock::new(Some(Err(e)))));
}
if let Some(interval) = interval {
{
let mut state = state.write().await;
*state = JobState::Scheduled;
}
let sleep_duration = interval - start.elapsed();
tokio::select! {
_ = tokio::time::sleep(sleep_duration) => {},
_ = subsystem.on_shutdown_requested() => {break}
}
} else {
let mut state = state.write().await;
*state = JobState::Finished;
break;
}
}
Ok(())
});
handle
}
#[inline]
async fn add_handle<T: 'static + Job>(&self, handle: JobHandle<T::JobStatus, T::Result>) {
let mut status_map = self.job_handle_map.write().await;
status_map.insert::<JobTypeKey<T>>(handle);
}
#[inline]
pub async fn get_handle<T: 'static + Job>(&self) -> Option<JobHandle<T::JobStatus, T::Result>> {
let map = self.job_handle_map.read().await;
map.get::<JobTypeKey<T>>().cloned()
}
}
pub struct DispatcherKey;
impl TypeMapKey for DispatcherKey {
type Value = JobDispatcher;
}
unsafe impl Send for JobDispatcher {}
unsafe impl Sync for JobDispatcher {}

@@ -0,0 +1,91 @@
use crate::jobs::Job;
use crate::status_utils::SimpleProgress;
use async_trait::async_trait;
use mediarepo_core::error::{RepoError, RepoResult};
use mediarepo_core::mediarepo_api::types::repo::SizeType;
use mediarepo_core::settings::Settings;
use mediarepo_core::utils::get_folder_size;
use mediarepo_logic::dao::repo::Repo;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::fs;
use tokio::sync::broadcast::{self, Sender};
use tokio::sync::RwLock;
pub struct CalculateSizesState {
pub progress: SimpleProgress,
pub sizes_channel: Sender<(SizeType, u64)>,
}
#[derive(Clone)]
pub struct CalculateSizesJob {
repo_path: PathBuf,
settings: Settings,
state: Arc<RwLock<CalculateSizesState>>,
}
impl CalculateSizesJob {
pub fn new(repo_path: PathBuf, settings: Settings) -> Self {
let (tx, _) = broadcast::channel(4);
Self {
repo_path,
settings,
state: Arc::new(RwLock::new(CalculateSizesState {
sizes_channel: tx,
progress: SimpleProgress::new(4),
})),
}
}
}
#[async_trait]
impl Job for CalculateSizesJob {
type JobStatus = CalculateSizesState;
type Result = ();
fn status(&self) -> Arc<RwLock<Self::JobStatus>> {
self.state.clone()
}
#[tracing::instrument(level = "debug", skip_all)]
async fn run(&self, repo: Arc<Repo>) -> RepoResult<()> {
let size_types = vec![
SizeType::Total,
SizeType::FileFolder,
SizeType::ThumbFolder,
SizeType::DatabaseFile,
];
for size_type in size_types {
let size = calculate_size(&size_type, &repo, &self.repo_path, &self.settings).await?;
let mut state = self.state.write().await;
state
.sizes_channel
.send((size_type, size))
.map_err(|_| RepoError::from("failed to broadcast new size"))?;
state.progress.tick();
}
Ok(())
}
}
async fn calculate_size(
size_type: &SizeType,
repo: &Repo,
repo_path: &PathBuf,
settings: &Settings,
) -> RepoResult<u64> {
let size = match &size_type {
SizeType::Total => get_folder_size(repo_path.clone()).await?,
SizeType::FileFolder => repo.get_main_store_size().await?,
SizeType::ThumbFolder => repo.get_thumb_store_size().await?,
SizeType::DatabaseFile => {
let db_path = settings.paths.db_file_path(repo_path);
let database_metadata = fs::metadata(db_path).await?;
database_metadata.len()
}
};
Ok(size)
}

@@ -0,0 +1,32 @@
use crate::jobs::Job;
use crate::status_utils::SimpleProgress;
use async_trait::async_trait;
use mediarepo_core::error::RepoResult;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use std::sync::Arc;
use tokio::sync::RwLock;
#[derive(Clone, Default)]
pub struct CheckIntegrityJob {
progress: Arc<RwLock<SimpleProgress>>,
}
#[async_trait]
impl Job for CheckIntegrityJob {
type JobStatus = SimpleProgress;
type Result = ();
fn status(&self) -> Arc<RwLock<Self::JobStatus>> {
self.progress.clone()
}
async fn run(&self, repo: Arc<Repo>) -> RepoResult<Self::Result> {
repo.job().check_integrity().await?;
{
let mut progress = self.progress.write().await;
progress.set_total(100);
}
Ok(())
}
}

@@ -0,0 +1,109 @@
use crate::jobs::{deserialize_state, serialize_state, Job};
use crate::status_utils::SimpleProgress;
use async_trait::async_trait;
use mediarepo_core::error::RepoResult;
use mediarepo_core::thumbnailer::ThumbnailSize;
use mediarepo_database::entities::job_state::JobType;
use mediarepo_logic::dao::job::JobDao;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use serde::{Deserialize, Serialize};
use std::mem;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;
#[derive(Clone, Default)]
pub struct GenerateMissingThumbsJob {
state: Arc<RwLock<SimpleProgress>>,
inner_state: Arc<RwLock<GenerateThumbsState>>,
}
#[async_trait]
impl Job for GenerateMissingThumbsJob {
type JobStatus = SimpleProgress;
type Result = ();
fn status(&self) -> Arc<RwLock<Self::JobStatus>> {
self.state.clone()
}
async fn load_state(&self, job_dao: JobDao) -> RepoResult<()> {
if let Some(state) = job_dao.state_for_job_type(JobType::GenerateThumbs).await? {
let mut inner_state = self.inner_state.write().await;
let state = deserialize_state::<GenerateThumbsState>(state)?;
let _ = mem::replace(&mut *inner_state, state);
}
Ok(())
}
async fn run(&self, repo: Arc<Repo>) -> RepoResult<()> {
if !self.needs_generation(&repo).await? {
return Ok(());
}
let file_dao = repo.file();
let all_files = file_dao.all().await?;
{
let mut progress = self.state.write().await;
progress.set_total(all_files.len() as u64);
}
for file in all_files {
if file_dao.thumbnails(file.encoded_cd()).await?.is_empty() {
let _ = file_dao
.create_thumbnails(&file, vec![ThumbnailSize::Medium])
.await;
}
{
let mut progress = self.state.write().await;
progress.tick();
}
}
self.refresh_state(&repo).await?;
Ok(())
}
async fn save_state(&self, job_dao: JobDao) -> RepoResult<()> {
let state = self.inner_state.read().await;
let state = serialize_state(JobType::GenerateThumbs, &*state)?;
job_dao.upsert_state(state).await
}
}
impl GenerateMissingThumbsJob {
async fn needs_generation(&self, repo: &Repo) -> RepoResult<bool> {
let repo_counts = repo.get_counts().await?;
let file_count = repo_counts.file_count as u64;
let state = self.inner_state.read().await;
Ok(state.file_count != file_count
|| state.last_run.elapsed().unwrap() > Duration::from_secs(60 * 60))
}
async fn refresh_state(&self, repo: &Repo) -> RepoResult<()> {
let repo_counts = repo.get_counts().await?;
let mut state = self.inner_state.write().await;
state.last_run = SystemTime::now();
state.file_count = repo_counts.file_count as u64;
Ok(())
}
}
#[derive(Serialize, Deserialize)]
struct GenerateThumbsState {
file_count: u64,
last_run: SystemTime,
}
impl Default for GenerateThumbsState {
fn default() -> Self {
Self {
file_count: 0,
last_run: SystemTime::now(),
}
}
}

@@ -0,0 +1,65 @@
use crate::jobs::{deserialize_state, serialize_state, Job};
use crate::status_utils::SimpleProgress;
use async_trait::async_trait;
use mediarepo_core::error::RepoResult;
use mediarepo_database::entities::job_state::JobType;
use mediarepo_logic::dao::job::JobDao;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::sync::RwLock;
#[derive(Clone, Default)]
pub struct MigrateCDsJob {
progress: Arc<RwLock<SimpleProgress>>,
migrated: Arc<AtomicBool>,
}
#[async_trait]
impl Job for MigrateCDsJob {
type JobStatus = SimpleProgress;
type Result = ();
fn status(&self) -> Arc<tokio::sync::RwLock<Self::JobStatus>> {
self.progress.clone()
}
async fn load_state(&self, job_dao: JobDao) -> RepoResult<()> {
if let Some(state) = job_dao.state_for_job_type(JobType::MigrateCDs).await? {
let state = deserialize_state::<MigrationStatus>(state)?;
self.migrated.store(state.migrated, Ordering::SeqCst);
}
Ok(())
}
async fn run(&self, repo: Arc<Repo>) -> RepoResult<Self::Result> {
if self.migrated.load(Ordering::SeqCst) {
return Ok(());
}
let job_dao = repo.job();
job_dao.migrate_content_descriptors().await?;
self.migrated.store(true, Ordering::Relaxed);
{
let mut progress = self.progress.write().await;
progress.set_total(100);
}
Ok(())
}
async fn save_state(&self, job_dao: JobDao) -> RepoResult<()> {
if self.migrated.load(Ordering::Relaxed) {
let state = serialize_state(JobType::MigrateCDs, &MigrationStatus { migrated: true })?;
job_dao.upsert_state(state).await?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
struct MigrationStatus {
pub migrated: bool,
}

@@ -0,0 +1,71 @@
mod calculate_sizes;
mod check_integrity;
mod generate_missing_thumbnails;
mod migrate_content_descriptors;
mod vacuum;
pub use calculate_sizes::*;
pub use check_integrity::*;
pub use generate_missing_thumbnails::*;
pub use migrate_content_descriptors::*;
use std::marker::PhantomData;
use std::sync::Arc;
pub use vacuum::*;
use crate::handle::JobHandle;
use async_trait::async_trait;
use mediarepo_core::bincode;
use mediarepo_core::error::{RepoError, RepoResult};
use mediarepo_core::trait_bound_typemap::TypeMapKey;
use mediarepo_database::entities::job_state::JobType;
use mediarepo_logic::dao::job::JobDao;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dto::{JobStateDto, UpsertJobStateDto};
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::sync::RwLock;
type EmptyStatus = Arc<RwLock<()>>;
#[async_trait]
pub trait Job: Clone + Send + Sync {
type JobStatus: Send + Sync;
type Result: Send + Sync;
fn status(&self) -> Arc<RwLock<Self::JobStatus>>;
async fn load_state(&self, _job_dao: JobDao) -> RepoResult<()> {
Ok(())
}
async fn run(&self, repo: Arc<Repo>) -> RepoResult<Self::Result>;
async fn save_state(&self, _job_dao: JobDao) -> RepoResult<()> {
Ok(())
}
}
pub struct JobTypeKey<T: Job>(PhantomData<T>);
impl<T: 'static> TypeMapKey for JobTypeKey<T>
where
T: Job,
{
type Value = JobHandle<T::JobStatus, T::Result>;
}
pub fn deserialize_state<T: DeserializeOwned>(dto: JobStateDto) -> RepoResult<T> {
bincode::deserialize(dto.value()).map_err(RepoError::from)
}
pub fn serialize_state<T: Serialize>(
job_type: JobType,
state: &T,
) -> RepoResult<UpsertJobStateDto> {
let dto = UpsertJobStateDto {
value: bincode::serialize(state)?,
job_type,
};
Ok(dto)
}

@@ -0,0 +1,27 @@
use crate::jobs::{EmptyStatus, Job};
use async_trait::async_trait;
use mediarepo_core::error::RepoResult;
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::dao::DaoProvider;
use std::sync::Arc;
use tokio::sync::RwLock;
#[derive(Default, Clone)]
pub struct VacuumJob;
#[async_trait]
impl Job for VacuumJob {
type JobStatus = ();
type Result = ();
fn status(&self) -> Arc<RwLock<Self::JobStatus>> {
EmptyStatus::default()
}
#[tracing::instrument(level = "debug", skip_all)]
async fn run(&self, repo: Arc<Repo>) -> RepoResult<()> {
repo.job().vacuum().await?;
Ok(())
}
}

@@ -0,0 +1,40 @@
use crate::job_dispatcher::JobDispatcher;
use crate::jobs::{CheckIntegrityJob, MigrateCDsJob, VacuumJob};
use mediarepo_core::error::RepoError;
use mediarepo_core::tokio_graceful_shutdown::Toplevel;
use mediarepo_logic::dao::repo::Repo;
use std::time::Duration;
use tokio::sync::oneshot::channel;
pub mod handle;
pub mod job_dispatcher;
pub mod jobs;
pub mod status_utils;
pub async fn start(top_level: Toplevel, repo: Repo) -> (Toplevel, JobDispatcher) {
let (tx, rx) = channel();
let top_level =
top_level.start::<RepoError, _, _>("mediarepo-worker", |subsystem| async move {
let dispatcher = JobDispatcher::new(subsystem, repo);
tx.send(dispatcher.clone())
.map_err(|_| RepoError::from("failed to send dispatcher"))?;
dispatcher
.dispatch_periodically(VacuumJob::default(), Duration::from_secs(60 * 30))
.await;
dispatcher
.dispatch_periodically(
CheckIntegrityJob::default(),
Duration::from_secs(60 * 60 * 24),
)
.await;
dispatcher.dispatch(MigrateCDsJob::default()).await;
Ok(())
});
let receiver = rx
.await
.expect("failed to create background job dispatcher");
(top_level, receiver)
}

@@ -0,0 +1,39 @@
pub struct SimpleProgress {
pub current: u64,
pub total: u64,
}
impl Default for SimpleProgress {
fn default() -> Self {
Self {
total: 100,
current: 0,
}
}
}
impl SimpleProgress {
pub fn new(total: u64) -> Self {
Self { total, current: 0 }
}
/// Sets the total count
pub fn set_total(&mut self, total: u64) {
self.total = total;
}
/// Increments the current progress by 1
pub fn tick(&mut self) {
self.current += 1;
}
/// Sets the current progress to a defined value
pub fn set_current(&mut self, current: u64) {
self.current = current;
}
/// Returns the progress as a fraction of the total (0.0 to 1.0)
pub fn percent(&self) -> f64 {
(self.current as f64) / (self.total as f64)
}
}

@@ -2,21 +2,24 @@ use std::fs;
use std::path::PathBuf;
use console_subscriber::ConsoleLayer;
use opentelemetry::sdk::Resource;
use opentelemetry::{sdk, KeyValue};
use rolling_file::RollingConditionBasic;
use tracing::Level;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_flame::FlameLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{
fmt::{self},
Layer, Registry,
};
use tracing_subscriber::filter::{self, Targets};
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{
fmt::{self},
Layer, Registry,
};
use mediarepo_core::settings::LoggingSettings;
use mediarepo_core::tracing_layer_list::DynLayerList;
#[allow(dyn_drop)]
pub type DropGuard = Box<dyn Drop>;
@@ -25,39 +28,97 @@ pub fn init_tracing(repo_path: &PathBuf, log_cfg: &LoggingSettings) -> Vec<DropG
LogTracer::init().expect("failed to subscribe to log entries");
let log_path = repo_path.join("logs");
let mut guards = Vec::new();
let mut layer_list = DynLayerList::new();
if !log_path.exists() {
fs::create_dir(&log_path).expect("failed to create directory for log files");
}
let (stdout_writer, guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(Box::new(guard) as DropGuard);
let stdout_layer = fmt::layer()
.with_thread_names(false)
.with_target(true)
.with_writer(stdout_writer)
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(
std::env::var("RUST_LOG")
.unwrap_or(String::from("info,sqlx=warn"))
.parse::<filter::Targets>()
.unwrap_or(
add_stdout_layer(&mut guards, &mut layer_list);
add_sql_layer(log_cfg, &log_path, &mut guards, &mut layer_list);
add_bromine_layer(log_cfg, &log_path, &mut guards, &mut layer_list);
add_app_log_layer(log_cfg, &log_path, &mut guards, &mut layer_list);
if log_cfg.telemetry {
add_telemetry_layer(log_cfg, &mut layer_list);
}
let tokio_console_enabled = std::env::var("TOKIO_CONSOLE")
.map(|v| v.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if tokio_console_enabled {
add_tokio_console_layer(&mut layer_list);
}
let registry = Registry::default().with(layer_list);
tracing::subscriber::set_global_default(registry).expect("Failed to initialize tracing");
guards
}
fn add_tokio_console_layer(layer_list: &mut DynLayerList<Registry>) {
let console_layer = ConsoleLayer::builder().with_default_env().spawn();
layer_list.add(console_layer);
}
fn add_telemetry_layer(log_cfg: &LoggingSettings, layer_list: &mut DynLayerList<Registry>) {
match opentelemetry_jaeger::new_pipeline()
.with_agent_endpoint(&log_cfg.telemetry_endpoint)
.with_trace_config(
sdk::trace::Config::default()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
"mediarepo-daemon",
)]))
.with_max_attributes_per_span(1),
)
.with_instrumentation_library_tags(false)
.with_service_name("mediarepo-daemon")
.install_batch(opentelemetry::runtime::Tokio)
{
Ok(tracer) => {
let telemetry_layer = tracing_opentelemetry::layer()
.with_tracer(tracer)
.with_filter(
filter::Targets::new()
.with_default(Level::INFO)
.with_target("sqlx", Level::WARN),
),
);
.with_target("tokio", Level::INFO)
.with_target("h2", Level::INFO)
.with_target("sqlx", Level::ERROR)
.with_target("sea_orm", Level::INFO),
);
layer_list.add(telemetry_layer);
}
Err(e) => {
eprintln!("Failed to initialize telemetry tracing: {}", e);
}
}
}
let (sql_writer, guard) = get_sql_log_writer(&log_path);
fn add_app_log_layer(
log_cfg: &LoggingSettings,
log_path: &PathBuf,
guards: &mut Vec<DropGuard>,
layer_list: &mut DynLayerList<Registry>,
) {
let (app_log_writer, guard) = get_application_log_writer(&log_path);
guards.push(Box::new(guard) as DropGuard);
let sql_layer = fmt::layer()
.with_writer(sql_writer)
let app_log_layer = fmt::layer()
.with_writer(app_log_writer)
.pretty()
.with_ansi(false)
.with_span_events(FmtSpan::NONE)
.with_filter(get_sql_targets(log_cfg.trace_sql));
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(get_app_targets(log_cfg.level.clone().into()));
layer_list.add(app_log_layer);
}
fn add_bromine_layer(
log_cfg: &LoggingSettings,
log_path: &PathBuf,
guards: &mut Vec<DropGuard>,
layer_list: &mut DynLayerList<Registry>,
) {
let (bromine_writer, guard) = get_bromine_log_writer(&log_path);
guards.push(Box::new(guard) as DropGuard);
@@ -67,36 +128,48 @@ pub fn init_tracing(repo_path: &PathBuf, log_cfg: &LoggingSettings) -> Vec<DropG
.with_ansi(false)
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(get_bromine_targets(log_cfg.trace_api_calls));
layer_list.add(bromine_layer);
}
let (app_log_writer, guard) = get_application_log_writer(&log_path);
fn add_sql_layer(
log_cfg: &LoggingSettings,
log_path: &PathBuf,
guards: &mut Vec<DropGuard>,
layer_list: &mut DynLayerList<Registry>,
) {
let (sql_writer, guard) = get_sql_log_writer(&log_path);
guards.push(Box::new(guard) as DropGuard);
let app_log_layer = fmt::layer()
.with_writer(app_log_writer)
let sql_layer = fmt::layer()
.with_writer(sql_writer)
.pretty()
.with_ansi(false)
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(get_app_targets(log_cfg.level.clone().into()));
let registry = Registry::default()
.with(stdout_layer)
.with(sql_layer)
.with(bromine_layer)
.with(app_log_layer);
.with_span_events(FmtSpan::NONE)
.with_filter(get_sql_targets(log_cfg.trace_sql));
let tokio_console_enabled = std::env::var("TOKIO_CONSOLE")
.map(|v| v.eq_ignore_ascii_case("true"))
.unwrap_or(false);
layer_list.add(sql_layer);
}
if tokio_console_enabled {
let console_layer = ConsoleLayer::builder().with_default_env().spawn();
let registry = registry.with(console_layer);
tracing::subscriber::set_global_default(registry).expect("Failed to initialize tracing");
} else {
tracing::subscriber::set_global_default(registry).expect("Failed to initialize tracing");
}
fn add_stdout_layer(guards: &mut Vec<DropGuard>, layer_list: &mut DynLayerList<Registry>) {
let (stdout_writer, guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(Box::new(guard) as DropGuard);
guards
let stdout_layer = fmt::layer()
.with_thread_names(false)
.with_target(true)
.with_writer(stdout_writer)
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(
std::env::var("RUST_LOG")
.unwrap_or(String::from("info,sqlx=warn"))
.parse::<filter::Targets>()
.unwrap_or(
filter::Targets::new()
.with_default(Level::INFO)
.with_target("sqlx", Level::WARN),
),
);
layer_list.add(stdout_layer);
}
fn get_sql_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {

@@ -1,19 +1,23 @@
use std::env;
use std::iter::FromIterator;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use structopt::StructOpt;
use tokio::fs;
use tokio::io::AsyncWriteExt;
use tokio::runtime;
use tokio::runtime::Runtime;
use mediarepo_core::error::RepoResult;
use mediarepo_core::fs::drop_file::DropFile;
use mediarepo_core::settings::{PathSettings, Settings};
use mediarepo_core::tokio_graceful_shutdown::{SubsystemHandle, Toplevel};
use mediarepo_core::trait_bound_typemap::{CloneSendSyncTypeMap, SendSyncTypeMap, TypeMap};
use mediarepo_core::type_keys::{RepoPathKey, SettingsKey};
use mediarepo_logic::dao::repo::Repo;
use mediarepo_logic::type_keys::RepoKey;
use mediarepo_socket::start_tcp_server;
use mediarepo_worker::job_dispatcher::DispatcherKey;
use crate::utils::{create_paths_for_repo, get_repo, load_settings};
@@ -49,7 +53,8 @@ enum SubCommand {
Start,
}
fn main() -> RepoResult<()> {
#[tokio::main]
async fn main() -> RepoResult<()> {
let mut opt: Opt = Opt::from_args();
opt.repo = env::current_dir().unwrap().join(opt.repo);
@@ -66,7 +71,7 @@ fn main() -> RepoResult<()> {
} else {
Settings::default()
};
clean_old_connection_files(&opt.repo)?;
clean_old_connection_files(&opt.repo).await?;
let mut guards = Vec::new();
if opt.profile {
@@ -76,10 +81,11 @@ }
}
let result = match opt.cmd.clone() {
SubCommand::Init { force } => get_single_thread_runtime().block_on(init(opt, force)),
SubCommand::Start => get_multi_thread_runtime().block_on(start_server(opt, settings)),
SubCommand::Init { force } => init(opt, force).await,
SubCommand::Start => start_server(opt, settings).await,
};
opentelemetry::global::shutdown_tracer_provider();
match result {
Ok(_) => Ok(()),
Err(e) => {
@@ -90,23 +96,6 @@ }
}
}
fn get_single_thread_runtime() -> Runtime {
log::info!("Using current thread runtime");
runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(1)
.build()
.unwrap()
}
fn get_multi_thread_runtime() -> Runtime {
log::info!("Using multi thread runtime");
runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
}
async fn init_repo(opt: &Opt, paths: &PathSettings) -> RepoResult<Repo> {
let repo = get_repo(&opt.repo, paths).await?;
@@ -116,19 +105,29 @@ async fn init_repo(opt: &Opt, paths: &PathSettings) -> RepoResult<Repo> {
/// Starts the server
async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> {
let repo = init_repo(&opt, &settings.paths).await?;
let mut top_level = Toplevel::new();
let (mut top_level, dispatcher) = mediarepo_worker::start(Toplevel::new(), repo.clone()).await;
let mut shared_data = CloneSendSyncTypeMap::new();
shared_data.insert::<RepoKey>(Arc::new(repo));
shared_data.insert::<SettingsKey>(settings.clone());
shared_data.insert::<RepoPathKey>(opt.repo.clone());
shared_data.insert::<DispatcherKey>(dispatcher);
#[cfg(unix)]
{
if settings.server.unix_socket.enabled {
let settings = settings.clone();
let repo_path = opt.repo.clone();
let repo = repo.clone();
let shared_data = shared_data.clone();
top_level = top_level.start("mediarepo-unix-socket", |subsystem| {
Box::pin(async move {
start_and_await_unix_socket(subsystem, repo_path, settings, repo).await?;
Ok(())
start_and_await_unix_socket(
subsystem,
repo_path,
SendSyncTypeMap::from_iter(shared_data),
)
.await?;
RepoResult::Ok(())
})
})
}
@@ -137,9 +136,15 @@ async fn start_server(opt: Opt, settings: Settings) -> RepoResult<()> {
if settings.server.tcp.enabled {
top_level = top_level.start("mediarepo-tcp", move |subsystem| {
Box::pin(async move {
start_and_await_tcp_server(subsystem, opt.repo, settings, repo).await?;
Ok(())
start_and_await_tcp_server(
subsystem,
opt.repo,
settings,
SendSyncTypeMap::from_iter(shared_data),
)
.await?;
RepoResult::Ok(())
})
})
}
@@ -164,9 +169,9 @@ async fn start_and_await_tcp_server(
subsystem: SubsystemHandle,
repo_path: PathBuf,
settings: Settings,
repo: Repo,
shared_data: SendSyncTypeMap,
) -> RepoResult<()> {
let (address, handle) = start_tcp_server(subsystem.clone(), repo_path.clone(), settings, repo)?;
let (address, handle) = start_tcp_server(subsystem.clone(), settings, shared_data)?;
let (mut file, _guard) = DropFile::new(repo_path.join("repo.tcp")).await?;
file.write_all(&address.into_bytes()).await?;
@@ -189,17 +194,10 @@ async fn start_and_await_tcp_server(
async fn start_and_await_unix_socket(
subsystem: SubsystemHandle,
repo_path: PathBuf,
settings: Settings,
repo: Repo,
shared_data: SendSyncTypeMap,
) -> RepoResult<()> {
let socket_path = repo_path.join("repo.sock");
let handle = mediarepo_socket::create_unix_socket(
subsystem.clone(),
socket_path,
repo_path.clone(),
settings,
repo,
)?;
let handle = mediarepo_socket::create_unix_socket(subsystem.clone(), socket_path, shared_data)?;
let _guard = DropFile::from_path(repo_path.join("repo.sock"));
tokio::select! {
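The unix-socket variant mirrors the TCP path, and both functions end in a tokio::select! whose arms are cut off by this hunk. The assumed shape is a race between the socket task and shutdown, so whichever finishes first tears the other down and the drop guards remove the marker files. A runnable sketch of that race with stand-in futures:

use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // Stand-in for the spawned socket loop.
    let server = tokio::spawn(async {
        sleep(Duration::from_secs(3600)).await;
    });
    // Stand-in for the subsystem's shutdown signal.
    let shutdown = sleep(Duration::from_millis(100));

    tokio::select! {
        _ = server => println!("socket task finished first"),
        _ = shutdown => println!("shutdown requested; guards drop, files removed"),
    }
}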
@ -244,14 +242,14 @@ async fn init(opt: Opt, force: bool) -> RepoResult<()> {
Ok(())
}
fn clean_old_connection_files(root: &PathBuf) -> RepoResult<()> {
async fn clean_old_connection_files(root: &PathBuf) -> RepoResult<()> {
let paths = ["repo.tcp", "repo.sock"];
for path in paths {
let path = root.join(path);
if path.exists() {
std::fs::remove_file(&path)?;
tokio::fs::remove_file(&path).await?;
}
}
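clean_old_connection_files goes async so the startup path can delete stale repo.tcp/repo.sock markers through tokio::fs instead of blocking the runtime. The path.exists() probe is still a synchronous stat (harmless for two small files at startup); a sketch of a fully async alternative that also closes the exists/remove race by simply attempting the removal and ignoring NotFound:

use std::io::ErrorKind;
use std::path::Path;

async fn clean_old_connection_files(root: &Path) -> std::io::Result<()> {
    for name in ["repo.tcp", "repo.sock"] {
        match tokio::fs::remove_file(root.join(name)).await {
            Ok(()) => {}
            // Nothing to clean up; not an error.
            Err(e) if e.kind() == ErrorKind::NotFound => {}
            Err(e) => return Err(e),
        }
    }
    Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    clean_old_connection_files(Path::new(".")).await
}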

@ -1,6 +1,6 @@
{
"name": "mediarepo-ui",
"version": "1.0.0",
"version": "1.0.1",
"scripts": {
"ng": "ng",
"start": "ng serve",
@ -13,51 +13,51 @@
},
"private": true,
"dependencies": {
"@angular/animations": "~13.1.2",
"@angular/cdk": "^13.1.2",
"@angular/common": "~13.1.2",
"@angular/compiler": "~13.1.2",
"@angular/core": "~13.1.2",
"@angular/animations": "~13.3.2",
"@angular/cdk": "^13.3.2",
"@angular/common": "~13.3.2",
"@angular/compiler": "~13.3.2",
"@angular/core": "~13.3.2",
"@angular/flex-layout": "^13.0.0-beta.36",
"@angular/forms": "~13.1.2",
"@angular/material": "^13.1.2",
"@angular/platform-browser": "~13.1.2",
"@angular/platform-browser-dynamic": "~13.1.2",
"@angular/router": "~13.1.2",
"@ng-icons/core": "^13.2.1",
"@ng-icons/feather-icons": "^13.2.1",
"@ng-icons/material-icons": "^13.2.1",
"@tauri-apps/api": "^1.0.0-beta.8",
"@angular/forms": "~13.3.2",
"@angular/material": "^13.3.2",
"@angular/platform-browser": "~13.3.2",
"@angular/platform-browser-dynamic": "~13.3.2",
"@angular/router": "~13.3.2",
"@ng-icons/core": "^15.1.0",
"@ng-icons/feather-icons": "^15.1.0",
"@ng-icons/material-icons": "^15.1.0",
"@tauri-apps/api": "^1.0.0-rc.3",
"chart.js": "^3.7.1",
"primeicons": "^5.0.0",
"primeng": "^13.0.4",
"rxjs": "~7.5.2",
"primeng": "^13.3.2",
"rxjs": "~7.5.5",
"tslib": "^2.3.1",
"w3c-keys": "^1.0.3",
"zone.js": "~0.11.4"
"zone.js": "~0.11.5"
},
"devDependencies": {
"@angular-devkit/build-angular": "~13.1.3",
"@angular-eslint/builder": "^13.0.1",
"@angular-eslint/eslint-plugin": "^13.0.1",
"@angular-eslint/eslint-plugin-template": "^13.0.1",
"@angular-eslint/schematics": "^13.0.1",
"@angular-eslint/template-parser": "^13.0.1",
"@angular/cli": "~13.1.3",
"@angular/compiler-cli": "~13.1.2",
"@tauri-apps/cli": "^1.0.0-beta.10",
"@angular-devkit/build-angular": "~13.3.2",
"@angular-eslint/builder": "^13.2.0",
"@angular-eslint/eslint-plugin": "^13.2.0",
"@angular-eslint/eslint-plugin-template": "^13.2.0",
"@angular-eslint/schematics": "^13.2.0",
"@angular-eslint/template-parser": "^13.2.0",
"@angular/cli": "~13.3.2",
"@angular/compiler-cli": "~13.3.2",
"@tauri-apps/cli": "^1.0.0-rc.8",
"@types/file-saver": "^2.0.4",
"@types/jasmine": "~3.10.3",
"@types/node": "^16.11.19",
"@typescript-eslint/eslint-plugin": "5.9.1",
"@typescript-eslint/parser": "^5.9.1",
"eslint": "^8.6.0",
"@types/jasmine": "~4.0.2",
"@types/node": "^17.0.23",
"@typescript-eslint/eslint-plugin": "5.18.0",
"@typescript-eslint/parser": "^5.18.0",
"eslint": "^8.12.0",
"jasmine-core": "~4.0.0",
"karma": "~6.3.10",
"karma-chrome-launcher": "~3.1.0",
"karma-coverage": "~2.1.0",
"karma-jasmine": "~4.0.1",
"karma-coverage": "~2.2.0",
"karma-jasmine": "~4.0.2",
"karma-jasmine-html-reporter": "~1.7.0",
"typescript": "~4.5.4"
"typescript": "~4.6.3"
}
}

File diff suppressed because it is too large

@ -1,6 +1,6 @@
[package]
name = "app"
version = "1.0.0"
version = "1.0.1"
description = "The UI for the mediarepo media management tool"
authors = ["you"]
license = ""
@ -10,20 +10,20 @@ edition = "2018"
build = "src/build.rs"
[build-dependencies]
tauri-build = { version = "1.0.0-beta.4", features = [] }
tauri-build = { version = "1.0.0-rc.5", features = [] }
[dependencies]
serde_json = "1.0.78"
serde_json = "1.0.79"
serde = { version = "1.0.136", features = ["derive"] }
thiserror = "1.0.30"
typemap_rev = "0.1.5"
[dependencies.tauri]
version = "1.0.0-beta.8"
version = "1.0.0-rc.4"
features = ["dialog-all", "path-all", "shell-all"]
[dependencies.tracing-subscriber]
version = "0.3.8"
version = "0.3.9"
features = ["env-filter"]
[dependencies.mediarepo-api]
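The app crate follows the rest of the release onto the tauri 1.0.0-rc series and keeps tracing-subscriber with the env-filter feature. The diff does not show mediarepo's subscriber setup, so the following is only the usual way that feature gets wired up, filtering by the RUST_LOG variable:

use tracing_subscriber::EnvFilter;

fn main() {
    tracing_subscriber::fmt()
        // Fall back to "info" when RUST_LOG is unset or invalid.
        .with_env_filter(
            EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
        )
        .init();

    tracing::info!("tracing initialized");
}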

@ -1,7 +1,7 @@
{
"package": {
"productName": "mediarepo-ui",
"version": "1.0.0"
"version": "1.0.1"
},
"build": {
"distDir": "../dist/mediarepo-ui",

@ -4,7 +4,7 @@ import {BusyIndicatorComponent} from "./busy-indicator/busy-indicator.component"
import {ContextMenuComponent} from "./context-menu/context-menu.component";
import {CommonModule} from "@angular/common";
import {NgIconsModule} from "@ng-icons/core";
import {MatChevronLeft, MatChevronRight} from "@ng-icons/material-icons";
import {MatChevronLeft, MatChevronRight} from "@ng-icons/material-icons/baseline";
import {MatProgressSpinnerModule} from "@angular/material/progress-spinner";
import {MatButtonModule} from "@angular/material/button";
import {MatDialogModule} from "@angular/material/dialog";
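@ng-icons jumps two majors here (13 to 15), and with it the Material icon exports move off the package root into per-variant entry points; every @ng-icons/material-icons import in the UI accordingly gains a /baseline suffix, a mechanical change that repeats across the modules below.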

@ -14,7 +14,7 @@
<app-content-aware-image (appLoadEnd)="this.loading = false" (appLoadError)="this.loading = false"
[class.hidden]="this.loading"
[imageSrc]="this.imageUrl"
[maxRetry]="3"
[maxRetry]="10"
[retryDelay]="500"
decoding="sync"></app-content-aware-image>
</app-busy-indicator>

@ -1,7 +1,7 @@
<app-content-aware-image (appLoadEnd)="this.loadEnd.emit()" (appLoadError)="this.onImageLoadError()"
*ngIf="this.thumbnailSupported && this.thumbUrl"
[imageSrc]="this.thumbUrl"
[maxRetry]="5" [retryDelay]="100"
[maxRetry]="10" [retryDelay]="100"
borderRadius="0.25em"></app-content-aware-image>
<div *ngIf="this.thumbnailSupported && this.thumbUrl" class="file-icon-overlay">
<ng-icon *ngIf="fileType === 'video'" name="mat-movie"></ng-icon>
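Both the full-size image viewer above (3 to 10 retries at a 500 ms delay) and the thumbnail component (5 to 10 at 100 ms) become more patient, presumably to ride out content and thumbnails the daemon is still producing when the first request arrives.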

@ -25,7 +25,7 @@ import {
MatImage,
MatMovie,
MatRefresh
} from "@ng-icons/material-icons";
} from "@ng-icons/material-icons/baseline";
import {DragDropModule} from "@angular/cdk/drag-drop";
import {MatButtonModule} from "@angular/material/button";
import {MatMenuModule} from "@angular/material/menu";

@ -7,7 +7,7 @@ import {MatFormFieldModule} from "@angular/material/form-field";
import {ReactiveFormsModule} from "@angular/forms";
import {MatInputModule} from "@angular/material/input";
import {NgIconsModule} from "@ng-icons/core";
import {MatFolder, MatInsertDriveFile} from "@ng-icons/material-icons";
import {MatFolder, MatInsertDriveFile} from "@ng-icons/material-icons/baseline";
import {MatButtonModule} from "@angular/material/button";
import {FlexModule} from "@angular/flex-layout";
import {FilterInputComponent} from "./filter-input/filter-input.component";

@ -10,7 +10,7 @@ import {MatSelectModule} from "@angular/material/select";
import {MatInputModule} from "@angular/material/input";
import {ReactiveFormsModule} from "@angular/forms";
import {NgIconsModule} from "@ng-icons/core";
import {MatFolder} from "@ng-icons/material-icons";
import {MatFolder} from "@ng-icons/material-icons/baseline";
@NgModule({

@ -12,7 +12,7 @@ import {
MatFilterAlt,
MatRemove,
MatRemoveCircle,
} from "@ng-icons/material-icons";
} from "@ng-icons/material-icons/baseline";
import {MatRippleModule} from "@angular/material/core";
import {MatButtonModule} from "@angular/material/button";
import {InputModule} from "../input/input.module";

File diff suppressed because it is too large

@ -69,7 +69,10 @@ def build(component: str, bundles: List[str] = None):
def build_daemon():
'''Builds daemon'''
cargo('fetch', 'mediarepo-daemon')
cargo('build --release --frozen', 'mediarepo-daemon')
additional_flags = ''
if verbose:
additional_flags = '--verbose'
cargo('build --release --frozen ' + additional_flags, 'mediarepo-daemon')
if windows:
store_artifact('mediarepo-daemon/target/release/mediarepo-daemon.exe')
@ -81,10 +84,15 @@ def build_ui(bundles: List[str] = None):
'''Builds UI'''
yarn('install', 'mediarepo-ui')
additional_flags = ''
if verbose:
additional_flags = '--verbose'
if bundles is not None:
cargo('tauri build --bundles ' + ' '.join(bundles), 'mediarepo-ui')
cargo('tauri build --bundles ' + ' '.join(bundles) + ' ' + additional_flags, 'mediarepo-ui')
else:
cargo('tauri build ', 'mediarepo-ui')
cargo('tauri build ' + additional_flags, 'mediarepo-ui')
if windows:
store_artifact(

@ -4,7 +4,7 @@ import argparse
import os
tauri_cli_version = '1.0.0-rc.5'
tauri_cli_version = '1.0.0-rc.8'
windows = os.name == 'nt'
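Finally, the build scripts learn to pass verbosity through: when the module-level verbose flag is set, --verbose is appended to both the daemon's cargo build and the UI's tauri build invocations, and the pinned tauri CLI moves from 1.0.0-rc.5 to 1.0.0-rc.8 in step with the rc-series tauri dependencies elsewhere in this release.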
