Fix cargo clippy warnings

Signed-off-by: trivernis <trivernis@protonmail.com>
pull/25/head
trivernis 2 years ago
parent 37f7a2c82f
commit 580c27bbd1
Signed by: Trivernis
GPG Key ID: DFFFCC2C7A02DB45

@@ -71,7 +71,7 @@ impl ThumbnailStore {
         let name = file_name.to_string_lossy();
         let (height, width) = name
-            .split_once("-")
+            .split_once('-')
             .and_then(|(height, width)| {
                 Some((height.parse::<u32>().ok()?, width.parse::<u32>().ok()?))
             })
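
This is clippy's single_char_pattern lint: a one-character string literal passed to a string pattern API is better written as a char, which skips the substring-search machinery. A standalone sketch of the same fix (names are illustrative, not from this repo):

    fn parse_dims(name: &str) -> Option<(u32, u32)> {
        // clippy::single_char_pattern: '-' (a char) is cheaper than "-" (a &str)
        let (h, w) = name.split_once('-')?;
        Some((h.parse().ok()?, w.parse().ok()?))
    }

    fn main() {
        assert_eq!(parse_dims("250-250"), Some((250, 250)));
    }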

@@ -34,6 +34,7 @@ pub enum LogLevel {
     Trace,
 }

+#[allow(clippy::from_over_into)]
 impl Into<Option<Level>> for LogLevel {
     fn into(self) -> Option<Level> {
         match self {
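
clippy's from_over_into lint fires because implementing Into directly forgoes the blanket impl: From<T> automatically provides Into<T>, but not the reverse. The commit silences the lint with #[allow]; the conversion could alternatively be written as From, as in this hypothetical sketch (Level here is a stand-in for the tracing level type):

    #[derive(Clone, Copy)]
    enum Level { Error, Info }

    #[derive(Clone, Copy)]
    enum LogLevel { Off, Error, Info }

    // From<LocalType> for Option<ForeignType> is allowed by the orphan rules
    // and yields the matching Into impl for free.
    impl From<LogLevel> for Option<Level> {
        fn from(level: LogLevel) -> Self {
            match level {
                LogLevel::Off => None,
                LogLevel::Error => Some(Level::Error),
                LogLevel::Info => Some(Level::Info),
            }
        }
    }

    fn main() {
        let level: Option<Level> = LogLevel::Info.into();
        assert!(matches!(level, Some(Level::Info)));
    }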

@@ -1,5 +1,5 @@
 use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};

 use config::{Config, FileFormat};
 use serde::{Deserialize, Serialize};
@@ -24,7 +24,7 @@ pub struct Settings {
 }

 impl Settings {
-    pub fn read(root: &PathBuf) -> RepoResult<Self> {
+    pub fn read(root: &Path) -> RepoResult<Self> {
         let settings = Config::builder()
             .add_source(config::File::from_str(
                 &*Settings::default().to_toml_string()?,
@@ -44,7 +44,7 @@ impl Settings {
         settings_main.server.tcp.enabled = true;
         settings_main.server.tcp.port = PortSetting::Range(settings_v1.port_range);
         settings_main.server.tcp.listen_address = settings_v1.listen_address;
-        settings_main.paths.thumbnail_directory = settings_v1.thumbnail_store.into();
+        settings_main.paths.thumbnail_directory = settings_v1.thumbnail_store;
         settings_main.paths.database_directory = PathBuf::from(settings_v1.database_path)
             .parent()
             .map(|p| p.to_string_lossy().to_string())
@@ -69,7 +69,7 @@ impl Settings {
         Ok(string)
     }

-    pub fn save(&self, root: &PathBuf) -> RepoResult<()> {
+    pub fn save(&self, root: &Path) -> RepoResult<()> {
         let string = toml::to_string_pretty(&self)?;
         fs::write(root.join("repo.toml"), string.into_bytes())?;
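
These signature changes follow clippy's ptr_arg lint: taking &PathBuf forces callers to hold an owned PathBuf, while &Path accepts both, since PathBuf deref-coerces to &Path at the call site. A minimal sketch of the pattern (illustrative function, assuming a repo.toml layout like the one above):

    use std::path::{Path, PathBuf};

    // &Path instead of &PathBuf: any path-like caller works unchanged
    fn config_file(root: &Path) -> PathBuf {
        root.join("repo.toml")
    }

    fn main() {
        let owned = PathBuf::from("/tmp/repo");
        println!("{}", config_file(&owned).display()); // &PathBuf coerces to &Path
        println!("{}", config_file(Path::new("/etc/repo")).display());
    }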

@@ -1,4 +1,4 @@
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};

 use serde::{Deserialize, Serialize};
@@ -21,27 +21,27 @@ impl Default for PathSettings {
 impl PathSettings {
     #[inline]
-    pub fn database_dir(&self, root: &PathBuf) -> PathBuf {
+    pub fn database_dir(&self, root: &Path) -> PathBuf {
         root.join(&self.database_directory)
     }

     #[inline]
-    pub fn files_dir(&self, root: &PathBuf) -> PathBuf {
+    pub fn files_dir(&self, root: &Path) -> PathBuf {
         root.join(&self.files_directory)
     }

     #[inline]
-    pub fn thumbs_dir(&self, root: &PathBuf) -> PathBuf {
+    pub fn thumbs_dir(&self, root: &Path) -> PathBuf {
         root.join(&self.thumbnail_directory)
     }

     #[inline]
-    pub fn db_file_path(&self, root: &PathBuf) -> PathBuf {
+    pub fn db_file_path(&self, root: &Path) -> PathBuf {
         self.database_dir(root).join("repo.db")
     }

     #[inline]
-    pub fn frontend_state_file_path(&self, root: &PathBuf) -> PathBuf {
+    pub fn frontend_state_file_path(&self, root: &Path) -> PathBuf {
         self.database_dir(root).join("frontend-state.json")
     }
 }

@@ -7,9 +7,15 @@ use tracing_subscriber::Layer;
 pub struct DynLayerList<S>(Vec<Box<dyn Layer<S> + Send + Sync + 'static>>);

+impl<S> Default for DynLayerList<S> {
+    fn default() -> Self {
+        Self(Vec::new())
+    }
+}
+
 impl<S> DynLayerList<S> {
     pub fn new() -> Self {
-        Self(Vec::new())
+        Self::default()
     }

     pub fn iter(&self) -> Iter<'_, Box<dyn Layer<S> + Send + Sync>> {
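
This satisfies clippy's new_without_default lint: a parameterless new() should be mirrored by a Default impl so the type composes with default-aware APIs, and routing new() through Self::default() keeps a single construction path. The pattern in isolation (illustrative type, not the real DynLayerList):

    struct LayerList(Vec<String>);

    impl Default for LayerList {
        fn default() -> Self {
            Self(Vec::new())
        }
    }

    impl LayerList {
        // new() delegates to Default so the two can never drift apart
        fn new() -> Self {
            Self::default()
        }
    }

    fn main() {
        assert!(LayerList::new().0.is_empty());
    }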

@@ -1,7 +0,0 @@
-use async_trait::async_trait;
-
-#[async_trait]
-pub trait AsyncTryFrom<T> {
-    type Error;
-
-    fn async_try_from(other: T) -> Result<Self, Self::Error>;
-}

@@ -47,7 +47,7 @@ pub async fn get_folder_size(path: PathBuf) -> RepoResult<u64> {
             }
         }
     }
-    let futures = all_files.into_iter().map(|f| read_file_size(f));
+    let futures = all_files.into_iter().map(read_file_size);
     let results = future::join_all(futures).await;

     let size = results.into_iter().filter_map(|r| r.ok()).sum();
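
clippy's redundant_closure lint: a closure that only forwards its argument can be replaced by the function itself; the same rewrite appears in the map(FileMetadataDto::new) and map(Set) hunks below. Sketch:

    fn double(x: u64) -> u64 {
        x * 2
    }

    fn main() {
        // instead of .map(|v| double(v)), pass the function directly
        let doubled: Vec<u64> = vec![1u64, 2, 3].into_iter().map(double).collect();
        assert_eq!(doubled, vec![2, 4, 6]);
    }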

@@ -30,7 +30,7 @@ pub async fn get_all_counts(db: &DatabaseConnection) -> RepoResult<Counts> {
     ))
     .one(db)
     .await?
-    .ok_or(RepoError::from("could not retrieve metadata from database"))?;
+    .ok_or_else(|| RepoError::from("could not retrieve metadata from database"))?;

     Ok(counts)
 }
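
clippy's or_fun_call lint: the argument of ok_or is evaluated eagerly, so the RepoError (and its message String) would be constructed even when the Option is Some; ok_or_else defers that work to the None path. A standalone illustration:

    fn main() {
        let found: Option<u32> = Some(42);

        // eager: the String is allocated even though `found` is Some
        let _ = found.ok_or(String::from("not found"));

        // lazy: the closure only runs in the None case
        let value = found.ok_or_else(|| String::from("not found")).unwrap();
        assert_eq!(value, 42);
    }

Note the inverse case in the TCP-server hunk further down: for a cheap unit value, clippy instead asks for plain ok_or.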

@@ -55,7 +55,7 @@ fn vec_to_query_list<D: Display>(input: Vec<D>) -> String {
     let mut entries = input
         .into_iter()
         .fold(String::new(), |acc, val| format!("{}{},", acc, val));
-    if entries.len() > 0 {
+    if !entries.is_empty() {
         entries.remove(entries.len() - 1);
     }
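
clippy's len_zero lint: !entries.is_empty() states the intent directly instead of comparing a length against zero. A sketch of the trailing-comma trim above:

    fn main() {
        let mut entries = String::from("1,2,3,");
        if !entries.is_empty() {
            entries.remove(entries.len() - 1); // drop the trailing comma
        }
        assert_eq!(entries, "1,2,3");
    }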

@@ -93,7 +93,7 @@ impl FileDao {
             .all(&self.ctx.db)
             .await?
             .into_iter()
-            .map(|m| FileMetadataDto::new(m))
+            .map(FileMetadataDto::new)
             .collect();

         Ok(metadata)

@@ -22,8 +22,8 @@ impl FileDao {
         let trx = self.ctx.db.begin().await?;
         let model = file::ActiveModel {
             id: Set(update_dto.id),
-            cd_id: update_dto.cd_id.map(|v| Set(v)).unwrap_or(NotSet),
-            mime_type: update_dto.mime_type.map(|v| Set(v)).unwrap_or(NotSet),
+            cd_id: update_dto.cd_id.map(Set).unwrap_or(NotSet),
+            mime_type: update_dto.mime_type.map(Set).unwrap_or(NotSet),
             status: update_dto.status.map(|v| Set(v as i32)).unwrap_or(NotSet),
         };
         let file_model = model.update(&trx).await?;
@@ -62,8 +62,8 @@ impl FileDao {
         sizes: I,
     ) -> RepoResult<Vec<ThumbnailDto>> {
         let bytes = self.get_bytes(file.cd()).await?;
-        let mime_type = mime::Mime::from_str(file.mime_type())
-            .unwrap_or_else(|_| mime::APPLICATION_OCTET_STREAM);
+        let mime_type =
+            mime::Mime::from_str(file.mime_type()).unwrap_or(mime::APPLICATION_OCTET_STREAM);
         let thumbnails =
             thumbnailer::create_thumbnails(Cursor::new(bytes), mime_type.clone(), sizes)?;
         let mut dtos = Vec::new();

@@ -40,7 +40,7 @@ impl JobDao {
     }
 }

-fn build_state_filters(states: &Vec<UpsertJobStateDto>) -> Condition {
+fn build_state_filters(states: &[UpsertJobStateDto]) -> Condition {
     states
         .iter()
         .map(|s| Condition::all().add(job_state::Column::JobType.eq(s.job_type)))

@@ -122,7 +122,7 @@ async fn add_keys(
 async fn find_sort_keys(
     trx: &DatabaseTransaction,
-    keys: &Vec<AddSortKeyDto>,
+    keys: &[AddSortKeyDto],
 ) -> RepoResult<Vec<SortKeyDto>> {
     if keys.is_empty() {
         return Ok(vec![]);

@@ -77,14 +77,12 @@ fn create_cd_tag_map(
     )>,
     tag_id_map: HashMap<i64, TagDto>,
 ) -> HashMap<Vec<u8>, Vec<TagDto>> {
-    let cd_tag_map = tag_cd_entries
+    tag_cd_entries
         .into_iter()
         .filter_map(|(t, cd)| Some((cd?, tag_id_map.get(&t.tag_id)?.clone())))
         .sorted_by_key(|(cd, _)| cd.id)
         .group_by(|(cd, _)| cd.descriptor.to_owned())
         .into_iter()
         .map(|(key, group)| (key, group.map(|(_, t)| t).collect::<Vec<TagDto>>()))
-        .collect();
-
-    cd_tag_map
+        .collect::<HashMap<Vec<u8>, Vec<TagDto>>>()
 }
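
clippy's let_and_return lint: binding the result to cd_tag_map only to return it on the next line adds nothing, so the expression is returned directly, with a turbofish carrying the collect target type that the binding used to pin down. The shape of the fix (illustrative function, not from this repo):

    use std::collections::HashMap;

    fn word_lengths(words: &[&str]) -> HashMap<String, usize> {
        // returned directly rather than via a temporary binding
        words
            .iter()
            .map(|w| (w.to_string(), w.len()))
            .collect::<HashMap<String, usize>>()
    }

    fn main() {
        assert_eq!(word_lengths(&["tag", "namespace"])["namespace"], 9);
    }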

@@ -45,11 +45,12 @@ fn name_query_to_condition(query: TagByNameQuery) -> Option<Condition> {
     let TagByNameQuery { namespace, name } = query;
     let mut condition = Condition::all();

+    #[allow(clippy::question_mark)]
     if !name.ends_with('*') {
         condition = condition.add(tag::Column::Name.eq(name))
     } else if name.len() > 1 {
         condition =
-            condition.add(tag::Column::Name.like(&*format!("{}%", name.trim_end_matches("*"))))
+            condition.add(tag::Column::Name.like(&*format!("{}%", name.trim_end_matches('*'))))
     } else if namespace.is_none() {
         return None;
     }

@@ -58,12 +58,12 @@ impl TagDao {
     async fn get_existing_mappings(
         trx: &DatabaseTransaction,
-        cd_ids: &Vec<i64>,
-        tag_ids: &Vec<i64>,
+        cd_ids: &[i64],
+        tag_ids: &[i64],
     ) -> RepoResult<Vec<(i64, i64)>> {
         let existing_mappings: Vec<(i64, i64)> = content_descriptor_tag::Entity::find()
-            .filter(content_descriptor_tag::Column::CdId.is_in(cd_ids.clone()))
-            .filter(content_descriptor_tag::Column::TagId.is_in(tag_ids.clone()))
+            .filter(content_descriptor_tag::Column::CdId.is_in(cd_ids.to_vec()))
+            .filter(content_descriptor_tag::Column::TagId.is_in(tag_ids.to_vec()))
             .all(trx)
             .await?
             .into_iter()
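
ptr_arg again, this time for collections: &[i64] accepts vectors, arrays, and slices alike, and to_vec() on the slice replaces cloning through the &Vec reference. Sketch:

    // &[i64] instead of &Vec<i64>: callers can pass any contiguous storage
    fn first_positive(ids: &[i64]) -> Option<i64> {
        ids.iter().copied().find(|id| *id > 0)
    }

    fn main() {
        let from_vec = vec![-1, 0, 7];
        assert_eq!(first_positive(&from_vec), Some(7));
        assert_eq!(first_positive(&[-3, 5]), Some(5));
    }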

@@ -75,7 +75,7 @@ pub struct AddFileDto {
     pub name: Option<String>,
 }

-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
 pub struct UpdateFileDto {
     pub id: i64,
     pub cd_id: Option<i64>,
@@ -83,17 +83,6 @@ pub struct UpdateFileDto {
     pub status: Option<FileStatus>,
 }

-impl Default for UpdateFileDto {
-    fn default() -> Self {
-        Self {
-            id: 0,
-            cd_id: None,
-            mime_type: None,
-            status: None,
-        }
-    }
-}
-
 #[derive(Copy, Clone, Debug)]
 pub enum FileStatus {
     Imported = 10,
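
clippy's derivable_impls lint: the hand-written Default only produced the values the derive would generate anyway (0 for i64, None for Option), so the manual impl is deleted and Default joins the derive list. A compressed view of the same idea (status field omitted for brevity):

    #[derive(Clone, Debug, Default)]
    struct UpdateFileDto {
        id: i64,                   // Default: 0
        cd_id: Option<i64>,        // Default: None
        mime_type: Option<String>, // Default: None
    }

    fn main() {
        let dto = UpdateFileDto::default();
        assert_eq!(dto.id, 0);
        assert!(dto.cd_id.is_none() && dto.mime_type.is_none());
    }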

@@ -84,7 +84,7 @@ impl KeyType {
     }

     pub fn to_number(&self) -> i32 {
-        self.clone() as i32
+        *self as i32
     }
 }
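
clippy's clone_on_copy lint: KeyType is Copy, so clone() just hides a cheap bitwise copy; dereferencing says the same thing plainly. A minimal reproduction (abridged enum):

    #[derive(Copy, Clone)]
    enum KeyType {
        Value = 20,
    }

    impl KeyType {
        fn to_number(&self) -> i32 {
            // *self copies the Copy value; .clone() would obscure that
            *self as i32
        }
    }

    fn main() {
        assert_eq!(KeyType::Value.to_number(), 20);
    }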

@@ -30,8 +30,9 @@ pub fn start_tcp_server(
                 return Err(RepoError::PortUnavailable);
             }
         }
-        PortSetting::Range((l, r)) => port_check::free_local_port_in_range(*l, *r)
-            .ok_or_else(|| RepoError::PortUnavailable)?,
+        PortSetting::Range((l, r)) => {
+            port_check::free_local_port_in_range(*l, *r).ok_or(RepoError::PortUnavailable)?
+        }
     };
     let ip = settings.server.tcp.listen_address.to_owned();
     let address = SocketAddr::new(ip, port);
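
The mirror image of the ok_or_else fix earlier: RepoError::PortUnavailable is a fieldless variant that costs nothing to name, so the closure was pure overhead and clippy's unnecessary_lazy_evaluations prefers eager ok_or. Sketch:

    #[derive(Debug)]
    enum RepoError {
        PortUnavailable,
    }

    fn pick_port(free: Option<u16>) -> Result<u16, RepoError> {
        // a cheap unit variant: eager ok_or is the right call here
        free.ok_or(RepoError::PortUnavailable)
    }

    fn main() {
        assert!(pick_port(None).is_err());
        assert_eq!(pick_port(Some(9000)).unwrap(), 9000);
    }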

@@ -151,7 +151,7 @@ impl FilesNamespace {
             content: bytes,
             mime_type: metadata
                 .mime_type
-                .unwrap_or(String::from("application/octet-stream")),
+                .unwrap_or_else(|| String::from("application/octet-stream")),
             creation_time: metadata.creation_time,
             change_time: metadata.change_time,
             name: Some(metadata.name),

@@ -69,10 +69,10 @@ fn build_filters_from_expressions(
                 }
             }
            };
-            if filters.len() > 0 {
-                Some(filters)
-            } else {
+            if filters.is_empty() {
                 None
+            } else {
+                Some(filters)
             }
         })
         .collect()
@@ -92,7 +92,7 @@ fn map_tag_query_to_filter(
     query: TagQuery,
     tag_id_map: &HashMap<String, i64>,
 ) -> Option<FilterProperty> {
-    if query.tag.ends_with("*") {
+    if query.tag.ends_with('*') {
         map_wildcard_tag_to_filter(query, tag_id_map)
     } else {
         map_tag_to_filter(query, tag_id_map)
@@ -103,7 +103,7 @@ fn map_wildcard_tag_to_filter(
     query: TagQuery,
     tag_id_map: &HashMap<String, i64>,
 ) -> Option<FilterProperty> {
-    let filter_tag = query.tag.trim_end_matches("*");
+    let filter_tag = query.tag.trim_end_matches('*');
     let relevant_ids = tag_id_map
         .iter()
         .filter_map(|(name, id)| {
@@ -115,15 +115,15 @@ fn map_wildcard_tag_to_filter(
         })
         .collect::<Vec<i64>>();

-    if relevant_ids.len() > 0 {
+    if relevant_ids.is_empty() {
+        None
+    } else {
         let comparator = if query.negate {
             IsNot(relevant_ids)
         } else {
             Is(relevant_ids)
         };

         Some(FilterProperty::TagWildcardIds(comparator))
-    } else {
-        None
     }
 }

@@ -71,7 +71,7 @@ async fn build_sort_context(
         mime_type: file.mime_type().to_owned(),
         namespaces: cid_nsp
             .remove(&file.cd_id())
-            .unwrap_or(HashMap::with_capacity(0)),
+            .unwrap_or_else(|| HashMap::with_capacity(0)),
         tag_count: cid_tag_counts.remove(&file.cd_id()).unwrap_or(0),
         import_time: metadata.import_time().to_owned(),
         create_time: metadata.import_time().to_owned(),
@@ -177,10 +177,7 @@ fn adjust_for_dir(ordering: Ordering, direction: &SortDirection) -> Ordering {
 }

 fn compare_tag_lists(list_a: &Vec<String>, list_b: &Vec<String>) -> Ordering {
-    let first_diff = list_a
-        .into_iter()
-        .zip(list_b.into_iter())
-        .find(|(a, b)| *a != *b);
+    let first_diff = list_a.iter().zip(list_b.iter()).find(|(a, b)| *a != *b);
     if let Some(diff) = first_diff {
         if let (Some(num_a), Some(num_b)) = (diff.0.parse::<f32>().ok(), diff.1.parse::<f32>().ok())
         {
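
Two small wins in one hunk: into_iter() on a &Vec iterates by reference anyway (clippy's into_iter_on_ref), so the explicit iter() is clearer, and the shortened chain then fits on one line. A standalone version of the comparison helper (slice parameters instead of &Vec, as an illustration):

    fn first_difference<'a>(a: &'a [String], b: &'a [String]) -> Option<(&'a String, &'a String)> {
        // iter() makes the by-reference iteration explicit
        a.iter().zip(b.iter()).find(|(x, y)| x != y)
    }

    fn main() {
        let a = vec!["1".to_string(), "2".to_string()];
        let b = vec!["1".to_string(), "3".to_string()];
        let (x, y) = first_difference(&a, &b).unwrap();
        assert_eq!((x.as_str(), y.as_str()), ("2", "3"));
    }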

@@ -95,7 +95,7 @@ async fn get_frontend_state_path(ctx: &Context) -> IPCResult<PathBuf> {
     let data = ctx.data.read().await;
     let settings = data.get::<SettingsKey>().unwrap();
     let repo_path = data.get::<RepoPathKey>().unwrap();
-    let state_path = settings.paths.frontend_state_file_path(&repo_path);
+    let state_path = settings.paths.frontend_state_file_path(repo_path);

     Ok(state_path)
 }

@@ -33,11 +33,7 @@ pub async fn file_by_identifier(identifier: FileIdentifier, repo: &Repo) -> Repo
 pub async fn cd_by_identifier(identifier: FileIdentifier, repo: &Repo) -> RepoResult<Vec<u8>> {
     match identifier {
         FileIdentifier::ID(id) => {
-            let file = repo
-                .file()
-                .by_id(id)
-                .await?
-                .ok_or_else(|| "Thumbnail not found")?;
+            let file = repo.file().by_id(id).await?.ok_or("Thumbnail not found")?;
             Ok(file.cd().to_owned())
         }
         FileIdentifier::CD(cd) => decode_content_descriptor(cd),

@@ -1,5 +1,5 @@
 use std::fs;
-use std::path::PathBuf;
+use std::path::Path;

 use console_subscriber::ConsoleLayer;
 use opentelemetry::sdk::Resource;
@@ -24,7 +24,7 @@ use mediarepo_core::tracing_layer_list::DynLayerList;
 #[allow(dyn_drop)]
 pub type DropGuard = Box<dyn Drop>;

-pub fn init_tracing(repo_path: &PathBuf, log_cfg: &LoggingSettings) -> Vec<DropGuard> {
+pub fn init_tracing(repo_path: &Path, log_cfg: &LoggingSettings) -> Vec<DropGuard> {
     LogTracer::init().expect("failed to subscribe to log entries");
     let log_path = repo_path.join("logs");
     let mut guards = Vec::new();
@@ -97,11 +97,11 @@ fn add_telemetry_layer(log_cfg: &LoggingSettings, layer_list: &mut DynLayerList<
 fn add_app_log_layer(
     log_cfg: &LoggingSettings,
-    log_path: &PathBuf,
+    log_path: &Path,
     guards: &mut Vec<DropGuard>,
     layer_list: &mut DynLayerList<Registry>,
 ) {
-    let (app_log_writer, guard) = get_application_log_writer(&log_path);
+    let (app_log_writer, guard) = get_application_log_writer(log_path);
     guards.push(Box::new(guard) as DropGuard);

     let app_log_layer = fmt::layer()
@@ -115,11 +115,11 @@ fn add_app_log_layer(
 fn add_bromine_layer(
     log_cfg: &LoggingSettings,
-    log_path: &PathBuf,
+    log_path: &Path,
     guards: &mut Vec<DropGuard>,
     layer_list: &mut DynLayerList<Registry>,
 ) {
-    let (bromine_writer, guard) = get_bromine_log_writer(&log_path);
+    let (bromine_writer, guard) = get_bromine_log_writer(log_path);
     guards.push(Box::new(guard) as DropGuard);

     let bromine_layer = fmt::layer()
@@ -133,11 +133,11 @@ fn add_bromine_layer(
 fn add_sql_layer(
     log_cfg: &LoggingSettings,
-    log_path: &PathBuf,
+    log_path: &Path,
     guards: &mut Vec<DropGuard>,
     layer_list: &mut DynLayerList<Registry>,
 ) {
-    let (sql_writer, guard) = get_sql_log_writer(&log_path);
+    let (sql_writer, guard) = get_sql_log_writer(log_path);
     guards.push(Box::new(guard) as DropGuard);

     let sql_layer = fmt::layer()
@@ -161,18 +161,18 @@ fn add_stdout_layer(guards: &mut Vec<DropGuard>, layer_list: &mut DynLayerList<R
         .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
         .with_filter(
             std::env::var("RUST_LOG")
-                .unwrap_or(String::from("info,sqlx=warn"))
+                .unwrap_or_else(|_| String::from("info,sqlx=warn"))
                 .parse::<filter::Targets>()
-                .unwrap_or(
+                .unwrap_or_else(|_| {
                     filter::Targets::new()
                         .with_default(Level::INFO)
-                        .with_target("sqlx", Level::WARN),
-                ),
+                        .with_target("sqlx", Level::WARN)
+                }),
         );
     layer_list.add(stdout_layer);
 }

-fn get_sql_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {
+fn get_sql_log_writer(log_path: &Path) -> (NonBlocking, WorkerGuard) {
     tracing_appender::non_blocking(
         rolling_file::BasicRollingFileAppender::new(
             log_path.join("sql.log"),
@@ -183,7 +183,7 @@ fn get_sql_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {
     )
 }

-fn get_bromine_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {
+fn get_bromine_log_writer(log_path: &Path) -> (NonBlocking, WorkerGuard) {
     tracing_appender::non_blocking(
         rolling_file::BasicRollingFileAppender::new(
             log_path.join("bromine.log"),
@@ -194,7 +194,7 @@ fn get_bromine_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {
     )
 }

-fn get_application_log_writer(log_path: &PathBuf) -> (NonBlocking, WorkerGuard) {
+fn get_application_log_writer(log_path: &Path) -> (NonBlocking, WorkerGuard) {
     tracing_appender::non_blocking(
         rolling_file::BasicRollingFileAppender::new(
             log_path.join("repo.log"),

@@ -1,6 +1,6 @@
 use std::env;
 use std::iter::FromIterator;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
@@ -242,7 +242,7 @@ async fn init(opt: Opt, force: bool) -> RepoResult<()> {
     Ok(())
 }

-async fn clean_old_connection_files(root: &PathBuf) -> RepoResult<()> {
+async fn clean_old_connection_files(root: &Path) -> RepoResult<()> {
     let paths = ["repo.tcp", "repo.sock"];

     for path in paths {
