Switch to new sea-orm migrations

Signed-off-by: trivernis <trivernis@protonmail.com>
Branch: feature/rust-migrations
Author: trivernis (2 years ago)
Parent: cab349bafb
Commit: f563dff064
Signed by: Trivernis
GPG Key ID: DFFFCC2C7A02DB45

@@ -1743,6 +1743,7 @@ version = "0.2.0"
 dependencies = [
 "chrono",
 "mediarepo-core",
+"migration",
 "sea-orm",
 "sqlx",
 "tracing",
@@ -1838,7 +1839,7 @@ dependencies = [
 name = "migration"
 version = "0.1.0"
 dependencies = [
-"sea-schema",
+"sea-orm-migration",
 ]
 [[package]]
@@ -2736,9 +2737,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 [[package]]
 name = "sea-orm"
-version = "0.7.1"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27dbb8a742003f8dbf2ba290d128134d4275a6b55fd02f4d728683b6b55ea9bf"
+checksum = "51de529763804dd4f74c133055f53eccdda2221bdded94351009be28cc80d2fb"
 dependencies = [
 "async-stream",
 "async-trait",
@@ -2749,7 +2750,7 @@ dependencies = [
 "ouroboros",
 "rust_decimal",
 "sea-orm-macros",
-"sea-query 0.23.0",
+"sea-query",
 "sea-strum",
 "serde",
 "serde_json",
@@ -2760,11 +2761,28 @@ dependencies = [
 "uuid",
 ]
+[[package]]
+name = "sea-orm-cli"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fca862fdba12c753bffba9c9adf95d3d3f5dcc82fd589b12faeee7068bb173d5"
+dependencies = [
+"async-std",
+"chrono",
+"clap",
+"dotenv",
+"regex",
+"sea-schema",
+"tracing",
+"tracing-subscriber",
+"url",
+]
 [[package]]
 name = "sea-orm-macros"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "953bf5fb9f6ec985c139c4a98550b600c2f7c97bea74e2acc4025438469cb5a2"
+checksum = "9f9378e21366b119d281489013c8170c49972fd3709c2155eb4504a913715d2d"
 dependencies = [
 "bae",
 "heck 0.3.3",
@@ -2774,23 +2792,31 @@ dependencies = [
 ]
 [[package]]
-name = "sea-query"
-version = "0.22.0"
+name = "sea-orm-migration"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "727090e8d1e61edd07305d237664315226748ad559e16daa6293fa26c4e7a3c3"
+checksum = "15589f057677f57bea393572bd8eb9e8feb843a5f09b4fa518be6cef3a6ffedc"
 dependencies = [
-"sea-query-derive",
+"async-trait",
+"clap",
+"dotenv",
+"sea-orm",
+"sea-orm-cli",
+"sea-schema",
+"tracing",
+"tracing-subscriber",
 ]
 [[package]]
 name = "sea-query"
-version = "0.23.0"
+version = "0.24.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2bf24fc03259e206d8cd4c957ce7446fe54ab00ba5ada4cdb028aa3513e26231"
+checksum = "6b0fa62db5ae33dfc61e805b0b0c9d579c3733f1ed90326b3779f5b38f30fa2a"
 dependencies = [
 "chrono",
 "rust_decimal",
 "sea-query-derive",
+"sea-query-driver",
 "serde_json",
 "time 0.2.27",
 "uuid",
@@ -2809,22 +2835,26 @@ dependencies = [
 "thiserror",
 ]
+[[package]]
+name = "sea-query-driver"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e3953baee94dcb90f0e19e8b4b91b91e9394867b0fc1886d0221cfc6d0439f5"
+dependencies = [
+"proc-macro2 1.0.38",
+"quote 1.0.18",
+"syn 1.0.92",
+]
 [[package]]
 name = "sea-schema"
-version = "0.7.1"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d492f4550a428d2be29e4df8e43b2e9e46717424c9603fafa3365ae6079bd73"
+checksum = "09fea4b9dccc8b0667f108de2d09bdabd42a137b8437de092374a4e36de8c12f"
 dependencies = [
-"async-std",
-"async-trait",
-"clap",
-"dotenv",
-"log",
-"sea-orm",
-"sea-query 0.22.0",
+"futures 0.3.21",
+"sea-query",
 "sea-schema-derive",
-"tracing",
-"tracing-subscriber",
 ]
 [[package]]

@@ -25,7 +25,7 @@ tracing-subscriber = "0.3.11"
 trait-bound-typemap = "0.3.3"
 [dependencies.sea-orm]
-version = "0.7.1"
+version = "0.8.0"
 default-features = false
 [dependencies.sqlx]

@@ -18,6 +18,9 @@ version = "0.5.13"
 features = ["migrate"]
 [dependencies.sea-orm]
-version = "0.7.1"
+version = "0.8.0"
 features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros"]
 default-features = false
+[dependencies.migration]
+path = "./migration"

@@ -9,4 +9,4 @@ name = "migration"
 path = "src/lib.rs"
 [dependencies]
-sea-schema = { version = "^0.7.0", default-features = false, features = [ "migration", "debug-print" ] }
+sea-orm-migration = "0.8.3"

@@ -1,4 +1,4 @@
-pub use sea_schema::migration::prelude::*;
+pub use sea_orm_migration::prelude::*;
 mod m20220101_000001_create_table;
 mod utils;
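
The rest of this lib.rs is not shown in the hunk; with sea-orm-migration 0.8 it normally follows the upstream template, i.e. a Migrator that registers each migration module. A minimal sketch under that assumption (everything beyond the lines visible above comes from the template, not from this commit):

// Sketch: sits below the mod declarations shown above.
pub struct Migrator;

#[async_trait::async_trait] // attribute as used in the sea-orm-migration template
impl MigratorTrait for Migrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20220101_000001_create_table::Migration)]
    }
}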

@@ -1,5 +1,5 @@
 use crate::drop_tables;
-use sea_schema::migration::prelude::*;
+use sea_orm_migration::prelude::*;
 pub struct Migration;
@@ -25,6 +25,35 @@ impl MigrationTrait for Migration {
 manager.create_table(create_sorting_preset_key()).await?;
 manager.create_table(create_job_states()).await?;
+manager
+.create_index(
+Index::create()
+.name("index_files_cd_id")
+.table(Files::Table)
+.col(Files::CdId)
+.to_owned(),
+)
+.await?;
+manager
+.create_index(
+Index::create()
+.name("index_tags_name")
+.table(Tags::Table)
+.col(Tags::Name)
+.full_text()
+.to_owned(),
+)
+.await?;
+manager
+.create_index(
+Index::create()
+.name("index_cd_tag_mappings_tag_id")
+.table(CdTagMappings::Table)
+.col(CdTagMappings::TagId)
+.to_owned(),
+)
+.await?;
 Ok(())
 }
@@ -115,7 +144,6 @@ fn create_files() -> TableCreateStatement {
 .to(ContentDescriptors::Table, ContentDescriptors::Id)
 .on_delete(ForeignKeyAction::Cascade),
 )
-.index(Index::create().table(Files::Table).col(Files::CdId))
 .to_owned()
 }
@@ -154,12 +182,6 @@ fn create_tags() -> TableCreateStatement {
 .from(Tags::Table, Tags::NamespaceId)
 .to(Namespaces::Table, Namespaces::Id),
 )
-.index(
-Index::create()
-.table(Tags::Table)
-.col(Tags::Name)
-.full_text(),
-)
 .index(
 Index::create()
 .unique()
@@ -198,11 +220,6 @@ fn create_cd_tag_mappings() -> TableCreateStatement {
 .to(Tags::Table, Tags::Id)
 .on_delete(ForeignKeyAction::Cascade),
 )
-.index(
-Index::create()
-.table(CdTagMappings::Table)
-.col(CdTagMappings::TagId),
-)
 .to_owned()
 }

@@ -1,5 +1,5 @@
 use migration::Migrator;
-use sea_schema::migration::prelude::*;
+use sea_orm_migration::prelude::*;
 #[async_std::main]
 async fn main() {
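
Only the import changes in this hunk; the body of main (not shown) typically just hands control to the migration CLI bundled with sea-orm-migration. The lock file above pulls in clap, dotenv and sea-orm-cli, so that CLI path is enabled in this build. A hedged sketch of what such a main looks like:

use migration::Migrator;
use sea_orm_migration::prelude::*;

#[async_std::main]
async fn main() {
    // Reads DATABASE_URL from the environment / .env file and dispatches
    // the standard migration subcommands (up, down, status, fresh, ...).
    sea_orm_migration::cli::run_cli(Migrator).await;
}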

@@ -1,7 +1,7 @@
 #[macro_export]
 macro_rules! drop_tables {
 ($man:expr, $($tbl:expr),*) => {
-use sea_schema::migration::prelude::*;
+use sea_orm_migration::prelude::*;
 $(
 $man.drop_table(TableDropStatement::new().table($tbl).to_owned()).await?;
 )*
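
For reference, this is how the macro is meant to be invoked from a migration's down() method (a hedged sketch; the table identifiers are illustrative picks from the enums used in this migration, and mapping tables should be dropped before the tables they reference):

// Inside the MigrationTrait impl of a migration:
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
    // Expands to one manager.drop_table(...).await? per listed table.
    drop_tables!(manager, CdTagMappings::Table, Tags::Table, Files::Table);
    Ok(())
}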

@@ -1,81 +0,0 @@
CREATE TABLE storage_locations
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(128) UNIQUE NOT NULL,
path VARCHAR(255) NOT NULL
);
CREATE TABLE hashes
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
value TEXT NOT NULL
);
CREATE UNIQUE INDEX hashes_value_index ON hashes (value);
CREATE TABLE files
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
type INTEGER NOT NULL DEFAULT 0,
name VARCHAR(128),
comment VARCHAR(1024),
storage_id INTEGER NOT NULL,
hash_id INTEGER NOT NULL,
import_time DATETIME NOT NULL,
creation_time DATETIME NOT NULL,
change_time DATETIME NOT NULL,
FOREIGN KEY (storage_id) REFERENCES storage_locations (id),
FOREIGN KEY (hash_id) REFERENCES hashes (id)
);
CREATE TABLE namespaces
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(128) UNIQUE NOT NULL
);
CREATE UNIQUE INDEX namespaces_name_index ON namespaces (name);
CREATE TABLE tags
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
namespace_id INTEGER,
name VARCHAR(128) UNIQUE NOT NULL,
FOREIGN KEY (namespace_id) REFERENCES namespaces (id)
);
CREATE UNIQUE INDEX tag_name_index ON tags (name);
CREATE TABLE sources
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
url VARCHAR(512) NOT NULL
);
CREATE UNIQUE INDEX sources_value_index ON sources (url);
CREATE TABLE hash_tag_mappings
(
hash_id INTEGER NOT NULL,
tag_id INTEGER NOT NULL,
PRIMARY KEY (hash_id, tag_id),
FOREIGN KEY (hash_id) REFERENCES hashes (id),
FOREIGN KEY (tag_id) REFERENCES tags (id)
);
CREATE TABLE hash_source_mappings
(
hash_id INTEGER NOT NULL,
source_id INTEGER NOT NULL,
PRIMARY KEY (hash_id, source_id),
FOREIGN KEY (hash_id) REFERENCES hashes (id),
FOREIGN KEY (source_id) REFERENCES sources (id)
)

@@ -1,3 +0,0 @@
-- Add migration script here
ALTER TABLE files
ADD COLUMN mime_type VARCHAR(128);

@@ -1,14 +0,0 @@
-- Add migration script here
CREATE TABLE thumbnails (
id INTEGER PRIMARY KEY AUTOINCREMENT,
hash_id INTEGER UNIQUE NOT NULL,
storage_id INTEGER NOT NULL,
file_id INTEGER NOT NULL,
height INTEGER NOT NULL,
width INTEGER NOT NULL,
FOREIGN KEY (hash_id) REFERENCES hashes (id),
FOREIGN KEY (storage_id) REFERENCES storage_locations (id),
FOREIGN KEY (file_id) REFERENCES files (id)
);
CREATE UNIQUE INDEX thumbnail_file_resolution ON thumbnails (file_id, height, width);

@@ -1,3 +0,0 @@
-- Add migration script here
ALTER TABLE thumbnails
ADD COLUMN mime VARCHAR(128);

@@ -1,8 +0,0 @@
-- Add migration script here
DELETE FROM thumbnails WHERE file_id NOT IN (SELECT MIN(files.id) FROM files GROUP BY hash_id);
DELETE FROM files WHERE ROWID NOT IN (SELECT MIN(ROWID) FROM files GROUP BY hash_id);
DELETE FROM thumbnails WHERE hash_id NOT IN (SELECT MIN(hashes.id) FROM hashes GROUP BY value);
DELETE FROM files WHERE hash_id NOT IN (SELECT MIN(hashes.id) FROM hashes GROUP BY value);
DELETE FROM hashes WHERE ROWID NOT IN (SELECT MIN(ROWID) FROM hashes GROUP BY value);
CREATE UNIQUE INDEX hash_value_index ON hashes (value);
CREATE UNIQUE INDEX file_hash_id ON files (hash_id);

@@ -1,33 +0,0 @@
-- Add migration script here
PRAGMA foreign_keys=off;
ALTER TABLE tags RENAME TO _tags_old;
CREATE TABLE tags
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
namespace_id INTEGER,
name VARCHAR(128),
FOREIGN KEY (namespace_id) REFERENCES namespaces (id)
);
CREATE UNIQUE INDEX tag_namespace_name_index ON tags (namespace_id, name);
INSERT INTO tags SELECT * FROM _tags_old;
DROP TABLE _tags_old;
ALTER TABLE hash_tag_mappings RENAME TO _hash_tag_mappings_old;
CREATE TABLE hash_tag_mappings
(
hash_id INTEGER NOT NULL,
tag_id INTEGER NOT NULL,
PRIMARY KEY (hash_id, tag_id),
FOREIGN KEY (hash_id) REFERENCES hashes (id),
FOREIGN KEY (tag_id) REFERENCES tags (id)
);
CREATE UNIQUE INDEX hash_tag_mappings_hash_tag ON hash_tag_mappings (hash_id, tag_id);
INSERT INTO hash_tag_mappings SELECT * FROM _hash_tag_mappings_old;
DROP TABLE _hash_tag_mappings_old;
PRAGMA foreign_keys=on;

@@ -1,3 +0,0 @@
-- Add migration script here
CREATE INDEX index_hash_tag_mappings_tag_id ON hash_tag_mappings (tag_id);
CREATE INDEX index_tag_name ON tags (name);

@@ -1,3 +0,0 @@
-- Add migration script here
ALTER TABLE files
ADD COLUMN size INTEGER;

@@ -1,107 +0,0 @@
-- Add migration script here
PRAGMA foreign_keys= off;
-- create backup files table
ALTER TABLE files
RENAME TO _files_old;
-- create backup hashes table
ALTER TABLE hashes
RENAME TO _hashes_old;
-- create backup hash_tag_mappings table
ALTER TABLE hash_tag_mappings
RENAME TO _hash_tag_mappings_old;
-- create backup hash_source_mappings table
ALTER TABLE hash_source_mappings
RENAME TO _hash_source_mappings_old;
-- create content id table
CREATE TABLE content_descriptors
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
descriptor BLOB NOT NULL
);
CREATE UNIQUE INDEX content_descriptor_values ON content_descriptors (descriptor);
-- create content-id tag mappings table
CREATE TABLE cd_tag_mappings
(
cd_id INTEGER NOT NULL REFERENCES content_descriptors (id),
tag_id INTEGER NOT NULL REFERENCES tags (id),
PRIMARY KEY (cd_id, tag_id)
);
CREATE UNIQUE INDEX content_descriptor_tag_mapping_unique ON cd_tag_mappings (cd_id, tag_id);
CREATE INDEX content_descriptor_tag_mapping_tag ON cd_tag_mappings (tag_id);
-- create content-id source mappings table
CREATE TABLE cd_source_mappings
(
cd_id INTEGER NOT NULL REFERENCES content_descriptors (id),
source_id INTEGER NOT NULL REFERENCES sources (id),
PRIMARY KEY (cd_id, source_id)
);
CREATE UNIQUE INDEX content_descriptor_source_mapping_unique ON cd_source_mappings (cd_id, source_id);
-- create new files table
CREATE TABLE files
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
status INTEGER NOT NULL DEFAULT 10,
storage_id INTEGER NOT NULL REFERENCES storage_locations (id),
cd_id INTEGER NOT NULL REFERENCES content_descriptors (id),
mime_type VARCHAR(128) NOT NULL DEFAULT 'application/octet-stream'
);
CREATE INDEX files_contend_descriptor ON files (cd_id);
-- create metadata table
CREATE TABLE file_metadata
(
file_id INTEGER PRIMARY KEY REFERENCES files (id),
size INTEGER NOT NULL,
name VARCHAR(128),
comment VARCHAR(1024),
import_time DATETIME NOT NULL,
creation_time DATETIME NOT NULL,
change_time DATETIME NOT NULL
);
CREATE UNIQUE INDEX file_metadata_file_id_unique ON file_metadata (file_id);
-- add content identifiers from hashes table
INSERT INTO content_descriptors
SELECT id, value
FROM _hashes_old;
-- add files from files table
INSERT INTO files (id, storage_id, cd_id, mime_type)
SELECT id, storage_id, hash_id AS content_id, mime_type
FROM _files_old;
-- add metadata from files table
INSERT INTO file_metadata
SELECT id AS file_id, size, name, comment, import_time, creation_time, change_time
FROM _files_old;
-- add content tag mappings
INSERT INTO cd_tag_mappings
SELECT hash_id AS content_id, tag_id
FROM _hash_tag_mappings_old;
-- add content id source mappings
INSERT INTO cd_source_mappings
SELECT hash_id AS content_id, source_id
FROM _hash_source_mappings_old;
-- drop all old tables
DROP TABLE _hash_source_mappings_old;
DROP TABLE _hash_tag_mappings_old;
DROP TABLE _files_old;
DROP TABLE _hashes_old;
pragma foreign_keys= on;

@@ -1,50 +0,0 @@
-- Add migration script here
PRAGMA foreign_keys= off;
-- rename old files table
ALTER TABLE files
RENAME TO _files_old;
-- rename metadata value (because of foreign key constraints)
ALTER TABLE file_metadata
RENAME TO _file_metadata_old;
-- create new files table
CREATE TABLE files
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
status INTEGER NOT NULL DEFAULT 10,
cd_id INTEGER NOT NULL REFERENCES content_descriptors (id),
mime_type VARCHAR(128) NOT NULL DEFAULT 'application/octet-stream'
);
-- add data from files table
INSERT INTO files
SELECT id, status, cd_id, mime_type
FROM _files_old;
-- create metadata table
CREATE TABLE file_metadata
(
file_id INTEGER PRIMARY KEY REFERENCES files (id),
size INTEGER NOT NULL,
name VARCHAR(128),
comment VARCHAR(1024),
import_time DATETIME NOT NULL,
creation_time DATETIME NOT NULL,
change_time DATETIME NOT NULL
);
-- add back the old values
INSERT INTO file_metadata
SELECT *
FROM _file_metadata_old;
-- drop old tables
DROP TABLE _file_metadata_old;
DROP TABLE _files_old;
DROP TABLE storage_locations;
-- create indices on new tables
CREATE UNIQUE INDEX file_metadata_file_id_unique ON file_metadata (file_id);
CREATE INDEX files_content_descriptor ON files (cd_id);
PRAGMA foreign_keys= on;

@@ -1,20 +0,0 @@
-- Add migration script here
CREATE TABLE sorting_presets (
id INTEGER PRIMARY KEY AUTOINCREMENT
);
CREATE TABLE sort_keys (
id INTEGER PRIMARY KEY AUTOINCREMENT,
key_type INTEGER NOT NULL DEFAULT 0,
ascending INTEGER NOT NULL CHECK (ascending IN (0, 1)),
value VARCHAR(128)
);
CREATE TABLE sorting_preset_keys (
preset_id INTEGER REFERENCES sorting_presets (id) ON DELETE CASCADE,
key_id INTEGER REFERENCES sort_keys (id),
key_index INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY (preset_id, key_id)
);
CREATE INDEX sorting_preset_index ON sorting_preset_keys (preset_id);

@@ -1,17 +0,0 @@
PRAGMA foreign_keys= off;
ALTER TABLE sorting_preset_keys
RENAME TO _sorting_preset_keys_old;
CREATE TABLE sorting_preset_keys
(
preset_id INTEGER REFERENCES sorting_presets (id) ON DELETE CASCADE,
key_id INTEGER REFERENCES sort_keys (id),
key_index INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY (preset_id, key_id, key_index)
);
INSERT INTO sorting_preset_keys SELECT * FROM _sorting_preset_keys_old;
DROP TABLE _sorting_preset_keys_old;
PRAGMA foreign_keys= on;

@@ -1,5 +0,0 @@
CREATE TABLE job_states (
job_type INTEGER NOT NULL,
value BLOB,
PRIMARY KEY (job_type)
);

@@ -1,5 +1,6 @@
 use std::time::Duration;
+use migration::MigratorTrait;
 use sea_orm::{ConnectOptions, Database, DatabaseConnection};
 use sqlx::migrate::MigrateDatabase;
@@ -10,24 +11,16 @@ pub mod queries;
 /// Connects to the database, runs migrations and returns the RepoDatabase wrapper type
 pub async fn get_database<S: AsRef<str>>(uri: S) -> RepoDatabaseResult<DatabaseConnection> {
-migrate(uri.as_ref()).await?;
+if !sqlx::Sqlite::database_exists(uri.as_ref()).await? {
+sqlx::Sqlite::create_database(uri.as_ref()).await?;
+}
 let mut opt = ConnectOptions::new(uri.as_ref().to_string());
 opt.connect_timeout(Duration::from_secs(10))
 .idle_timeout(Duration::from_secs(10))
-.sqlx_logging(false);
+.sqlx_logging(true);
 let conn = Database::connect(opt).await?;
+migration::Migrator::up(&conn, None).await?;
 Ok(conn)
 }
-async fn migrate(uri: &str) -> RepoDatabaseResult<()> {
-use sqlx::Connection;
-if !sqlx::Sqlite::database_exists(uri).await? {
-sqlx::Sqlite::create_database(uri).await?;
-}
-let mut conn = sqlx::SqliteConnection::connect(uri).await?;
-sqlx::migrate!().run(&mut conn).await?;
-Ok(())
-}
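
Pieced together from the new side of this hunk, get_database now reads roughly as follows (RepoDatabaseResult is the crate's existing result alias; the newly imported migration::MigratorTrait brings up() into scope):

pub async fn get_database<S: AsRef<str>>(uri: S) -> RepoDatabaseResult<DatabaseConnection> {
    // Make sure the SQLite database file exists before connecting.
    if !sqlx::Sqlite::database_exists(uri.as_ref()).await? {
        sqlx::Sqlite::create_database(uri.as_ref()).await?;
    }
    let mut opt = ConnectOptions::new(uri.as_ref().to_string());
    opt.connect_timeout(Duration::from_secs(10))
        .idle_timeout(Duration::from_secs(10))
        .sqlx_logging(true);

    let conn = Database::connect(opt).await?;
    // Apply all pending sea-orm migrations; None means "run everything".
    migration::Migrator::up(&conn, None).await?;

    Ok(conn)
}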

@@ -21,7 +21,7 @@ path = "../mediarepo-core"
 path = "../mediarepo-database"
 [dependencies.sea-orm]
-version = "0.7.1"
+version = "0.8.0"
 features = ["runtime-tokio-native-tls", "macros"]
 default-features = false

@@ -6,7 +6,6 @@ use tokio::task::JoinHandle;
 use crate::encrypted::{EncryptedListener, Keys};
 use crate::prelude::utils::generate_secret;
 use mediarepo_core::bromine::prelude::encrypted::EncryptionOptions;
-use mediarepo_core::bromine::prelude::tcp::TcpOptions;
 use mediarepo_core::bromine::prelude::*;
 use mediarepo_core::error::{RepoError, RepoResult};
 use mediarepo_core::mediarepo_api::types::misc::InfoResponse;
