Merge pull request #40 from Trivernis/develop

Fix clippy errors and missing rt feature
Julius Riegel committed 3 years ago · commit 44fcdc51b0

@@ -0,0 +1,29 @@
+name: Lint project files
+
+on:
+  push:
+    branches: [ main, develop ]
+  pull_request:
+    branches: [ main, develop ]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Cache build data
+        uses: actions/cache@v2
+        with:
+          path: |
+            target
+            ~/.cargo/
+          key: ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-cargo-
+      - name: Clippy
+        run: cargo clippy

Cargo.lock (generated)

@@ -120,7 +120,7 @@ dependencies = [

 [[package]]
 name = "bromine"
-version = "0.20.1"
+version = "0.20.2"
 dependencies = [
  "async-trait",
  "bincode",

@@ -1,6 +1,6 @@
 [package]
 name = "bromine"
-version = "0.20.1"
+version = "0.20.2"
 authors = ["trivernis <trivernis@protonmail.com>"]
 edition = "2018"
 readme = "README.md"
@@ -46,7 +46,7 @@ features = []

 [dependencies.tokio]
 version = "1.17.0"
-features = ["net", "io-std", "io-util", "sync", "time", "macros"]
+features = ["net", "io-std", "io-util", "sync", "time", "macros", "rt"]

 [dependencies.postcard]
 version = "0.7.3"

@@ -103,7 +103,7 @@ impl Event {
     /// It represents the message that is replied to and can be None.
     #[inline]
     pub fn reference_id(&self) -> Option<u64> {
-        self.header.ref_id.clone()
+        self.header.ref_id
     }

     /// Decodes the payload to the given type implementing the receive payload trait
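
This and the later hunks that drop .clone() on ref_id and timeout address clippy's clone_on_copy lint: Option<u64> and Duration are Copy, so an explicit clone is just noise. Illustrative sketch:

use std::time::Duration;

fn main() {
    let ref_id: Option<u64> = Some(7);
    let timeout = Duration::from_secs(60);

    // Copy types are duplicated implicitly; clippy::clone_on_copy flags
    // an explicit .clone() on them.
    let ref_id_copy = ref_id;
    let timeout_copy = timeout;

    assert_eq!(ref_id_copy, ref_id);
    assert_eq!(timeout_copy, timeout);
}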

@@ -39,7 +39,7 @@ type EventCallback = Arc<
 >;

 /// Handler for events
-#[derive(Clone)]
+#[derive(Clone, Default)]
 pub struct EventHandler {
     callbacks: HashMap<String, EventCallback>,
 }
@@ -59,9 +59,7 @@ impl Debug for EventHandler {
 impl EventHandler {
     /// Creates a new event handler
     pub fn new() -> Self {
-        Self {
-            callbacks: HashMap::new(),
-        }
+        Self::default()
     }

     /// Adds a new event callback
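
Deriving Default and delegating new to it is the standard fix for clippy's new_without_default lint, which fires on a public no-argument new without a matching Default impl. A simplified stand-in for EventHandler (the real callback type is omitted here):

use std::collections::HashMap;

#[derive(Clone, Default)]
pub struct Handler {
    // stand-in for HashMap<String, EventCallback>
    callbacks: HashMap<String, String>,
}

impl Handler {
    /// Delegating to Default keeps both constructors equivalent and
    /// silences new_without_default.
    pub fn new() -> Self {
        Self::default()
    }
}

fn main() {
    assert!(Handler::new().callbacks.is_empty());
}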

@@ -98,8 +98,8 @@ impl<P1, P2> TandemPayload<P1, P2> {

 impl<P1: IntoPayload, P2: IntoPayload> IntoPayload for TandemPayload<P1, P2> {
     fn into_payload(self, ctx: &Context) -> IPCResult<Bytes> {
-        let p1_bytes = self.load1.into_payload(&ctx)?;
-        let p2_bytes = self.load2.into_payload(&ctx)?;
+        let p1_bytes = self.load1.into_payload(ctx)?;
+        let p2_bytes = self.load2.into_payload(ctx)?;
         let mut bytes = BytesMut::with_capacity(p1_bytes.len() + p2_bytes.len() + 16);
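
Here ctx is already a &Context, so taking &ctx again produced a &&Context that the compiler immediately dereferences; clippy reports this as needless_borrow. Sketch of the pattern:

struct Context;

fn use_context(_ctx: &Context) {}

fn caller(ctx: &Context) {
    // `ctx` is already a reference; writing `&ctx` would pass a `&&Context`
    // that only gets auto-dereferenced again (clippy::needless_borrow).
    use_context(ctx);
}

fn main() {
    caller(&Context);
}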

@@ -69,6 +69,12 @@ pub struct IPCBuilder<L: AsyncStreamProtocolListener> {
     stream_options: <L::Stream as AsyncProtocolStream>::StreamOptions,
 }

+impl<L: AsyncStreamProtocolListener> Default for IPCBuilder<L> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl<L> IPCBuilder<L>
 where
     L: AsyncStreamProtocolListener,
@@ -92,8 +98,8 @@ where
             timeout: Duration::from_secs(60),
             #[cfg(feature = "serialize")]
             default_serializer: DynamicSerializer::first_available(),
-            listener_options: Default::default(),
-            stream_options: Default::default(),
+            listener_options: L::ListenerOptions::default(),
+            stream_options: <L::Stream as AsyncProtocolStream>::StreamOptions::default(),
         }
     }
@@ -236,7 +242,7 @@ where
     #[tracing::instrument(skip(self))]
     pub async fn build_pooled_client(self, pool_size: usize) -> Result<PooledContext> {
         if pool_size == 0 {
-            Error::BuildError("Pool size must be greater than 0".to_string());
+            return Err(Error::BuildError("Pool size must be greater than 0".to_string()));
         }
         self.validate()?;
         let data = Arc::new(RwLock::new(self.data));
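
This hunk is a genuine bug fix rather than a style change: the old code only constructed the BuildError value and immediately dropped it, so a pool size of zero was never rejected. A sketch of the difference, with a local error type standing in for the crate's Error:

#[derive(Debug)]
enum Error {
    BuildError(String),
}

type Result<T> = std::result::Result<T, Error>;

// Old behaviour: the error is built and dropped, the guard does nothing.
fn check_pool_size_broken(pool_size: usize) -> Result<()> {
    if pool_size == 0 {
        Error::BuildError("Pool size must be greater than 0".to_string());
    }
    Ok(())
}

// Fixed behaviour: the error is actually returned.
fn check_pool_size_fixed(pool_size: usize) -> Result<()> {
    if pool_size == 0 {
        return Err(Error::BuildError("Pool size must be greater than 0".to_string()));
    }
    Ok(())
}

fn main() {
    assert!(check_pool_size_broken(0).is_ok()); // bug: zero slipped through
    assert!(check_pool_size_fixed(0).is_err());
}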
@@ -250,7 +256,7 @@ where
                 handler: self.handler.clone(),
                 data: Arc::clone(&data),
                 reply_listeners: reply_listeners.clone(),
-                timeout: self.timeout.clone(),
+                timeout: self.timeout,
                 #[cfg(feature = "serialize")]
                 default_serializer: self.default_serializer.clone(),

@@ -83,7 +83,7 @@ impl Context {
     ) -> EmitMetadata<P> {
         self.emitter.emit_raw(
             self.clone(),
-            self.ref_id.clone(),
+            self.ref_id,
             name,
             namespace,
             event_type,

@@ -48,7 +48,7 @@ impl IPCServer {
             let handler = Arc::clone(&handler);
             let namespaces = Arc::clone(&namespaces);
             let data = Arc::clone(&data);
-            let timeout = self.timeout.clone();
+            let timeout = self.timeout;
             #[cfg(feature = "serialize")]
             let default_serializer = self.default_serializer.clone();
@@ -69,7 +69,7 @@ impl IPCServer {
                 default_serializer.clone(),
             );
             #[cfg(not(feature = "serialize"))]
-            let ctx = Context::new(emitter, data, None, reply_listeners, timeout.into());
+            let ctx = Context::new(emitter, data, None, reply_listeners, timeout);
             handle_connection::<L::Stream>(namespaces, handler, read_half, ctx).await;
         });

@@ -43,7 +43,7 @@ impl<P: IntoPayload + Send + Sync + 'static> Future for EmitMetadataWithResponse
             let timeout = self
                 .timeout
                 .take()
-                .unwrap_or_else(|| ctx.default_reply_timeout.clone());
+                .unwrap_or(ctx.default_reply_timeout);

             let event_id = match poll_unwrap!(emit_metadata.event_metadata.as_mut()).get_event() {
                 Ok(e) => e.id(),
@@ -58,7 +58,7 @@ impl<P: IntoPayload + Send + Sync + 'static> Future for EmitMetadataWithResponse
                 let reply = tokio::select! {
                     tx_result = tx.recv() => {
-                        Ok(tx_result.ok_or_else(|| Error::SendError)?)
+                        tx_result.ok_or(Error::SendError)
                     }
                     _ = tokio::time::sleep(timeout) => {
                         Err(Error::Timeout)
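
Two small lints in one hunk: ok_or_else/unwrap_or_else with a closure that just returns a cheap value is flagged as unnecessary_lazy_evaluations (the eager ok_or/unwrap_or is enough), and wrapping a ?-result straight back into Ok(...) is what needless_question_mark points out. Sketch of the eager form:

#[derive(Debug, PartialEq)]
enum Error {
    SendError,
}

fn first_reply(reply: Option<u64>) -> Result<u64, Error> {
    // `ok_or` takes the fallback by value; a closure via `ok_or_else` only
    // pays off when constructing the error is expensive.
    reply.ok_or(Error::SendError)
}

fn main() {
    assert_eq!(first_reply(Some(1)), Ok(1));
    assert_eq!(first_reply(None), Err(Error::SendError));
}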

@@ -10,23 +10,26 @@ use std::future::Future;
 use std::pin::Pin;
 use std::task::Poll;
 use std::time::Duration;

+use futures_core::future::BoxFuture;
 use tokio::sync::mpsc::Receiver;

 /// A metadata object returned after waiting for a reply to an event
 /// This object needs to be awaited for to get the actual reply
 pub struct EmitMetadataWithResponseStream<P: IntoPayload> {
     pub(crate) timeout: Option<Duration>,
-    pub(crate) fut: Option<Pin<Box<dyn Future<Output = Result<ResponseStream>> + Send + Sync>>>,
+    pub(crate) fut: Option<BoxFuture<'static, Result<ResponseStream>>>,
     pub(crate) emit_metadata: Option<EmitMetadata<P>>,
 }

+type StreamFutureResult = Result<(Option<Event>, Context, Receiver<Event>)>;
+
 /// An asynchronous stream one can read all responses to a specific event from.
 pub struct ResponseStream {
     event_id: u64,
     ctx: Option<Context>,
     receiver: Option<Receiver<Event>>,
     timeout: Duration,
-    fut: Option<Pin<Box<dyn Future<Output = Result<(Option<Event>, Context, Receiver<Event>)>>>>>,
+    fut: Option<BoxFuture<'static, StreamFutureResult>>,
 }

 impl ResponseStream {
@@ -71,7 +74,7 @@ impl<P: IntoPayload + Send + Sync + 'static> Future for EmitMetadataWithResponse
             let timeout = self
                 .timeout
                 .take()
-                .unwrap_or_else(|| ctx.default_reply_timeout.clone());
+                .unwrap_or(ctx.default_reply_timeout);

             let event_id = match poll_unwrap!(emit_metadata.event_metadata.as_mut()).get_event() {
                 Ok(e) => e.id(),
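
BoxFuture<'a, T> (from futures-core, also re-exported by the futures crate) is an alias for Pin<Box<dyn Future<Output = T> + Send + 'a>>, so the swap keeps the field types short and addresses clippy's type_complexity lint (note the alias carries a Send bound). Minimal sketch, assuming futures-core as a dependency:

use futures_core::future::BoxFuture;

struct Pending {
    // Equivalent to Option<Pin<Box<dyn Future<Output = u64> + Send + 'static>>>.
    fut: Option<BoxFuture<'static, u64>>,
}

fn make() -> Pending {
    Pending {
        // Box::pin(...) coerces into the boxed trait object behind the alias.
        fut: Some(Box::pin(async { 42u64 })),
    }
}

fn main() {
    assert!(make().fut.is_some());
}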

@@ -37,11 +37,11 @@ impl<P: IntoPayload> EventMetadata<P> {
         let payload = self.payload.take().ok_or(Error::InvalidState)?;
         let res_id = self.res_id.take().ok_or(Error::InvalidState)?;
         let event_type = self.event_type.take().ok_or(Error::InvalidState)?;
-        let payload_bytes = payload.into_payload(&ctx)?.into();
+        let payload_bytes = payload.into_payload(&ctx)?;

         let event = Event::new(
             namespace,
-            event.to_string(),
+            event,
             payload_bytes,
             res_id,
             event_type,
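
Both changes here remove conversions of a value into its own type: into_payload already yields Bytes, so the trailing .into() is an identity conversion (clippy::useless_conversion), and event is evidently already a String, so .to_string() only allocated a copy. A sketch of the identity-conversion case, using Vec<u8> in place of Bytes to avoid extra dependencies:

fn main() {
    let payload_bytes: Vec<u8> = vec![1, 2, 3];

    // Converting a type into itself compiles but does nothing; clippy's
    // useless_conversion lint flags it.
    let same: Vec<u8> = payload_bytes.into();

    let event = String::from("ping");
    // `event` is already a String; .to_string() would only allocate a copy
    // before handing it over.
    let name: String = event;

    assert_eq!(same, vec![1, 2, 3]);
    assert_eq!(name, "ping");
}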
