Execute commands in a separate thread

Add a command queue executor that watches for new commands and executes them on a longer-running background thread.
Emily Toop 2018-03-13 11:25:33 +00:00
parent ecc4a7a35a
commit d4365fa4cd
7 changed files with 130 additions and 104 deletions
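
The heart of the change, distilled: instead of spawning a fresh thread per notification batch (as the removed AsyncBatchExecutor did), each commit now enqueues a boxed command on an mpsc channel, and a single longer-lived worker thread drains the channel in order. The sketch below is a standalone illustration of that pattern, not Mentat's actual types; PrintCommand and spawn_executor are invented for the example. As in the CommandExecutor added in this commit, the worker exits once every Sender is dropped and recv() starts failing.

use std::sync::mpsc::{channel, Sender};
use std::thread::{self, JoinHandle};

// Work items are boxed behind a trait so heterogeneous commands share one queue.
trait Command {
    fn execute(&mut self);
}

struct PrintCommand(String);

impl Command for PrintCommand {
    fn execute(&mut self) {
        println!("executing: {}", self.0);
    }
}

// Spawn the background executor; it blocks on recv() and runs commands in
// arrival order until every Sender has been dropped.
fn spawn_executor() -> (Sender<Box<dyn Command + Send>>, JoinHandle<()>) {
    let (tx, rx) = channel::<Box<dyn Command + Send>>();
    let handle = thread::spawn(move || {
        while let Ok(mut cmd) = rx.recv() {
            cmd.execute();
        }
        // recv() returned Err(RecvError): channel disconnected, shut down.
    });
    (tx, handle)
}

fn main() {
    let (queue, worker) = spawn_executor();
    queue.send(Box::new(PrintCommand("notify observers".into()))).unwrap();
    drop(queue);             // last Sender gone: the worker will wind down
    worker.join().unwrap();  // wait for queued commands to finish
}

Dropping the last Sender is the shutdown signal; transaction_did_commit below relies on exactly this when it sets self.executor to None once the in-progress count reaches zero.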

Cargo.toml

@@ -29,6 +29,7 @@ rustc_version = "0.2"
 chrono = "0.4"
 error-chain = { git = "https://github.com/rnewman/error-chain", branch = "rnewman/sync" }
 lazy_static = "0.2"
+smallvec = "0.6"
 time = "0.1"
 uuid = "0.5"
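
smallvec enters the dependency tree because transaction reports are now batched in SmallVec<[TxReport; 4]>. A brief sketch of why, assuming the smallvec 0.6 API: up to four elements live inline in the vector itself, so the common case of a few reports per commit avoids heap allocation entirely.

extern crate smallvec; // smallvec = "0.6"

use smallvec::SmallVec;

fn main() {
    // Up to 4 elements are stored inline, inside the vector itself.
    let mut reports: SmallVec<[u32; 4]> = SmallVec::new();
    for tx_id in 0..4 {
        reports.push(tx_id);
    }
    assert!(!reports.spilled()); // still no heap allocation

    reports.push(4); // a fifth element forces a spill to the heap
    assert!(reports.spilled());
}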

db/Cargo.toml

@@ -10,6 +10,7 @@ itertools = "0.7"
 lazy_static = "0.2"
 num = "0.1"
 ordered-float = "0.5"
+smallvec = "0.6"
 time = "0.1"

 [dependencies.rusqlite]

db/src/lib.rs

@@ -21,6 +21,7 @@ extern crate lazy_static;
 extern crate num;
 extern crate rusqlite;
+extern crate smallvec;
 extern crate tabwriter;
 extern crate time;

db/src/tx.rs

@@ -671,6 +671,7 @@ impl<'conn, 'a, W> Tx<'conn, 'a, W> where W: TransactWatcher {
             }
             self.watcher.datom(op, e, a, &v);
+            // TODO: Create something like a watcher to do this for us.
             affected_attrs.insert(a);

             let reduced = (e, a, attribute, v, added);

db/src/tx_observer.rs

@@ -10,6 +10,13 @@
 use std::sync::{
     Arc,
+    Weak,
 };
+use std::sync::mpsc::{
+    channel,
+    Receiver,
+    RecvError,
+    Sender,
+};
 use std::thread;
@@ -17,155 +24,156 @@ use indexmap::{
     IndexMap,
 };

+use smallvec::{
+    SmallVec,
+};
+
 use types::{
     AttributeSet,
     TxReport,
 };

 pub struct TxObserver {
-    notify_fn: Arc<Option<Box<Fn(String, Vec<TxReport>) + Send + Sync>>>,
+    notify_fn: Arc<Box<Fn(&str, SmallVec<[&TxReport; 4]>) + Send + Sync>>,
     attributes: AttributeSet,
 }

 impl TxObserver {
-    pub fn new<F>(attributes: AttributeSet, notify_fn: F) -> TxObserver where F: Fn(String, Vec<TxReport>) + 'static + Send + Sync {
+    pub fn new<F>(attributes: AttributeSet, notify_fn: F) -> TxObserver where F: Fn(&str, SmallVec<[&TxReport; 4]>) + 'static + Send + Sync {
         TxObserver {
-            notify_fn: Arc::new(Some(Box::new(notify_fn))),
+            notify_fn: Arc::new(Box::new(notify_fn)),
             attributes,
         }
     }

-    pub fn applicable_reports(&self, reports: &Vec<TxReport>) -> Vec<TxReport> {
+    pub fn applicable_reports<'r>(&self, reports: &'r SmallVec<[TxReport; 4]>) -> SmallVec<[&'r TxReport; 4]> {
         reports.into_iter().filter_map(|report| {
-            if self.attributes.intersection(&report.changeset).next().is_some(){
-                Some(report.clone())
-            } else {
-                None
-            }
+            self.attributes.intersection(&report.changeset)
+                .next()
+                .and_then(|_| Some(report))
         }).collect()
     }

-    fn notify(&self, key: String, reports: Vec<TxReport>) {
-        if let Some(ref notify_fn) = *self.notify_fn {
-            (notify_fn)(key, reports);
-        } else {
-            eprintln!("no notify function specified for TxObserver");
-        }
-    }
-}
-
-pub trait CommandClone {
-    fn clone_box(&self) -> Box<Command + Send>;
-}
-
-impl<T> CommandClone for T where T: 'static + Command + Clone + Send {
-    fn clone_box(&self) -> Box<Command + Send> {
-        Box::new(self.clone())
+    fn notify(&self, key: &str, reports: SmallVec<[&TxReport; 4]>) {
+        (*self.notify_fn)(key, reports);
     }
 }

-pub trait Command: CommandClone {
-    fn execute(&self);
+pub trait Command {
+    fn execute(&mut self);
 }

-impl Clone for Box<Command + Send> {
-    fn clone(&self) -> Box<Command + Send> {
-        self.clone_box()
-    }
+pub struct TxCommand {
+    reports: SmallVec<[TxReport; 4]>,
+    observers: Weak<IndexMap<String, Arc<TxObserver>>>,
 }

-#[derive(Clone)]
-pub struct NotifyTxObserver {
-    key: String,
-    reports: Vec<TxReport>,
-    observer: Arc<TxObserver>,
-}
-
-impl NotifyTxObserver {
-    pub fn new(key: String, reports: Vec<TxReport>, observer: Arc<TxObserver>) -> Self {
-        NotifyTxObserver {
-            key,
+impl TxCommand {
+    fn new(observers: &Arc<IndexMap<String, Arc<TxObserver>>>, reports: SmallVec<[TxReport; 4]>) -> Self {
+        TxCommand {
             reports,
-            observer,
+            observers: Arc::downgrade(observers),
         }
     }
 }

-impl Command for NotifyTxObserver {
-    fn execute(&self) {
-        self.observer.notify(self.key.clone(), self.reports.clone());
-    }
-}
-
-#[derive(Clone)]
-pub struct AsyncBatchExecutor {
-    commands: Vec<Box<Command + Send>>,
-}
-
-impl Command for AsyncBatchExecutor {
-    fn execute(&self) {
-        let command_queue = self.commands.clone();
-        thread::spawn (move ||{
-            for command in command_queue.iter() {
-                command.execute();
+impl Command for TxCommand {
+    fn execute(&mut self) {
+        self.observers.upgrade().map(|observers| {
+            for (key, observer) in observers.iter() {
+                let applicable_reports = observer.applicable_reports(&self.reports);
+                if !applicable_reports.is_empty() {
+                    observer.notify(&key, applicable_reports);
+                }
             }
         });
     }
 }

-#[derive(Clone)]
 pub struct TxObservationService {
-    observers: IndexMap<String, Arc<TxObserver>>,
-    pub command_queue: Vec<Box<Command + Send>>,
+    observers: Arc<IndexMap<String, Arc<TxObserver>>>,
+    executor: Option<Sender<Box<Command + Send>>>,
+    in_progress_count: i32,
 }

 impl TxObservationService {
     pub fn new() -> Self {
         TxObservationService {
-            observers: IndexMap::new(),
-            command_queue: Vec::new(),
+            observers: Arc::new(IndexMap::new()),
+            executor: None,
+            in_progress_count: 0,
         }
     }

     // For testing purposes
     pub fn is_registered(&self, key: &String) -> bool {
         self.observers.contains_key(key)
     }

     pub fn register(&mut self, key: String, observer: Arc<TxObserver>) {
-        self.observers.insert(key.clone(), observer);
+        Arc::make_mut(&mut self.observers).insert(key, observer);
     }

     pub fn deregister(&mut self, key: &String) {
-        self.observers.remove(key);
+        Arc::make_mut(&mut self.observers).remove(key);
     }

     pub fn has_observers(&self) -> bool {
         !self.observers.is_empty()
     }

-    fn command_from_reports(&self, key: &String, reports: &Vec<TxReport>, observer: &Arc<TxObserver>) -> Option<Box<Command + Send>> {
-        let applicable_reports = observer.applicable_reports(reports);
-        if !applicable_reports.is_empty() {
-            Some(Box::new(NotifyTxObserver::new(key.clone(), applicable_reports, Arc::clone(observer))))
-        } else {
-            None
-        }
+    pub fn transaction_did_start(&mut self) {
+        self.in_progress_count += 1;
     }

-    pub fn transaction_did_commit(&mut self, reports: Vec<TxReport>) {
-        // notify all observers about their relevant transactions
-        let commands: Vec<Box<Command + Send>> = self.observers
-            .iter()
-            .filter_map(|(key, observer)| { self.command_from_reports(&key, &reports, &observer) })
-            .collect();
-        self.command_queue.push(Box::new(AsyncBatchExecutor{ commands }));
-    }
+    pub fn transaction_did_commit(&mut self, reports: SmallVec<[TxReport; 4]>) {
+        {
+            let executor = self.executor.get_or_insert_with(|| {
+                let (tx, rx): (Sender<Box<Command + Send>>, Receiver<Box<Command + Send>>) = channel();
+                let mut worker = CommandExecutor::new(rx);
+
+                thread::spawn(move || {
+                    worker.main();
+                });
+
+                tx
+            });
+
+            let cmd = Box::new(TxCommand::new(&self.observers, reports));
+            executor.send(cmd).unwrap();
+        }

-    pub fn run(&mut self) {
-        for command in self.command_queue.iter() {
-            command.execute();
+        self.in_progress_count -= 1;
+        if self.in_progress_count == 0 {
+            self.executor = None;
         }
-        self.command_queue.clear();
     }
 }
+
+struct CommandExecutor {
+    reciever: Receiver<Box<Command + Send>>,
+}
+
+impl CommandExecutor {
+    fn new(rx: Receiver<Box<Command + Send>>) -> Self {
+        CommandExecutor {
+            reciever: rx,
+        }
+    }
+
+    fn main(&mut self) {
+        loop {
+            match self.reciever.recv() {
+                Err(RecvError) => {
+                    eprintln!("Disconnected, terminating CommandExecutor");
+                    return
+                },
+                Ok(mut cmd) => {
+                    cmd.execute()
+                },
+            }
+        }
+    }
+}
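
A note on the ownership story above: TxCommand captures the observer registry through Weak, so a command waiting in the queue cannot keep observers alive, and register/deregister go through Arc::make_mut, which clones the map copy-on-write (or disassociates outstanding Weak handles) rather than mutating it underneath the worker. A minimal sketch of the downgrade/upgrade half of that; Observer and QueuedWork are invented names:

use std::sync::{Arc, Weak};

struct Observer {
    name: String,
}

struct QueuedWork {
    // Weak: queued work must not extend the registry's lifetime.
    observers: Weak<Vec<Observer>>,
}

impl QueuedWork {
    fn run(&self) {
        // upgrade() yields a strong Arc only while the registry is alive.
        match self.observers.upgrade() {
            Some(observers) => {
                for o in observers.iter() {
                    println!("notifying {}", o.name);
                }
            },
            None => println!("registry gone; dropping stale work"),
        }
    }
}

fn main() {
    let registry = Arc::new(vec![Observer { name: "logger".to_string() }]);
    let work = QueuedWork { observers: Arc::downgrade(&registry) };
    work.run();     // registry alive: notifies "logger"
    drop(registry); // e.g. every observer was deregistered
    work.run();     // upgrade() now returns None; the work is a no-op
}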

src/conn.rs

@@ -32,6 +32,8 @@ use rusqlite::{
     TransactionBehavior,
 };

+use smallvec::SmallVec;
+
 use edn;
 use mentat_core::{
@@ -214,7 +216,8 @@ pub struct InProgress<'a, 'c> {
     schema: Schema,
     cache: InProgressSQLiteAttributeCache,
     use_caching: bool,
-    tx_reports: Vec<TxReport>,
+    // TODO: Collect txids/affected datoms in a better way
+    tx_reports: SmallVec<[TxReport; 4]>,
     observer_service: Option<&'a Mutex<TxObservationService>>,
 }
@@ -453,12 +456,6 @@ impl<'a, 'c> InProgress<'a, 'c> {
         metadata.generation += 1;
         metadata.partition_map = self.partition_map;

-        // let the transaction observer know that there have been some transactions committed.
-        if let Some(ref observer_service) = self.observer_service {
-            let mut os = observer_service.lock().unwrap();
-            os.transaction_did_commit(self.tx_reports);
-        }
-
         // Update the conn's cache if we made any changes.
         self.cache.commit_to(&mut metadata.attribute_cache);
@@ -470,10 +467,10 @@ impl<'a, 'c> InProgress<'a, 'c> {
             // TODO: consider making vocabulary lookup lazy -- we won't need it much of the time.
         }

-        // run any commands that we've created along the way.
+        // let the transaction observer know that there have been some transactions committed.
         if let Some(ref observer_service) = self.observer_service {
             let mut os = observer_service.lock().unwrap();
-            os.run();
+            os.transaction_did_commit(self.tx_reports);
         }

         Ok(())
@@ -728,6 +725,14 @@ impl Conn {
                 current.attribute_cache.clone())
         };

+        let mut obs = self.tx_observer_service.lock().unwrap();
+        let observer_service = if obs.has_observers() {
+            obs.transaction_did_start();
+            Some(&self.tx_observer_service)
+        } else {
+            None
+        };
+
         Ok(InProgress {
             mutex: &self.metadata,
             transaction: tx,
@@ -736,8 +741,8 @@
             schema: (*current_schema).clone(),
             cache: InProgressSQLiteAttributeCache::from_cache(cache_cow),
             use_caching: true,
-            tx_reports: Vec::new(),
-            observer_service: if self.tx_observer_service.lock().unwrap().has_observers() { Some(&self.tx_observer_service) } else { None },
+            tx_reports: SmallVec::new(),
+            observer_service: observer_service,
         })
     }
@@ -846,7 +851,6 @@ mod tests {
         Duration,
         Instant
     };
-    use std::thread;

     use mentat_core::{
         CachedAttributes,
@@ -1534,16 +1538,21 @@ mod tests {
         let output = Arc::new(Mutex::new(ObserverOutput::default()));
         let mut_output = Arc::downgrade(&output);

+        let (tx, rx): (::std::sync::mpsc::Sender<()>, ::std::sync::mpsc::Receiver<()>) = ::std::sync::mpsc::channel();
+        // because the TxObserver is in an Arc and is therefore Sync, we have to wrap the Sender in a Mutex to also
+        // make it Sync.
+        let thread_tx = Mutex::new(tx);
         let tx_observer = Arc::new(TxObserver::new(registered_attrs, move |obs_key, batch| {
             if let Some(out) = mut_output.upgrade() {
                 let mut o = out.lock().unwrap();
-                o.called_key = Some(obs_key.clone());
+                o.called_key = Some(obs_key.to_string());
                 for report in batch.iter() {
                     o.txids.push(report.tx_id.clone());
                     o.changes.push(report.changeset.clone());
                 }
                 o.txids.sort();
             }
+            thread_tx.lock().unwrap().send(()).unwrap();
         }));
         conn.register_observer(key.clone(), Arc::clone(&tx_observer));
@@ -1575,7 +1584,7 @@
         }

         let delay = Duration::from_millis(100);
-        thread::sleep(delay);
+        let _ = rx.recv_timeout(delay);

         match Arc::try_unwrap(output) {
             Ok(out) => {
@@ -1608,16 +1617,19 @@
         let output = Arc::new(Mutex::new(ObserverOutput::default()));
         let mut_output = Arc::downgrade(&output);

+        let (tx, rx): (::std::sync::mpsc::Sender<()>, ::std::sync::mpsc::Receiver<()>) = ::std::sync::mpsc::channel();
+        let thread_tx = Mutex::new(tx);
         let tx_observer = Arc::new(TxObserver::new(registered_attrs, move |obs_key, batch| {
             if let Some(out) = mut_output.upgrade() {
                 let mut o = out.lock().unwrap();
-                o.called_key = Some(obs_key.clone());
+                o.called_key = Some(obs_key.to_string());
                 for report in batch.iter() {
                     o.txids.push(report.tx_id.clone());
                     o.changes.push(report.changeset.clone());
                 }
                 o.txids.sort();
             }
+            thread_tx.lock().unwrap().send(()).unwrap();
         }));
         conn.register_observer(key.clone(), Arc::clone(&tx_observer));
@@ -1638,7 +1650,7 @@
         }

         let delay = Duration::from_millis(100);
-        thread::sleep(delay);
+        let _ = rx.recv_timeout(delay);

         match Arc::try_unwrap(output) {
             Ok(out) => {
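
The test changes above replace a fixed thread::sleep with a channel handshake: the observer callback sends () once it has run, and the test blocks on recv_timeout, waking as soon as the notification arrives instead of always paying the full 100ms (the Sender is wrapped in a Mutex only because TxObserver must be Sync and Sender is not). A reduced sketch of that synchronization:

use std::sync::mpsc::channel;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = channel::<()>();

    // Stand-in for the background observer invocation.
    thread::spawn(move || {
        // ... do the observable work, then signal the waiting test:
        tx.send(()).unwrap();
    });

    // Wake as soon as the signal arrives; give up after 100ms either way.
    let delay = Duration::from_millis(100);
    let _ = rx.recv_timeout(delay);
}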

src/lib.rs

@@ -18,6 +18,8 @@ extern crate lazy_static;
 extern crate rusqlite;
+extern crate smallvec;
 extern crate uuid;

 pub extern crate edn;