Compare commits

...

2 commits

Author SHA1 Message Date
Grisha Kruglov
687b2cf997 Add 'syncable' feature to 'db' crate to conditionally derive serialization for Partition*
This is leading up to syncing with partition support.
2018-07-10 17:43:08 -07:00
Grisha Kruglov
26446ddb05 Add a top-level "syncable" feature.
Tested with:

cargo test --all
cargo test --all --no-default-features
cargo build --manifest-path tools/cli/Cargo.toml --no-default-features
cargo run --manifest-path tools/cli/Cargo.toml --no-default-features debugcli

Co-authored-by: Nick Alexander <nalexander@mozilla.com>
2018-07-10 17:31:18 -07:00
10 changed files with 183 additions and 140 deletions

View file

@ -16,9 +16,10 @@ version = "0.8.1"
build = "build/version.rs"
[features]
default = ["bundled_sqlite3"]
default = ["bundled_sqlite3", "syncable"]
bundled_sqlite3 = ["rusqlite/bundled"]
sqlcipher = ["rusqlite/sqlcipher", "mentat_db/sqlcipher"]
syncable = ["mentat_tolstoy", "mentat_db/syncable"]
[workspace]
members = ["tools/cli", "ffi"]
@ -71,6 +72,7 @@ path = "query-translator"
[dependencies.mentat_tolstoy]
path = "tolstoy"
optional = true
[profile.release]
opt-level = 3

View file

@ -6,6 +6,7 @@ workspace = ".."
[features]
default = []
sqlcipher = ["rusqlite/sqlcipher"]
syncable = ["serde", "serde_json", "serde_derive"]
[dependencies]
failure = "0.1.1"
@ -17,6 +18,9 @@ log = "0.4"
ordered-float = "0.5"
time = "0.1"
petgraph = "0.4.12"
serde = { version = "1.0", optional = true }
serde_json = { version = "1.0", optional = true }
serde_derive = { version = "1.0", optional = true }
[dependencies.rusqlite]
version = "0.13"

View file

@ -15,6 +15,9 @@ extern crate itertools;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate log;
#[cfg(feature = "syncable")]
#[macro_use] extern crate serde_derive;
extern crate petgraph;
extern crate rusqlite;
extern crate tabwriter;

View file

@ -38,6 +38,7 @@ use errors;
/// Represents one partition of the entid space.
#[derive(Clone,Debug,Eq,Hash,Ord,PartialOrd,PartialEq)]
#[cfg_attr(feature = "syncable", derive(Serialize,Deserialize))]
pub struct Partition {
/// The first entid in the partition.
pub start: i64,

View file

@ -29,6 +29,8 @@ use mentat_query_algebrizer;
use mentat_query_projector;
use mentat_query_pull;
use mentat_sql;
#[cfg(feature = "syncable")]
use mentat_tolstoy;
pub type Result<T> = std::result::Result<T, MentatError>;
@ -107,6 +109,7 @@ pub enum MentatError {
#[fail(display = "{}", _0)]
SQLError(#[cause] mentat_sql::SQLError),
#[cfg(feature = "syncable")]
#[fail(display = "{}", _0)]
TolstoyError(#[cause] mentat_tolstoy::TolstoyError),
}
@ -159,6 +162,7 @@ impl From<mentat_sql::SQLError> for MentatError {
}
}
#[cfg(feature = "syncable")]
impl From<mentat_tolstoy::TolstoyError> for MentatError {
fn from(error: mentat_tolstoy::TolstoyError) -> MentatError {
MentatError::TolstoyError(error)

View file

@ -30,6 +30,8 @@ extern crate mentat_query_projector;
extern crate mentat_query_pull;
extern crate mentat_query_translator;
extern crate mentat_sql;
#[cfg(feature = "syncable")]
extern crate mentat_tolstoy;
pub use mentat_core::{

View file

@ -38,6 +38,7 @@ use mentat_db::{
TxObserver,
};
#[cfg(feature = "syncable")]
use mentat_tolstoy::Syncer;
use uuid::Uuid;
@ -237,6 +238,7 @@ impl Pullable for Store {
}
}
#[cfg(feature = "syncable")]
impl Syncable for Store {
fn sync(&mut self, server_uri: &String, user_uuid: &String) -> Result<()> {
let uuid = Uuid::parse_str(&user_uuid).map_err(|_| MentatError::BadUuid(user_uuid.clone()))?;

View file

@ -10,40 +10,44 @@
extern crate mentat;
extern crate mentat_core;
#[cfg(feature = "syncable")]
extern crate mentat_tolstoy;
use std::collections::BTreeMap;
#[cfg(feature = "syncable")]
mod tests {
use std::collections::BTreeMap;
use mentat::conn::Conn;
use mentat::conn::Conn;
use mentat::new_connection;
use mentat_tolstoy::tx_processor::{
use mentat::new_connection;
use mentat_tolstoy::tx_processor::{
Processor,
TxReceiver,
TxPart,
};
use mentat_tolstoy::errors::Result;
use mentat_core::{
};
use mentat_tolstoy::errors::Result;
use mentat_core::{
Entid,
TypedValue,
ValueType,
};
};
struct TxCountingReceiver {
struct TxCountingReceiver {
pub tx_count: usize,
pub is_done: bool,
}
}
impl TxCountingReceiver {
impl TxCountingReceiver {
fn new() -> TxCountingReceiver {
TxCountingReceiver {
tx_count: 0,
is_done: false,
}
}
}
}
impl TxReceiver for TxCountingReceiver {
impl TxReceiver for TxCountingReceiver {
fn tx<T>(&mut self, _tx_id: Entid, _d: &mut T) -> Result<()>
where T: Iterator<Item=TxPart> {
self.tx_count = self.tx_count + 1;
@ -54,24 +58,24 @@ impl TxReceiver for TxCountingReceiver {
self.is_done = true;
Ok(())
}
}
}
#[derive(Debug)]
struct TestingReceiver {
#[derive(Debug)]
struct TestingReceiver {
pub txes: BTreeMap<Entid, Vec<TxPart>>,
pub is_done: bool,
}
}
impl TestingReceiver {
impl TestingReceiver {
fn new() -> TestingReceiver {
TestingReceiver {
txes: BTreeMap::new(),
is_done: false,
}
}
}
}
impl TxReceiver for TestingReceiver {
impl TxReceiver for TestingReceiver {
fn tx<T>(&mut self, tx_id: Entid, d: &mut T) -> Result<()>
where T: Iterator<Item=TxPart> {
let datoms = self.txes.entry(tx_id).or_insert(vec![]);
@ -83,16 +87,16 @@ impl TxReceiver for TestingReceiver {
self.is_done = true;
Ok(())
}
}
}
fn assert_tx_datoms_count(receiver: &TestingReceiver, tx_num: usize, expected_datoms: usize) {
fn assert_tx_datoms_count(receiver: &TestingReceiver, tx_num: usize, expected_datoms: usize) {
let tx = receiver.txes.keys().nth(tx_num).expect("first tx");
let datoms = receiver.txes.get(tx).expect("datoms");
assert_eq!(expected_datoms, datoms.len());
}
}
#[test]
fn test_reader() {
#[test]
fn test_reader() {
let mut c = new_connection("").expect("Couldn't open conn.");
let mut conn = Conn::connect(&mut c).expect("Couldn't open DB.");
{
@ -154,4 +158,6 @@ fn test_reader() {
assert_eq!(TypedValue::Long(123), part.v);
assert_eq!(true, part.added);
}
}
}

View file

@ -4,9 +4,10 @@ version = "0.0.1"
# Forward mentat's features.
[features]
default = ["bundled_sqlite3"]
default = ["bundled_sqlite3", "syncable"]
sqlcipher = ["mentat/sqlcipher"]
bundled_sqlite3 = ["mentat/bundled_sqlite3"]
syncable = ["mentat/syncable"]
[lib]
name = "mentat_cli"

View file

@ -35,19 +35,23 @@ use mentat_core::{
};
use mentat::{
Binding,
CacheDirection,
Keyword,
Queryable,
QueryExplanation,
QueryOutput,
QueryResults,
Queryable,
Store,
Binding,
Syncable,
TxReport,
TypedValue,
};
#[cfg(feature = "syncable")]
use mentat::{
Syncable,
};
use command_parser::{
Command,
};
@ -66,7 +70,6 @@ use command_parser::{
COMMAND_QUERY_EXPLAIN_SHORT,
COMMAND_QUERY_PREPARED_LONG,
COMMAND_SCHEMA,
COMMAND_SYNC,
COMMAND_TIMER_LONG,
COMMAND_TRANSACT_LONG,
COMMAND_TRANSACT_SHORT,
@ -82,6 +85,11 @@ use command_parser::{
COMMAND_OPEN_ENCRYPTED,
};
#[cfg(feature = "syncable")]
use command_parser::{
COMMAND_SYNC,
};
use input::InputReader;
use input::InputResult::{
Empty,
@ -124,7 +132,9 @@ lazy_static! {
(COMMAND_TIMER_LONG, "Enable or disable timing of query and transact operations."),
(COMMAND_CACHE, "Cache an attribute. Usage: `.cache :foo/bar reverse`"),
(COMMAND_SYNC, "Synchronize the database against a Sync Server URL for a provided user UUID."),
#[cfg(feature = "syncable")]
(COMMAND_SYNC, "Synchronize the database against a Mentat Sync Server URL for a provided user UUID."),
]
};
}
@ -359,12 +369,20 @@ impl Repl {
Err(e) => eprintln!("{}", e)
};
},
#[cfg(feature = "syncable")]
Command::Sync(args) => {
match self.store.sync(&args[0], &args[1]) {
Ok(_) => println!("Synced!"),
Err(e) => eprintln!("{:?}", e)
};
}
},
#[cfg(not(feature = "syncable"))]
Command::Sync(_) => {
eprintln!(".sync requires the syncable Mentat feature");
},
Command::Timer(on) => {
self.toggle_timer(on);
},