diff --git a/core/Cargo.toml b/core/Cargo.toml index 687547c9..3201f005 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -5,10 +5,10 @@ workspace = ".." [dependencies] chrono = { version = "0.4", features = ["serde"] } -enum-set = "0.0.8" -failure = "0.1.1" -indexmap = "1.3.1" -ordered-float = { version = "1.0.2", features = ["serde"] } +enum-set = "0.0" +failure = "0.1" +indexmap = "1.3" +ordered-float = { version = "1.0", features = ["serde"] } uuid = { version = "0.8", features = ["v4", "serde"] } [dependencies.core_traits] diff --git a/core/src/lib.rs b/core/src/lib.rs index c0dd972f..dc9674f0 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -32,9 +32,8 @@ pub use chrono::{ Timelike, // For truncation. }; -pub use edn::{Cloned, FromMicros, FromRc, Keyword, ToMicros, Utc, ValueRc}; - pub use edn::parse::parse_query; +pub use edn::{Cloned, FromMicros, FromRc, Keyword, ToMicros, Utc, ValueRc}; pub use cache::{CachedAttributes, UpdateableCache}; diff --git a/db-traits/Cargo.toml b/db-traits/Cargo.toml index ceac5536..3ec99303 100644 --- a/db-traits/Cargo.toml +++ b/db-traits/Cargo.toml @@ -11,8 +11,8 @@ path = "lib.rs" sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" -failure_derive = "0.1.1" +failure = "0.1" +failure_derive = "0.1" [dependencies.edn] path = "../edn" diff --git a/edn/Cargo.toml b/edn/Cargo.toml index 1de50b76..c8cd57eb 100644 --- a/edn/Cargo.toml +++ b/edn/Cargo.toml @@ -7,18 +7,18 @@ workspace = ".." license = "Apache-2.0" repository = "https://github.com/mozilla/mentat" description = "EDN parser for Project Mentat" -build = "build.rs" readme = "./README.md" [dependencies] -chrono = "0.4.10" -itertools = "0.8.2" -num = "0.2.1" -ordered-float = "1.0.2" -pretty = "0.9.0" +chrono = "0.4" +itertools = "0.8" +num = "0.2" +ordered-float = "1.0" +pretty = "0.9" uuid = { version = "0.8", features = ["v4", "serde"] } serde = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true } +peg = "0.6" [dev-dependencies] serde_test = "1.0" @@ -26,6 +26,3 @@ serde_json = "1.0" [features] serde_support = ["serde", "serde_derive"] - -[build-dependencies] -peg = "0.5" diff --git a/edn/build.rs b/edn/build.rs deleted file mode 100644 index 54eff165..00000000 --- a/edn/build.rs +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 Mozilla -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -extern crate peg; - -fn main() { - peg::cargo_build("src/edn.rustpeg"); -} diff --git a/edn/src/edn.rustpeg b/edn/src/edn.rustpeg deleted file mode 100644 index 5f97ea6d..00000000 --- a/edn/src/edn.rustpeg +++ /dev/null @@ -1,491 +0,0 @@ -/* -*- comment-start: "//"; -*- */ -/* vim: set filetype=rust.rustpeg */ - -// Copyright 2016 Mozilla -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use -// this file except in compliance with the License. 
You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -use std::collections::{BTreeSet, BTreeMap, LinkedList}; -use std::iter::FromIterator; -use std::f64::{NAN, INFINITY, NEG_INFINITY}; - -use chrono::{ - DateTime, - TimeZone, - Utc -}; -use num::BigInt; -use ordered_float::OrderedFloat; -use uuid::Uuid; - -use entities::*; -use query; -use query::FromValue; -use symbols::*; -use types::{SpannedValue, Span, ValueAndSpan}; - -// Goal: Be able to parse https://github.com/edn-format/edn -// Also extensible to help parse http://docs.datomic.com/query.html - -// Debugging hint: test using `cargo test --features peg/trace -- --nocapture` -// to trace where the parser is failing - -// TODO: Support tagged elements -// TODO: Support discard - -pub nil -> SpannedValue = "nil" { SpannedValue::Nil } -pub nan -> SpannedValue = "#f" whitespace+ "NaN" { SpannedValue::Float(OrderedFloat(NAN)) } - -pub infinity -> SpannedValue = "#f" whitespace+ s:$(sign) "Infinity" - { SpannedValue::Float(OrderedFloat(if s == "+" { INFINITY } else { NEG_INFINITY })) } - -pub boolean -> SpannedValue - = "true" { SpannedValue::Boolean(true) } - / "false" { SpannedValue::Boolean(false) } - -digit = [0-9] -alphanumeric = [0-9a-zA-Z] -octaldigit = [0-7] -validbase = [3][0-6] / [12][0-9] / [2-9] -hex = [0-9a-fA-F] -sign = [+-] - -pub raw_bigint -> BigInt = b:$( sign? digit+ ) "N" - { b.parse::().unwrap() } -pub raw_octalinteger -> i64 = "0" i:$( octaldigit+ ) - { i64::from_str_radix(i, 8).unwrap() } -pub raw_hexinteger -> i64 = "0x" i:$( hex+ ) - { i64::from_str_radix(i, 16).unwrap() } -pub raw_basedinteger -> i64 = b:$( validbase ) "r" i:$( alphanumeric+ ) - { i64::from_str_radix(i, b.parse::().unwrap()).unwrap() } -pub raw_integer -> i64 = i:$( sign? digit+ ) !("." / ([eE])) - { i.parse::().unwrap() } -pub raw_float -> OrderedFloat = f:$(sign? digit+ ("." digit+)? ([eE] sign? digit+)?) - { OrderedFloat(f.parse::().unwrap()) } - -pub bigint -> SpannedValue = v:raw_bigint { SpannedValue::BigInteger(v) } -pub octalinteger -> SpannedValue = v:raw_octalinteger { SpannedValue::Integer(v) } -pub hexinteger -> SpannedValue = v:raw_hexinteger { SpannedValue::Integer(v) } -pub basedinteger -> SpannedValue = v:raw_basedinteger { SpannedValue::Integer(v) } -pub integer -> SpannedValue = v:raw_integer { SpannedValue::Integer(v) } -pub float -> SpannedValue = v:raw_float { SpannedValue::Float(v) } - -number -> SpannedValue = ( bigint / basedinteger / hexinteger / octalinteger / integer / float ) - -// TODO: standalone characters: \, \newline, \return, \space and \tab. - -string_special_char -> &'input str = "\\" $([\\"ntr]) -string_normal_chars -> &'input str = $([^"\\]+) - -// This is what we need to do in order to unescape. We can't just match the entire string slice: -// we get a Vec<&str> from rust-peg, where some of the parts might be unescaped special characters, -// and we join it together to form an output string. -// E.g., input = r#"\"foo\\\\bar\""# -// output = [quote, "foo", backslash, "bar", quote] -// result = r#""foo\\bar""# -// For the typical case, string_normal_chars will match multiple, leading to a single-element vec. 
-pub raw_text -> String = "\"" t:((string_special_char / string_normal_chars)*) "\"" - { t.join(&"").to_string() } - -pub text -> SpannedValue - = v:raw_text { SpannedValue::Text(v) } - -// RFC 3339 timestamps. #inst "1985-04-12T23:20:50.52Z" -// We accept an arbitrary depth of decimals. -// Note that we discard the timezone information -- all times are translated to UTC. -inst_string -> DateTime = - "#inst" whitespace+ "\"" d:$( [0-9]*<4> "-" [0-2][0-9] "-" [0-3][0-9] - "T" - [0-2][0-9] ":" [0-5][0-9] ":" [0-6][0-9] - ("." [0-9]+)? - ("Z" / (("+" / "-") [0-2][0-9] ":" [0-5][0-9])) - ) - "\"" {? - DateTime::parse_from_rfc3339(d) - .map(|t| t.with_timezone(&Utc)) - .map_err(|_| "invalid datetime") // Oh, rustpeg. - } - -inst_micros -> DateTime = - "#instmicros" whitespace+ d:$( digit+ ) { - let micros = d.parse::().unwrap(); - let seconds: i64 = micros / 1000000; - let nanos: u32 = ((micros % 1000000).abs() as u32) * 1000; - Utc.timestamp(seconds, nanos) - } - -inst_millis -> DateTime = - "#instmillis" whitespace+ d:$( digit+ ) { - let millis = d.parse::().unwrap(); - let seconds: i64 = millis / 1000; - let nanos: u32 = ((millis % 1000).abs() as u32) * 1000000; - Utc.timestamp(seconds, nanos) - } - -inst -> SpannedValue = t:(inst_millis / inst_micros / inst_string) - { SpannedValue::Instant(t) } - -uuid_string -> Uuid = - "\"" u:$( [a-f0-9]*<8> "-" [a-f0-9]*<4> "-" [a-f0-9]*<4> "-" [a-f0-9]*<4> "-" [a-f0-9]*<12> ) "\"" { - Uuid::parse_str(u).expect("this is a valid UUID string") - } - -pub uuid -> SpannedValue = "#uuid" whitespace+ u:uuid_string - { SpannedValue::Uuid(u) } - -namespace_divider = "." -namespace_separator = "/" - -// TODO: Be more picky here -// Keywords follow the rules of symbols, except they can (and must) begin with : -// e.g. :fred or :my/fred. See https://github.com/edn-format/edn#keywords -symbol_char_initial = [a-zA-Z0-9*!_?$%&=<>] -symbol_char_subsequent = [+a-zA-Z0-9*!_?$%&=<>-] - -symbol_namespace = symbol_char_initial symbol_char_subsequent* (namespace_divider symbol_char_subsequent+)* -symbol_name = ( symbol_char_initial+ symbol_char_subsequent* ) -plain_symbol_name = symbol_name / "..." / "." - -keyword_prefix = ":" - -pub symbol -> SpannedValue = - ns:( sns:$(symbol_namespace) namespace_separator { sns })? - n:$(plain_symbol_name) - { SpannedValue::from_symbol(ns, n) } - / #expected("symbol") - -pub keyword -> SpannedValue = - keyword_prefix - ns:( sns:$(symbol_namespace) namespace_separator { sns })? - n:$(symbol_name) - { SpannedValue::from_keyword(ns, n) } - / #expected("keyword") - -pub list -> SpannedValue = "(" __ v:(value)* __ ")" - { SpannedValue::List(LinkedList::from_iter(v)) } - -pub vector -> SpannedValue = "[" __ v:(value)* __ "]" - { SpannedValue::Vector(v) } - -pub set -> SpannedValue = "#{" __ v:(value)* __ "}" - { SpannedValue::Set(BTreeSet::from_iter(v)) } - -pair -> (ValueAndSpan, ValueAndSpan) = - k:(value) v:(value) { - (k, v) - } - -pub map -> SpannedValue = "{" __ v:(pair)* __ "}" - { SpannedValue::Map(BTreeMap::from_iter(v)) } - -// It's important that float comes before integer or the parser assumes that -// floats are integers and fails to parse -pub value -> ValueAndSpan = - __ start:#position v:(nil / nan / infinity / boolean / number / inst / uuid / text / keyword / symbol / list / vector / map / set) end:#position __ { - ValueAndSpan { - inner: v, - span: Span::new(start, end) - } - } - / #expected("value") - -atom -> ValueAndSpan - = v:value {? 
if v.is_atom() { Ok(v) } else { Err("expected atom") } } - -// Clojure (and thus EDN) regards commas as whitespace, and thus the two-element vectors [1 2] and -// [1,,,,2] are equivalent, as are the maps {:a 1, :b 2} and {:a 1 :b 2}. -whitespace = #quiet<[ \r\n\t,]> -comment = #quiet<";" [^\r\n]* [\r\n]?> - -__ = (whitespace / comment)* - -// Transaction entity parser starts here. - -pub op -> OpType - = ":db/add" { OpType::Add } - / ":db/retract" { OpType::Retract } - -raw_keyword -> Keyword = - keyword_prefix - ns:( sns:$(symbol_namespace) namespace_separator { sns })? - n:$(symbol_name) { - match ns { - Some(ns) => Keyword::namespaced(ns, n), - None => Keyword::plain(n), - } - } - / #expected("keyword") - -raw_forward_keyword -> Keyword - = v:raw_keyword {? if v.is_forward() { Ok(v) } else { Err("expected :forward or :forward/keyword") } } - -raw_backward_keyword -> Keyword - = v:raw_keyword {? if v.is_backward() { Ok(v) } else { Err("expected :_backword or :backward/_keyword") } } - -raw_namespaced_keyword -> Keyword - = keyword_prefix ns:$(symbol_namespace) namespace_separator n:$(symbol_name) { Keyword::namespaced(ns, n) } - / #expected("namespaced keyword") - -raw_forward_namespaced_keyword -> Keyword - = v:raw_namespaced_keyword {? if v.is_forward() { Ok(v) } else { Err("expected namespaced :forward/keyword") } } - -raw_backward_namespaced_keyword -> Keyword - = v:raw_namespaced_keyword {? if v.is_backward() { Ok(v) } else { Err("expected namespaced :backward/_keyword") } } - -entid -> EntidOrIdent - = v:( raw_basedinteger / raw_hexinteger / raw_octalinteger / raw_integer ) { EntidOrIdent::Entid(v) } - / v:raw_namespaced_keyword { EntidOrIdent::Ident(v) } - / #expected("entid") - -forward_entid -> EntidOrIdent - = v:( raw_basedinteger / raw_hexinteger / raw_octalinteger / raw_integer ) { EntidOrIdent::Entid(v) } - / v:raw_forward_namespaced_keyword { EntidOrIdent::Ident(v) } - / #expected("forward entid") - -backward_entid -> EntidOrIdent - = v:raw_backward_namespaced_keyword { EntidOrIdent::Ident(v.to_reversed()) } - / #expected("backward entid") - -lookup_ref -> LookupRef - = "(" __ "lookup-ref" __ a:(entid) __ v:(value) __ ")" { LookupRef { a: AttributePlace::Entid(a), v } } - / #expected("lookup-ref") - -tx_function -> TxFunction - = "(" __ n:$(symbol_name) __ ")" { TxFunction { op: PlainSymbol::plain(n) } } - -entity_place -> EntityPlace - = v:raw_text { EntityPlace::TempId(TempId::External(v).into()) } - / v:entid { EntityPlace::Entid(v) } - / v:lookup_ref { EntityPlace::LookupRef(v) } - / v:tx_function { EntityPlace::TxFunction(v) } - -value_place_pair -> (EntidOrIdent, ValuePlace) - = k:(entid) __ v:(value_place) { (k, v) } - -map_notation -> MapNotation - = "{" __ kvs:(value_place_pair*) __ "}" { kvs.into_iter().collect() } - -value_place -> ValuePlace - = __ v:lookup_ref __ { ValuePlace::LookupRef(v) } - / __ v:tx_function __ { ValuePlace::TxFunction(v) } - / __ "[" __ vs:(value_place*) __ "]" __ { ValuePlace::Vector(vs) } - / __ v:map_notation __ { ValuePlace::MapNotation(v) } - / __ v:atom __ { ValuePlace::Atom(v) } - -pub entity -> Entity - = __ "[" __ op:(op) __ e:(entity_place) __ a:(forward_entid) __ v:(value_place) __ "]" __ { Entity::AddOrRetract { op, e: e, a: AttributePlace::Entid(a), v: v } } - / __ "[" __ op:(op) __ e:(value_place) __ a:(backward_entid) __ v:(entity_place) __ "]" __ { Entity::AddOrRetract { op, e: v, a: AttributePlace::Entid(a), v: e } } - / __ map:map_notation __ { Entity::MapNotation(map) } - / #expected("entity") - -pub entities -> Vec> - = 
__ "[" __ es:(entity*) __ "]" __ { es } - -// Query parser starts here. -// -// We expect every rule except the `raw_*` rules to eat whitespace -// (with `__`) at its start and finish. That means that every string -// pattern (say "[") should be bracketed on either side with either a -// whitespace-eating rule or an explicit whitespace eating `__`. - -query_function -> query::QueryFunction - = __ n:$(symbol_name) __ {? query::QueryFunction::from_symbol(&PlainSymbol::plain(n)).ok_or("expected query function") } - -fn_arg -> query::FnArg - = v:value {? query::FnArg::from_value(&v).ok_or("expected query function argument") } - / __ "[" args:fn_arg+ "]" __ { query::FnArg::Vector(args) } - -find_elem -> query::Element - = __ v:variable __ { query::Element::Variable(v) } - / __ "(" __ "the" v:variable ")" __ { query::Element::Corresponding(v) } - / __ "(" __ "pull" var:variable "[" patterns:pull_attribute+ "]" __ ")" __ { query::Element::Pull(query::Pull { var, patterns }) } - / __ "(" func:query_function args:fn_arg* ")" __ { query::Element::Aggregate(query::Aggregate { func, args }) } - -find_spec -> query::FindSpec - = f:find_elem "." __ { query::FindSpec::FindScalar(f) } - / fs:find_elem+ { query::FindSpec::FindRel(fs) } - / __ "[" f:find_elem __ "..." __ "]" __ { query::FindSpec::FindColl(f) } - / __ "[" fs:find_elem+ "]" __ { query::FindSpec::FindTuple(fs) } - -pull_attribute -> query::PullAttributeSpec - = __ "*" __ { query::PullAttributeSpec::Wildcard } - / __ k:raw_forward_namespaced_keyword __ alias:(":as" __ alias:raw_forward_keyword __ { alias })? { - let attribute = query::PullConcreteAttribute::Ident(::std::rc::Rc::new(k)); - let alias = alias.map(|alias| ::std::rc::Rc::new(alias)); - query::PullAttributeSpec::Attribute( - query::NamedPullAttribute { - attribute, - alias: alias, - }) - } - -limit -> query::Limit - = __ v:variable __ { query::Limit::Variable(v) } - / __ n:(raw_octalinteger / raw_hexinteger / raw_basedinteger / raw_integer) __ {? - if n > 0 { - Ok(query::Limit::Fixed(n as u64)) - } else { - Err("expected positive integer") - } - } - -order -> query::Order - = __ "(" __ "asc" v:variable ")" __ { query::Order(query::Direction::Ascending, v) } - / __ "(" __ "desc" v:variable ")" __ { query::Order(query::Direction::Descending, v) } - / v:variable { query::Order(query::Direction::Ascending, v) } - - -pattern_value_place -> query::PatternValuePlace - = v:value {? query::PatternValuePlace::from_value(&v).ok_or("expected pattern_value_place") } - -pattern_non_value_place -> query::PatternNonValuePlace - = v:value {? query::PatternNonValuePlace::from_value(&v).ok_or("expected pattern_non_value_place") } - -pattern -> query::WhereClause - = __ "[" - src:src_var? - e:pattern_non_value_place - a:pattern_non_value_place - v:pattern_value_place? - tx:pattern_non_value_place? - "]" __ - {? - let v = v.unwrap_or(query::PatternValuePlace::Placeholder); - let tx = tx.unwrap_or(query::PatternNonValuePlace::Placeholder); - - // Pattern::new takes care of reversal of reversed - // attributes: [?x :foo/_bar ?y] turns into - // [?y :foo/bar ?x]. - // - // This is a bit messy: the inner conversion to a Pattern can - // fail if the input is something like - // - // ```edn - // [?x :foo/_reversed 23.4] - // ``` - // - // because - // - // ```edn - // [23.4 :foo/reversed ?x] - // ``` - // - // is nonsense. That leaves us with a nested optional, which we unwrap here. 
- query::Pattern::new(src, e, a, v, tx) - .map(query::WhereClause::Pattern) - .ok_or("expected pattern") - } - -// TODO: this shouldn't be checked at parse time. -rule_vars -> BTreeSet - = vs:variable+ {? - let given = vs.len(); - let set: BTreeSet = vs.into_iter().collect(); - if given != set.len() { - Err("expected unique variables") - } else { - Ok(set) - } - } - -or_pattern_clause -> query::OrWhereClause - = clause:where_clause { query::OrWhereClause::Clause(clause) } - -or_and_clause -> query::OrWhereClause - = __ "(" __ "and" clauses:where_clause+ ")" __ { query::OrWhereClause::And(clauses) } - -or_where_clause -> query::OrWhereClause - = or_pattern_clause - / or_and_clause - -or_clause -> query::WhereClause - = __ "(" __ "or" clauses:or_where_clause+ ")" __ { - query::WhereClause::OrJoin(query::OrJoin::new(query::UnifyVars::Implicit, clauses)) - } - -or_join_clause -> query::WhereClause - = __ "(" __ "or-join" __ "[" vars:rule_vars "]" clauses:or_where_clause+ ")" __ { - query::WhereClause::OrJoin(query::OrJoin::new(query::UnifyVars::Explicit(vars), clauses)) - } - -not_clause -> query::WhereClause - = __ "(" __ "not" clauses:where_clause+ ")" __ { - query::WhereClause::NotJoin(query::NotJoin::new(query::UnifyVars::Implicit, clauses)) - } - -not_join_clause -> query::WhereClause - = __ "(" __ "not-join" __ "[" vars:rule_vars "]" clauses:where_clause+ ")" __ { - query::WhereClause::NotJoin(query::NotJoin::new(query::UnifyVars::Explicit(vars), clauses)) - } - -type_annotation -> query::WhereClause - = __ "[" __ "(" __ "type" var:variable __ ty:raw_keyword __ ")" __ "]" __ { - query::WhereClause::TypeAnnotation( - query::TypeAnnotation { - value_type: ty, - variable: var, - }) - } - -pred -> query::WhereClause - = __ "[" __ "(" func:query_function args:fn_arg* ")" __ "]" __ { - query::WhereClause::Pred( - query::Predicate { - operator: func.0, - args: args, - }) - } - -pub where_fn -> query::WhereClause - = __ "[" __ "(" func:query_function args:fn_arg* ")" __ binding:binding "]" __ { - query::WhereClause::WhereFn( - query::WhereFn { - operator: func.0, - args: args, - binding, - }) - } - -where_clause -> query::WhereClause - // Right now we only support patterns and predicates. See #239 for more. - = pattern - / or_join_clause - / or_clause - / not_join_clause - / not_clause - / type_annotation - / pred - / where_fn - -query_part -> query::QueryPart - = __ ":find" fs:find_spec { query::QueryPart::FindSpec(fs) } - / __ ":in" in_vars:variable+ { query::QueryPart::InVars(in_vars) } - / __ ":limit" l:limit { query::QueryPart::Limit(l) } - / __ ":order" os:order+ { query::QueryPart::Order(os) } - / __ ":where" ws:where_clause+ { query::QueryPart::WhereClauses(ws) } - / __ ":with" with_vars:variable+ { query::QueryPart::WithVars(with_vars) } - -pub parse_query -> query::ParsedQuery - = __ "[" qps:query_part+ "]" __ {? query::ParsedQuery::from_parts(qps) } - -variable -> query::Variable - = v:value {? query::Variable::from_value(&v).ok_or("expected variable") } - -src_var -> query::SrcVar - = v:value {? query::SrcVar::from_value(&v).ok_or("expected src_var") } - -variable_or_placeholder -> query::VariableOrPlaceholder - = v:variable { query::VariableOrPlaceholder::Variable(v) } - / __ "_" __ { query::VariableOrPlaceholder::Placeholder } - -binding -> query::Binding - = __ "[" __ "[" vs:variable_or_placeholder+ "]" __ "]" __ { query::Binding::BindRel(vs) } - / __ "[" v:variable "..." 
__ "]" __ { query::Binding::BindColl(v) } - / __ "[" vs:variable_or_placeholder+ "]" __ { query::Binding::BindTuple(vs) } - / v:variable { query::Binding::BindScalar(v) } diff --git a/edn/src/lib.rs b/edn/src/lib.rs index b6361b38..e8baf444 100644 --- a/edn/src/lib.rs +++ b/edn/src/lib.rs @@ -8,13 +8,12 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -#![allow(ellipsis_inclusive_range_patterns)] - extern crate chrono; extern crate itertools; extern crate num; extern crate ordered_float; extern crate pretty; +extern crate peg; extern crate uuid; #[cfg(feature = "serde_support")] @@ -38,10 +37,6 @@ pub mod utils; pub mod value_rc; pub use value_rc::{Cloned, FromRc, ValueRc}; -pub mod parse { - include!(concat!(env!("OUT_DIR"), "/edn.rs")); -} - // Re-export the types we use. pub use chrono::{DateTime, Utc}; pub use num::BigInt; @@ -49,9 +44,485 @@ pub use ordered_float::OrderedFloat; pub use uuid::Uuid; // Export from our modules. -pub use parse::ParseError; pub use types::{ FromMicros, FromMillis, Span, SpannedValue, ToMicros, ToMillis, Value, ValueAndSpan, }; pub use symbols::{Keyword, NamespacedSymbol, PlainSymbol}; + +use std::collections::{BTreeSet, BTreeMap, LinkedList}; +use std::iter::FromIterator; +use std::f64::{NAN, INFINITY, NEG_INFINITY}; + +use chrono::{ + TimeZone, +}; + +use entities::*; +use query::FromValue; + +// Goal: Be able to parse https://github.com/edn-format/edn +// Also extensible to help parse http://docs.datomic.com/query.html + +// Debugging hint: test using `cargo test --features peg/trace -- --nocapture` +// to trace where the parser is failing + +// TODO: Support tagged elements +// TODO: Support discard + +pub type ParseError = peg::error::ParseError; + +peg::parser!(pub grammar parse() for str { + + pub rule nil() -> SpannedValue = "nil" { SpannedValue::Nil } + pub rule nan() -> SpannedValue = "#f" whitespace()+ "NaN" { SpannedValue::Float(OrderedFloat(NAN)) } + + pub rule infinity() -> SpannedValue = "#f" whitespace()+ s:$(sign()) "Infinity" + { SpannedValue::Float(OrderedFloat(if s == "+" { INFINITY } else { NEG_INFINITY })) } + + pub rule boolean() -> SpannedValue + = "true" { SpannedValue::Boolean(true) } + / "false" { SpannedValue::Boolean(false) } + + rule digit() = ['0'..='9'] + rule alphanumeric() = ['0'..='9' | 'a'..='z' | 'A'..='Z'] + rule octaldigit() = ['0'..='7'] + rule validbase() = ['3']['0'..='6'] / ['1' | '2']['0'..='9'] / ['2'..='9'] + rule hex() = ['0'..='9' | 'a'..='f' | 'A'..='F'] + rule sign() = ['+' | '-'] + + pub rule raw_bigint() -> BigInt = b:$( sign()? digit()+ ) "N" + { b.parse::().unwrap() } + pub rule raw_octalinteger() -> i64 = "0" i:$( octaldigit()+ ) + { i64::from_str_radix(i, 8).unwrap() } + pub rule raw_hexinteger() -> i64 = "0x" i:$( hex()+ ) + { i64::from_str_radix(i, 16).unwrap() } + pub rule raw_basedinteger() -> i64 = b:$( validbase() ) "r" i:$( alphanumeric()+ ) + { i64::from_str_radix(i, b.parse::().unwrap()).unwrap() } + pub rule raw_integer() -> i64 = i:$( sign()? digit()+ ) !("." / (['e' | 'E'])) + { i.parse::().unwrap() } + pub rule raw_float() -> OrderedFloat = f:$(sign()? digit()+ ("." digit()+)? (['e' | 'E'] sign()? digit()+)?) 
+ { OrderedFloat(f.parse::().unwrap()) } + + pub rule bigint() -> SpannedValue = v:raw_bigint() { SpannedValue::BigInteger(v) } + pub rule octalinteger() -> SpannedValue = v:raw_octalinteger() { SpannedValue::Integer(v) } + pub rule hexinteger() -> SpannedValue = v:raw_hexinteger() { SpannedValue::Integer(v) } + pub rule basedinteger() -> SpannedValue = v:raw_basedinteger() { SpannedValue::Integer(v) } + pub rule integer() -> SpannedValue = v:raw_integer() { SpannedValue::Integer(v) } + pub rule float() -> SpannedValue = v:raw_float() { SpannedValue::Float(v) } + + rule number() -> SpannedValue = ( bigint() / basedinteger() / hexinteger() / octalinteger() / integer() / float() ) + + // TODO: standalone characters: \, \newline, \return, \space and \tab. + // rule string_standalone_chars() -> + rule string_special_char() -> &'input str = "\\" c:$(['\\' | '"' | 'n' | 't' | 'r']) { c } + rule string_normal_chars() -> &'input str = c:$((!['\"' | '\\'][_])+) { c } + + // This is what we need to do in order to unescape. We can't just match the entire string slice: + // we get a Vec<&str> from rust-peg, where some parts might be unescaped special characters and + // we join it together to form an output string. + // E.g., input = r#"\"foo\\\\bar\""# + // output = [quote, "foo", backslash, "bar", quote] + // result = r#""foo\\bar""# + // For the typical case, string_normal_chars will match multiple, leading to a single-element vec. + pub rule raw_text() -> String = "\"" t:((string_special_char() / string_normal_chars())*) "\"" + { t.join(&"").to_string() } + + pub rule text() -> SpannedValue + = v:raw_text() { SpannedValue::Text(v) } + + // RFC 3339 timestamps. #inst "1985-04-12T23:20:50.52Z" + // We accept an arbitrary depth of decimals. + // TODO: Note that we discard the timezone information -- all times are translated to UTC. Should we? + rule inst_string() -> DateTime = + "#inst" whitespace()+ "\"" d:$( ['0'..='9']*<4> "-" ['0'..='2']['0'..='9'] "-" ['0'..='3']['0'..='9'] + "T" + ['0'..='2']['0'..='9'] ":" ['0'..='5']['0'..='9'] ":" ['0'..='6']['0'..='9'] + ("." ['0'..='9']+)? + ("Z" / (("+" / "-") ['0'..='2']['0'..='9'] ":" ['0'..='5']['0'..='9'])) + ) + "\"" {? + DateTime::parse_from_rfc3339(d) + .map(|t| t.with_timezone(&Utc)) + .map_err(|_| "invalid datetime") // TODO Oh, rustpeg. + } + + rule inst_micros() -> DateTime = + "#instmicros" whitespace()+ d:$( digit()+ ) { + let micros = d.parse::().unwrap(); + let seconds: i64 = micros / 1000000; + let nanos: u32 = ((micros % 1000000).abs() as u32) * 1000; + Utc.timestamp(seconds, nanos) + } + + rule inst_millis() -> DateTime = + "#instmillis" whitespace()+ d:$( digit()+ ) { + let millis = d.parse::().unwrap(); + let seconds: i64 = millis / 1000; + let nanos: u32 = ((millis % 1000).abs() as u32) * 1000000; + Utc.timestamp(seconds, nanos) + } + + rule inst() -> SpannedValue = t:(inst_millis() / inst_micros() / inst_string()) + { SpannedValue::Instant(t) } + + rule uuid_string() -> Uuid = + "\"" u:$( ['a'..='f' | '0'..='9']*<8> "-" ['a'..='f' | '0'..='9']*<4> "-" ['a'..='f' | '0'..='9']*<4> "-" ['a'..='f' | '0'..='9']*<4> "-" ['a'..='f' | '0'..='9']*<12> ) "\"" { + Uuid::parse_str(u).expect("this is a valid UUID string") + } + + pub rule uuid() -> SpannedValue = "#uuid" whitespace()+ u:uuid_string() + { SpannedValue::Uuid(u) } + + rule namespace_divider() = "." + rule namespace_separator() = "/" + + // TODO: Be more picky here. + // Keywords follow the rules of symbols, except they can (and must) begin with : + // e.g. :fred or :my/fred. 
See https://github.com/edn-format/edn#keywords + rule symbol_char_initial() = ['a'..='z' | 'A'..='Z' | '0'..='9' | '*' | '!' | '_' | '?' | '$' | '%' | '&' | '=' | '<' | '>'] + rule symbol_char_subsequent() = ['+' | 'a'..='z' | 'A'..='Z' | '0'..='9' | '*' | '!' | '_' | '?' | '$' | '%' | '&' | '=' | '<' | '>' | '-'] + + rule symbol_namespace() = symbol_char_initial() symbol_char_subsequent()* (namespace_divider() symbol_char_subsequent()+)* + rule symbol_name() = ( symbol_char_initial()+ symbol_char_subsequent()* ) + rule plain_symbol_name() = symbol_name() / "..." / "." + + rule keyword_prefix() = ":" + + pub rule symbol() -> SpannedValue = + ns:( sns:$(symbol_namespace()) namespace_separator() { sns })? + n:$(plain_symbol_name()) + { SpannedValue::from_symbol(ns, n) } + / expected!("symbol") + + pub rule keyword() -> SpannedValue = + keyword_prefix() + ns:( sns:$(symbol_namespace()) namespace_separator() { sns })? + n:$(symbol_name()) + { SpannedValue::from_keyword(ns, n) } + / expected!("keyword") + + pub rule list() -> SpannedValue = "(" __ v:(value())* __ ")" + { SpannedValue::List(LinkedList::from_iter(v)) } + + pub rule vector() -> SpannedValue = "[" __ v:(value())* __ "]" + { SpannedValue::Vector(v) } + + pub rule set() -> SpannedValue = "#{" __ v:(value())* __ "}" + { SpannedValue::Set(BTreeSet::from_iter(v)) } + + pub rule pair() -> (ValueAndSpan, ValueAndSpan) = + k:(value()) v:(value()) { + (k, v) + } + + pub rule map() -> SpannedValue = "{" __ v:(pair())* __ "}" + { SpannedValue::Map(BTreeMap::from_iter(v)) } + + // Note: It's important that float comes before integer or the parser assumes that floats are integers and fails to parse. + pub rule value() -> ValueAndSpan = + __ start:position!() v:(nil() / nan() / infinity() / boolean() / number() / inst() / uuid() / text() / keyword() / symbol() / list() / vector() / map() / set()) end:position!() __ { + ValueAndSpan { + inner: v, + span: Span::new(start, end) + } + } + / expected!("value") + + rule atom() -> ValueAndSpan + = v:value() {? if v.is_atom() { Ok(v) } else { Err("expected atom") } } + + // Clojure (and thus EDN) regards commas as whitespace, and thus the two-element vectors [1 2] and + // [1,,,,2] are equivalent, as are the maps {:a 1, :b 2} and {:a 1 :b 2}. + rule whitespace() = quiet!{[' ' | '\r' | '\n' | '\t' | ',']} + rule comment() = quiet!{";" (!['\r' | '\n'][_])* ['\r' | '\n']?} + + rule __() = (whitespace() / comment())* + + // Transaction entity parser starts here. + + pub rule op() -> OpType + = ":db/add" { OpType::Add } + / ":db/retract" { OpType::Retract } + + rule raw_keyword() -> Keyword = + keyword_prefix() + ns:( sns:$(symbol_namespace()) namespace_separator() { sns })? + n:$(symbol_name()) { + match ns { + Some(ns) => Keyword::namespaced(ns, n), + None => Keyword::plain(n), + } + } + / expected!("keyword") + + rule raw_forward_keyword() -> Keyword + = v:raw_keyword() {? if v.is_forward() { Ok(v) } else { Err("expected :forward or :forward/keyword") } } + + rule raw_backward_keyword() -> Keyword + = v:raw_keyword() {? if v.is_backward() { Ok(v) } else { Err("expected :_backward or :backward/_keyword") } } + + rule raw_namespaced_keyword() -> Keyword + = keyword_prefix() ns:$(symbol_namespace()) namespace_separator() n:$(symbol_name()) { Keyword::namespaced(ns, n) } + / expected!("namespaced keyword") + + rule raw_forward_namespaced_keyword() -> Keyword + = v:raw_namespaced_keyword() {? 
if v.is_forward() { Ok(v) } else { Err("expected namespaced :forward/keyword") } } + + rule raw_backward_namespaced_keyword() -> Keyword + = v:raw_namespaced_keyword() {? if v.is_backward() { Ok(v) } else { Err("expected namespaced :backward/_keyword") } } + + rule entid() -> EntidOrIdent + = v:( raw_basedinteger() / raw_hexinteger() / raw_octalinteger() / raw_integer() ) { EntidOrIdent::Entid(v) } + / v:raw_namespaced_keyword() { EntidOrIdent::Ident(v) } + / expected!("entid") + + rule forward_entid() -> EntidOrIdent + = v:( raw_basedinteger() / raw_hexinteger() / raw_octalinteger() / raw_integer() ) { EntidOrIdent::Entid(v) } + / v:raw_forward_namespaced_keyword() { EntidOrIdent::Ident(v) } + / expected!("forward entid") + + rule backward_entid() -> EntidOrIdent + = v:raw_backward_namespaced_keyword() { EntidOrIdent::Ident(v.to_reversed()) } + / expected!("backward entid") + + rule lookup_ref() -> LookupRef + = "(" __ "lookup-ref" __ a:(entid()) __ v:(value()) __ ")" { LookupRef { a: AttributePlace::Entid(a), v } } + / expected!("lookup-ref") + + rule tx_function() -> TxFunction + = "(" __ n:$(symbol_name()) __ ")" { TxFunction { op: PlainSymbol::plain(n) } } + + rule entity_place() -> EntityPlace + = v:raw_text() { EntityPlace::TempId(TempId::External(v).into()) } + / v:entid() { EntityPlace::Entid(v) } + / v:lookup_ref() { EntityPlace::LookupRef(v) } + / v:tx_function() { EntityPlace::TxFunction(v) } + + rule value_place_pair() -> (EntidOrIdent, ValuePlace) + = k:(entid()) __ v:(value_place()) { (k, v) } + + rule map_notation() -> MapNotation + = "{" __ kvs:(value_place_pair()*) __ "}" { kvs.into_iter().collect() } + + rule value_place() -> ValuePlace + = __ v:lookup_ref() __ { ValuePlace::LookupRef(v) } + / __ v:tx_function() __ { ValuePlace::TxFunction(v) } + / __ "[" __ vs:(value_place()*) __ "]" __ { ValuePlace::Vector(vs) } + / __ v:map_notation() __ { ValuePlace::MapNotation(v) } + / __ v:atom() __ { ValuePlace::Atom(v) } + + pub rule entity() -> Entity + = __ "[" __ op:(op()) __ e:(entity_place()) __ a:(forward_entid()) __ v:(value_place()) __ "]" __ { Entity::AddOrRetract { op, e: e, a: AttributePlace::Entid(a), v: v } } + / __ "[" __ op:(op()) __ e:(value_place()) __ a:(backward_entid()) __ v:(entity_place()) __ "]" __ { Entity::AddOrRetract { op, e: v, a: AttributePlace::Entid(a), v: e } } + / __ map:map_notation() __ { Entity::MapNotation(map) } + / expected!("entity") + + pub rule entities() -> Vec> + = __ "[" __ es:(entity()*) __ "]" __ { es } + + // Query parser starts here. + // + // We expect every rule except the `raw_*` rules to eat whitespace + // (with `__`) at its start and finish. That means that every string + // pattern (say "[") should be bracketed on either side with either a + // whitespace-eating rule or an explicit whitespace eating `__`. + + rule query_function() -> query::QueryFunction + = __ n:$(symbol_name()) __ {? query::QueryFunction::from_symbol(&PlainSymbol::plain(n)).ok_or("expected query function") } + + rule fn_arg() -> query::FnArg + = v:value() {? 
query::FnArg::from_value(&v).ok_or("expected query function argument") } + / __ "[" args:fn_arg()+ "]" __ { query::FnArg::Vector(args) } + + rule find_elem() -> query::Element + = __ v:variable() __ { query::Element::Variable(v) } + / __ "(" __ "the" v:variable() ")" __ { query::Element::Corresponding(v) } + / __ "(" __ "pull" var:variable() "[" patterns:pull_attribute()+ "]" __ ")" __ { query::Element::Pull(query::Pull { var, patterns }) } + / __ "(" func:query_function() args:fn_arg()* ")" __ { query::Element::Aggregate(query::Aggregate { func, args }) } + + rule find_spec() -> query::FindSpec + = f:find_elem() "." __ { query::FindSpec::FindScalar(f) } + / fs:find_elem()+ { query::FindSpec::FindRel(fs) } + / __ "[" f:find_elem() __ "..." __ "]" __ { query::FindSpec::FindColl(f) } + / __ "[" fs:find_elem()+ "]" __ { query::FindSpec::FindTuple(fs) } + + rule pull_attribute() -> query::PullAttributeSpec + = __ "*" __ { query::PullAttributeSpec::Wildcard } + / __ k:raw_forward_namespaced_keyword() __ alias:(":as" __ alias:raw_forward_keyword() __ { alias })? { + let attribute = query::PullConcreteAttribute::Ident(::std::rc::Rc::new(k)); + let alias = alias.map(|alias| ::std::rc::Rc::new(alias)); + query::PullAttributeSpec::Attribute( + query::NamedPullAttribute { + attribute, + alias: alias, + }) + } + + rule limit() -> query::Limit + = __ v:variable() __ { query::Limit::Variable(v) } + / __ n:(raw_octalinteger() / raw_hexinteger() / raw_basedinteger() / raw_integer()) __ {? + if n > 0 { + Ok(query::Limit::Fixed(n as u64)) + } else { + Err("expected positive integer") + } + } + + rule order() -> query::Order + = __ "(" __ "asc" v:variable() ")" __ { query::Order(query::Direction::Ascending, v) } + / __ "(" __ "desc" v:variable() ")" __ { query::Order(query::Direction::Descending, v) } + / v:variable() { query::Order(query::Direction::Ascending, v) } + + + rule pattern_value_place() -> query::PatternValuePlace + = v:value() {? query::PatternValuePlace::from_value(&v).ok_or("expected pattern_value_place") } + + rule pattern_non_value_place() -> query::PatternNonValuePlace + = v:value() {? query::PatternNonValuePlace::from_value(&v).ok_or("expected pattern_non_value_place") } + + rule pattern() -> query::WhereClause + = __ "[" + src:src_var()? + e:pattern_non_value_place() + a:pattern_non_value_place() + v:pattern_value_place()? + tx:pattern_non_value_place()? + "]" __ + {? + let v = v.unwrap_or(query::PatternValuePlace::Placeholder); + let tx = tx.unwrap_or(query::PatternNonValuePlace::Placeholder); + + // Pattern::new takes care of reversal of reversed + // attributes: [?x :foo/_bar ?y] turns into + // [?y :foo/bar ?x]. + // + // This is a bit messy: the inner conversion to a Pattern can + // fail if the input is something like + // + // ```edn + // [?x :foo/_reversed 23.4] + // ``` + // + // because + // + // ```edn + // [23.4 :foo/reversed ?x] + // ``` + // + // is nonsense. That leaves us with a nested optional, which we unwrap here. + query::Pattern::new(src, e, a, v, tx) + .map(query::WhereClause::Pattern) + .ok_or("expected pattern") + } + + // TODO: This shouldn't be checked at parse time. + rule rule_vars() -> BTreeSet + = vs:variable()+ {? 
+ let given = vs.len(); + let set: BTreeSet = vs.into_iter().collect(); + if given != set.len() { + Err("expected unique variables") + } else { + Ok(set) + } + } + + rule or_pattern_clause() -> query::OrWhereClause + = clause:where_clause() { query::OrWhereClause::Clause(clause) } + + rule or_and_clause() -> query::OrWhereClause + = __ "(" __ "and" clauses:where_clause()+ ")" __ { query::OrWhereClause::And(clauses) } + + rule or_where_clause() -> query::OrWhereClause + = or_pattern_clause() + / or_and_clause() + + rule or_clause() -> query::WhereClause + = __ "(" __ "or" clauses:or_where_clause()+ ")" __ { + query::WhereClause::OrJoin(query::OrJoin::new(query::UnifyVars::Implicit, clauses)) + } + + rule or_join_clause() -> query::WhereClause + = __ "(" __ "or-join" __ "[" vars:rule_vars() "]" clauses:or_where_clause()+ ")" __ { + query::WhereClause::OrJoin(query::OrJoin::new(query::UnifyVars::Explicit(vars), clauses)) + } + + rule not_clause() -> query::WhereClause + = __ "(" __ "not" clauses:where_clause()+ ")" __ { + query::WhereClause::NotJoin(query::NotJoin::new(query::UnifyVars::Implicit, clauses)) + } + + rule not_join_clause() -> query::WhereClause + = __ "(" __ "not-join" __ "[" vars:rule_vars() "]" clauses:where_clause()+ ")" __ { + query::WhereClause::NotJoin(query::NotJoin::new(query::UnifyVars::Explicit(vars), clauses)) + } + + rule type_annotation() -> query::WhereClause + = __ "[" __ "(" __ "type" var:variable() __ ty:raw_keyword() __ ")" __ "]" __ { + query::WhereClause::TypeAnnotation( + query::TypeAnnotation { + value_type: ty, + variable: var, + }) + } + + rule pred() -> query::WhereClause + = __ "[" __ "(" func:query_function() args:fn_arg()* ")" __ "]" __ { + query::WhereClause::Pred( + query::Predicate { + operator: func.0, + args: args, + }) + } + + pub rule where_fn() -> query::WhereClause + = __ "[" __ "(" func:query_function() args:fn_arg()* ")" __ binding:binding() "]" __ { + query::WhereClause::WhereFn( + query::WhereFn { + operator: func.0, + args: args, + binding, + }) + } + + rule where_clause() -> query::WhereClause + // Right now we only support patterns and predicates. See #239 for more. + = pattern() + / or_join_clause() + / or_clause() + / not_join_clause() + / not_clause() + / type_annotation() + / pred() + / where_fn() + + rule query_part() -> query::QueryPart + = __ ":find" fs:find_spec() { query::QueryPart::FindSpec(fs) } + / __ ":in" in_vars:variable()+ { query::QueryPart::InVars(in_vars) } + / __ ":limit" l:limit() { query::QueryPart::Limit(l) } + / __ ":order" os:order()+ { query::QueryPart::Order(os) } + / __ ":where" ws:where_clause()+ { query::QueryPart::WhereClauses(ws) } + / __ ":with" with_vars:variable()+ { query::QueryPart::WithVars(with_vars) } + + pub rule parse_query() -> query::ParsedQuery + = __ "[" qps:query_part()+ "]" __ {? query::ParsedQuery::from_parts(qps) } + + rule variable() -> query::Variable + = v:value() {? query::Variable::from_value(&v).ok_or("expected variable") } + + rule src_var() -> query::SrcVar + = v:value() {? query::SrcVar::from_value(&v).ok_or("expected src_var") } + + rule variable_or_placeholder() -> query::VariableOrPlaceholder + = v:variable() { query::VariableOrPlaceholder::Variable(v) } + / __ "_" __ { query::VariableOrPlaceholder::Placeholder } + + rule binding() -> query::Binding + = __ "[" __ "[" vs:variable_or_placeholder()+ "]" __ "]" __ { query::Binding::BindRel(vs) } + / __ "[" v:variable() "..." 
__ "]" __ { query::Binding::BindColl(v) } + / __ "[" vs:variable_or_placeholder()+ "]" __ { query::Binding::BindTuple(vs) } + / v:variable() { query::Binding::BindScalar(v) } + +}); + diff --git a/edn/src/pretty_print.rs b/edn/src/pretty_print.rs index 840240ef..87885c6a 100644 --- a/edn/src/pretty_print.rs +++ b/edn/src/pretty_print.rs @@ -60,7 +60,7 @@ impl Value { let i = vs .into_iter() .map(|v| v.as_doc(allocator)) - .intersperse(allocator.space()); + .intersperse(allocator.line()); allocator .text(open) .append(allocator.concat(i).nest(n)) @@ -84,8 +84,8 @@ impl Value { let xs = vs .iter() .rev() - .map(|(k, v)| k.as_doc(pp).append(pp.space()).append(v.as_doc(pp)).group()) - .intersperse(pp.space()); + .map(|(k, v)| k.as_doc(pp).append(pp.line()).append(v.as_doc(pp)).group()) + .intersperse(pp.line()); pp.text("{") .append(pp.concat(xs).nest(1)) .append(pp.text("}")) diff --git a/edn/tests/tests.rs b/edn/tests/tests.rs index f864c87c..6f291456 100644 --- a/edn/tests/tests.rs +++ b/edn/tests/tests.rs @@ -23,10 +23,7 @@ use num::traits::{One, Zero}; use ordered_float::OrderedFloat; use chrono::{TimeZone, Utc}; -use edn::parse::{self, ParseError}; -use edn::symbols; -use edn::types::{Span, SpannedValue, Value, ValueAndSpan}; -use edn::utils; +use edn::{parse, symbols, types::{Span, SpannedValue, Value, ValueAndSpan}, utils, ParseError}; // Helper for making wrapped keywords with a namespace. fn k_ns(ns: &str, name: &str) -> Value { diff --git a/public-traits/Cargo.toml b/public-traits/Cargo.toml index cb559a7f..d0a730d4 100644 --- a/public-traits/Cargo.toml +++ b/public-traits/Cargo.toml @@ -13,10 +13,10 @@ sqlcipher = ["rusqlite/sqlcipher"] syncable = ["tolstoy_traits", "hyper", "serde_json"] [dependencies] -failure = "0.1.6" -failure_derive = "0.1.6" -http = "0.2.0" -tokio-core = "0.1.17" +failure = "0.1" +failure_derive = "0.1" +http = "0.2" +tokio-core = "0.1" uuid = "0.8" [dependencies.rusqlite] @@ -49,7 +49,7 @@ path = "../tolstoy-traits" optional = true [dependencies.hyper] -version = "0.13.1" +version = "0.13" optional = true [dependencies.serde_json] diff --git a/query-algebrizer-traits/Cargo.toml b/query-algebrizer-traits/Cargo.toml index 008ee267..df7407a5 100644 --- a/query-algebrizer-traits/Cargo.toml +++ b/query-algebrizer-traits/Cargo.toml @@ -8,8 +8,8 @@ name = "query_algebrizer_traits" path = "lib.rs" [dependencies] -failure = "0.1.1" -failure_derive = "0.1.1" +failure = "0.1" +failure_derive = "0.1" [dependencies.edn] path = "../edn" diff --git a/query-algebrizer-traits/errors.rs b/query-algebrizer-traits/errors.rs index 91c38327..95e44780 100644 --- a/query-algebrizer-traits/errors.rs +++ b/query-algebrizer-traits/errors.rs @@ -12,9 +12,7 @@ use std; // To refer to std::result::Result. use core_traits::{ValueType, ValueTypeSet}; -use edn::parse::ParseError; - -use edn::query::PlainSymbol; +use edn::{ ParseError, query::PlainSymbol }; pub type Result = std::result::Result; diff --git a/query-projector-traits/Cargo.toml b/query-projector-traits/Cargo.toml index 6942ae40..196f0209 100644 --- a/query-projector-traits/Cargo.toml +++ b/query-projector-traits/Cargo.toml @@ -11,8 +11,8 @@ path = "lib.rs" sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" -failure_derive = "0.1.1" +failure = "0.1" +failure_derive = "0.1" [dependencies.rusqlite] version = "0.21" diff --git a/query-projector/Cargo.toml b/query-projector/Cargo.toml index 2d65bcc9..ec538533 100644 --- a/query-projector/Cargo.toml +++ b/query-projector/Cargo.toml @@ -7,8 +7,8 @@ workspace = ".." 
sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" -indexmap = "1.3.1" +failure = "0.1" +indexmap = "1.3" [dependencies.rusqlite] version = "0.21" diff --git a/query-pull-traits/Cargo.toml b/query-pull-traits/Cargo.toml index 9bb793ee..e4ac6f26 100644 --- a/query-pull-traits/Cargo.toml +++ b/query-pull-traits/Cargo.toml @@ -8,8 +8,8 @@ name = "query_pull_traits" path = "lib.rs" [dependencies] -failure = "0.1.1" -failure_derive = "0.1.1" +failure = "0.1" +failure_derive = "0.1" [dependencies.core_traits] path = "../core-traits" diff --git a/query-sql/Cargo.toml b/query-sql/Cargo.toml index d56c9ffb..fdce3cff 100644 --- a/query-sql/Cargo.toml +++ b/query-sql/Cargo.toml @@ -4,7 +4,7 @@ version = "0.0.1" workspace = ".." [dependencies] -rusqlite = "0.21.0" +rusqlite = "0.21" [dependencies.edn] path = "../edn" diff --git a/sql/Cargo.toml b/sql/Cargo.toml index 5d6d1982..7e09b0fe 100644 --- a/sql/Cargo.toml +++ b/sql/Cargo.toml @@ -7,8 +7,8 @@ workspace = ".." sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" -ordered-float = "1.0.2" +failure = "0.1" +ordered-float = "1.0" [dependencies.rusqlite] version = "0.21" diff --git a/src/store.rs b/src/store.rs index 27476308..e5f81caf 100644 --- a/src/store.rs +++ b/src/store.rs @@ -27,7 +27,7 @@ use mentat_transaction::{ CacheAction, CacheDirection, InProgress, InProgressRead, Pullable, Queryable, }; -use super::conn::Conn; +use crate::conn::Conn; use public_traits::errors::Result; @@ -37,7 +37,7 @@ use mentat_transaction::query::{PreparedResult, QueryExplanation, QueryInputs, Q use mentat_tolstoy::{SyncFollowup, SyncReport, SyncResult}; #[cfg(feature = "syncable")] -use super::sync::Syncable; +use crate::sync::Syncable; /// A convenience wrapper around a single SQLite connection and a Conn. This is suitable /// for applications that don't require complex connection management. @@ -97,7 +97,7 @@ impl Store { /// supplied. Fails unless linked against sqlcipher (or something else that /// supports the Sqlite Encryption Extension). pub fn open_with_key(path: &str, encryption_key: &str) -> Result { - let mut connection = ::new_connection_with_key(path, encryption_key)?; + let mut connection = crate::new_connection_with_key(path, encryption_key)?; let conn = Conn::connect(&mut connection)?; Ok(Store { conn: conn, @@ -109,7 +109,7 @@ impl Store { /// rekey`). Fails unless linked against sqlcipher (or something else that supports the Sqlite /// Encryption Extension). 
pub fn change_encryption_key(&mut self, new_encryption_key: &str) -> Result<()> { - ::change_encryption_key(&self.sqlite, new_encryption_key)?; + crate::change_encryption_key(&self.sqlite, new_encryption_key)?; Ok(()) } } diff --git a/tolstoy-traits/Cargo.toml b/tolstoy-traits/Cargo.toml index 8ae541b6..b3649651 100644 --- a/tolstoy-traits/Cargo.toml +++ b/tolstoy-traits/Cargo.toml @@ -11,9 +11,9 @@ path = "lib.rs" sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" -failure_derive = "0.1.1" -http = "0.2.0" +failure = "0.1" +failure_derive = "0.1" +http = "0.2" hyper = "0.13" serde_json = "1.0" uuid = { version = "0.8" } diff --git a/tolstoy/Cargo.toml b/tolstoy/Cargo.toml index 8e641fb7..3ca55f53 100644 --- a/tolstoy/Cargo.toml +++ b/tolstoy/Cargo.toml @@ -9,20 +9,20 @@ authors = ["Grisha Kruglov "] sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.6" +failure = "0.1" futures = "0.3" hyper = "0.13" -hyper-tls = "0.4.1" +hyper-tls = "0.4" http = "0.2" log = "0.4" -mime = "0.3.16" +mime = "0.3" #tokio = { version = "0.2", features = ["full"] } tokio-core = "0.1" serde = "1.0" serde_json = "1.0" -serde_cbor = "0.11.1" +serde_cbor = "0.11" serde_derive = "1.0" -lazy_static = "1.4.0" +lazy_static = "1.4" uuid = { version = "0.8", features = ["v4", "serde"] } [dependencies.edn] diff --git a/tools/cli/Cargo.toml b/tools/cli/Cargo.toml index ef13ceb4..dcef0f8d 100644 --- a/tools/cli/Cargo.toml +++ b/tools/cli/Cargo.toml @@ -19,19 +19,19 @@ doc = false test = false [dependencies] -combine = "4.0.0-beta.2" -dirs = "2.0.2" -env_logger = "0.7.1" -failure = "0.1.6" -failure_derive = "0.1.6" +combine = "4.0" +dirs = "2.0" +env_logger = "0.7" +failure = "0.1" +failure_derive = "0.1" getopts = "0.2" -lazy_static = "1.4.0" -linefeed = "0.6.0" +lazy_static = "1.4" +linefeed = "0.6" log = "0.4" tabwriter = "1" -tempfile = "3.1.0" -termion = "1" -time = "0.2.2" +tempfile = "3.1" +termion = "1.5" +time = "0.2" [dependencies.rusqlite] version = "0.21" diff --git a/transaction/Cargo.toml b/transaction/Cargo.toml index f8ee32ff..f9ce38a4 100644 --- a/transaction/Cargo.toml +++ b/transaction/Cargo.toml @@ -7,7 +7,7 @@ workspace = ".." sqlcipher = ["rusqlite/sqlcipher"] [dependencies] -failure = "0.1.1" +failure = "0.1" [dependencies.edn] path = "../edn"
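The heart of this change is the switch from peg 0.5, where edn/build.rs ran peg::cargo_build over src/edn.rustpeg and lib.rs pulled the generated module in with include!, to peg 0.6, where the grammar lives inline behind the peg::parser! proc macro. A minimal sketch of that 0.6 pattern, using a throwaway arithmetic grammar rather than the EDN grammar above, and assuming only peg = "0.6" as a dependency:

// Each `pub rule` expands to a function in the generated module, so callers
// invoke `arithmetic::sum(input)` and get a `Result` back; there is no build
// script and no OUT_DIR include step.
peg::parser!(grammar arithmetic() for str {
    rule number() -> i64
        = n:$(['0'..='9']+) { n.parse::<i64>().unwrap() }

    pub rule sum() -> i64
        = a:number() "+" b:number() { a + b }
});

fn main() {
    assert_eq!(arithmetic::sum("2+40").unwrap(), 42);
    assert!(arithmetic::sum("two+40").is_err());
}

Deleting edn/build.rs and edn/src/edn.rustpeg above is the other half of the same move.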
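Because the macro still generates a public parse module at the crate root, downstream code keeps its entry points: core re-exports edn::parse::parse_query and the tests now import edn::{parse, ParseError}, as this diff shows. A rough usage sketch against the new grammar, assuming the edn crate from this patch; the pub rules and ValueAndSpan fields are as defined in lib.rs above, while the Debug formatting of those types is an assumption:

extern crate edn;

use edn::{parse, ParseError};

// Parse one EDN value; `parse::value` is the generated function for
// `pub rule value()`, returning a `ValueAndSpan` with `inner` and `span`.
fn describe(input: &str) -> Result<String, ParseError> {
    let v = parse::value(input)?;
    Ok(format!("{:?} at {:?}", v.inner, v.span))
}

fn main() {
    assert!(describe("#{1 2 3}").is_ok());
    assert!(describe(":db/add").is_ok());
    // An unterminated vector surfaces as the `ParseError` alias re-exported above.
    assert!(describe("[1 2").is_err());
}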