diff --git a/Cargo.toml b/Cargo.toml index d117f4e5..faf9702d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ time = "0.1.35" [dependencies.rusqlite] version = "0.10.1" # System sqlite might be very old. -features = ["bundled"] +features = ["bundled", "limits"] [dependencies.edn] path = "edn" diff --git a/db/Cargo.toml b/db/Cargo.toml index 3140c856..a6b4e26d 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -13,7 +13,7 @@ time = "0.1.35" [dependencies.rusqlite] version = "0.10.1" # System sqlite might be very old. -features = ["bundled"] +features = ["bundled", "limits"] [dependencies.edn] path = "../edn" diff --git a/db/src/db.rs b/db/src/db.rs index 37d457ce..3792cd1c 100644 --- a/db/src/db.rs +++ b/db/src/db.rs @@ -21,6 +21,7 @@ use itertools; use itertools::Itertools; use rusqlite; use rusqlite::types::{ToSql, ToSqlOutput}; +use rusqlite::limits::Limit; use ::{repeat_values, to_namespaced_keyword}; use bootstrap; @@ -613,7 +614,8 @@ impl MentatStoring for rusqlite::Connection { // produce the map [a v] -> e. // // TODO: `collect` into a HashSet so that any (a, v) is resolved at most once. - let chunks: itertools::IntoChunks<_> = avs.into_iter().enumerate().chunks(::SQLITE_MAX_VARIABLE_NUMBER / 4); + let max_vars = self.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize; + let chunks: itertools::IntoChunks<_> = avs.into_iter().enumerate().chunks(max_vars / 4); // We'd like to `flat_map` here, but it's not obvious how to `flat_map` across `Result`. // Alternatively, this is a `fold`, and it might be wise to express it as such. @@ -642,7 +644,9 @@ impl MentatStoring for rusqlite::Connection { // TODO: query against `datoms` and UNION ALL with `fulltext_datoms` rather than // querying against `all_datoms`. We know all the attributes, and in the common case, // where most unique attributes will not be fulltext-indexed, we'll be querying just - // `datoms`, which will be much faster. 
+ // `datoms`, which will be much faster. + assert!(bindings_per_statement * count < max_vars, "Too many values: {} * {} >= {}", bindings_per_statement, count, max_vars); + let values: String = repeat_values(bindings_per_statement, count); let s: String = format!("WITH t(search_id, a, v, value_type_tag) AS (VALUES {}) SELECT t.search_id, d.e \ FROM t, all_datoms AS d \ @@ -731,7 +735,8 @@ impl MentatStoring for rusqlite::Connection { fn insert_non_fts_searches<'a>(&self, entities: &'a [ReducedEntity<'a>], search_type: SearchType) -> Result<()> { let bindings_per_statement = 6; - let chunks: itertools::IntoChunks<_> = entities.into_iter().chunks(::SQLITE_MAX_VARIABLE_NUMBER / bindings_per_statement); + let max_vars = self.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize; + let chunks: itertools::IntoChunks<_> = entities.into_iter().chunks(max_vars / bindings_per_statement); // We'd like to flat_map here, but it's not obvious how to flat_map across Result. let results: Result<Vec<()>> = chunks.into_iter().map(|chunk| -> Result<()> { @@ -767,7 +772,8 @@ impl MentatStoring for rusqlite::Connection { .chain(once(flags as &ToSql)))))) }).collect(); - // TODO: cache this for selected values of count. + // TODO: cache this for selected values of count. + assert!(bindings_per_statement * count < max_vars, "Too many values: {} * {} >= {}", bindings_per_statement, count, max_vars); let values: String = repeat_values(bindings_per_statement, count); let s: String = if search_type == SearchType::Exact { format!("INSERT INTO temp.exact_searches (e0, a0, v0, value_type_tag0, added0, flags0) VALUES {}", values) @@ -797,7 +803,8 @@ impl MentatStoring for rusqlite::Connection { // TODO: only update changed partitions. 
pub fn update_partition_map(conn: &rusqlite::Connection, partition_map: &PartitionMap) -> Result<()> { let values_per_statement = 2; - let max_partitions = ::SQLITE_MAX_VARIABLE_NUMBER / values_per_statement; + let max_vars = conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize; + let max_partitions = max_vars / values_per_statement; if partition_map.len() > max_partitions { bail!(ErrorKind::NotYetImplemented(format!("No more than {} partitions are supported", max_partitions))); } @@ -1011,4 +1018,16 @@ mod tests { let transactions = value.as_vector().unwrap(); assert_transactions(&conn, &mut db.partition_map, &mut db.schema, transactions); } + + #[test] + fn test_sqlite_limit() { + let conn = new_connection("").expect("Couldn't open in-memory db"); + let initial = conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER); + // Sanity check. + assert!(initial > 500); + + // Make sure setting works. + conn.set_limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER, 222); + assert_eq!(222, conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)); + } } diff --git a/db/src/lib.rs b/db/src/lib.rs index cde13ad9..bd7f26ea 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -54,9 +54,6 @@ pub use types::{ use edn::symbols; -// TODO: replace with sqlite3_limit. #288. -pub const SQLITE_MAX_VARIABLE_NUMBER: usize = 999; - pub fn to_namespaced_keyword(s: &str) -> Result<symbols::NamespacedKeyword> { let splits = [':', '/']; let mut i = s.split(&splits[..]); @@ -84,7 +81,6 @@ pub fn to_namespaced_keyword(s: &str) -> Result<symbols::NamespacedKeyword> { pub fn repeat_values(values_per_tuple: usize, tuples: usize) -> String { assert!(values_per_tuple >= 1); assert!(tuples >= 1); - assert!(values_per_tuple * tuples < SQLITE_MAX_VARIABLE_NUMBER, "Too many values: {} * {} >= {}", values_per_tuple, tuples, SQLITE_MAX_VARIABLE_NUMBER); // Like "(?, ?, ?)". let inner = format!("({})", repeat("?").take(values_per_tuple).join(", ")); // Like "(?, ?, ?), (?, ?, ?)". 
diff --git a/query-projector/Cargo.toml b/query-projector/Cargo.toml index 25d84269..9ef12ac0 100644 --- a/query-projector/Cargo.toml +++ b/query-projector/Cargo.toml @@ -9,7 +9,7 @@ error-chain = "0.9.0" [dependencies.rusqlite] version = "0.10.1" # System sqlite might be very old. -features = ["bundled"] +features = ["bundled", "limits"] [dependencies.mentat_core] path = "../core"