diff --git a/.clang-format b/.clang-format
index a309831..40a45e9 100644
--- a/.clang-format
+++ b/.clang-format
@@ -13,7 +13,7 @@ AlwaysBreakAfterReturnType: None
BreakBeforeBinaryOperators: All
BreakBeforeBraces: Attach
BreakBeforeConceptDeclarations: Always
-ColumnLimit: 80
+ColumnLimit: 120
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
FixNamespaceComments: true
diff --git a/.envrc b/.envrc
index 47c52a0..f59b4d9 100644
--- a/.envrc
+++ b/.envrc
@@ -3,7 +3,3 @@ if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
fi
watch_file devShell.nix shell.nix flake.nix
use flake || use nix
-
-CMAKE_GENERATOR=Ninja
-CMAKE_MAKE_PROGRAM=Ninja
-
diff --git a/.gdbinit b/.gdbinit
new file mode 100644
index 0000000..9151197
--- /dev/null
+++ b/.gdbinit
@@ -0,0 +1 @@
+handle SIG35 nostop noprint
diff --git a/.gitmodules b/.gitmodules
index d877619..7c5d2c7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,4 @@
[submodule "seastar"]
path = seastar
url = https://github.com/scylladb/seastar.git
+ branch = 2b43417d210edbd7a3c3065bcfe3c0a9aea27f75
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3cf3f98..e57d258 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,7 +40,10 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") # Make our cmak
####################
## Dependencies ##
####################
-
+find_package(Boost REQUIRED COMPONENTS system)
+if(Boost_FOUND)
+ include_directories(${Boost_INCLUDE_DIRS})
+endif()
###############
## Options ##
diff --git a/docs/design.md b/docs/design.md
index 56385f0..efa6454 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -5,20 +5,87 @@ Formerly named "HanoiDB" but the C++ version needed a new name, so ^H^H and
voila, "NoiDB".
### History
-See [HanoiDB](https://github.com/krestenkrab/hanoidb) and the [lasp-lang](https://github.com/lasp-lang/hanoidb) fork.
+See [HanoiDB](https://github.com/krestenkrab/hanoidb) and the
+[lasp-lang](https://github.com/lasp-lang/hanoidb) fork.
+
+### Network API
+
+HTTP REST CRUD API
+
+#### Create/Update
+
+| *Method* | *Path* | *Consumes* | HTTP Code |
+| -------- | --------------- | ------------------------ | --------- |
+| `PUT` | `/kv/:key?value`| `value` query parameter | 2xx, etc. |
+| `POST`   | `/kv/:key`      | `application/text` body  | 2xx, etc. |
+
+*Path Parameters*
+ * `key` (string: "") - Specifies the path of the key to store.
+
+*Query Parameters*
+ * `value` (string: "") - Specifies the value to store for the key.
+
+#### Read Key
+
+| *Method* | *Path* | *Produces* | HTTP Code |
+| -------- | --------------- | ------------------------ | --------- |
+| `GET` | `/kv/:key` | `application/text` value | 2xx, etc. |
+
+*Path Parameters*
+ * `key` (string: "") - Specifies the path of the key to read.
+
+#### Delete Key
+
+| *Method* | *Path* | *Produces* | HTTP Code |
+| -------- | --------------- | ------------------------ | --------- |
+| `DELETE` | `/kv/:key` | | 2xx, etc. |
+
+*Path Parameters*
+ * `key` (string: "") - Specifies the path of the key to delete.
+
+### Seastar Specifics
+
+* All REST requests serviceable by any shard.
+* Queries map/reduce nurseries, then contact owning shard (possibly triggering
+ incremental merge work).
+* Owner of level 2n governed by random slicing.
+* Every node runs a nursery.
+* Each nursery is at most 2^8 KVPs
+* Nurseries are:
+ * B-trees in memory,
+ * and logged to disk according to format below.
+* Searching across nurseries is a map/reduce operation over the shards.
+* Combining and merging nurseries is owned by the shard closing the nursery.
+
### Basics
-If there are N records, there are in log2(N) levels (each being a plain B-tree in a file named "A-*level*.data"). The file `A-0.data` has 1 record, `A-1.data` has 2 records, `A-2.data` has 4 records, and so on: `A-n.data` has 2n records.
+If there are N records, there are log2(N) levels (each being a
+plain B-tree in a file named "A-*level*.data"). The file `A-0.data` has 1
+record, `A-1.data` has 2 records, `A-2.data` has 4 records, and so on:
+`A-n.data` has 2^n records.
-In "stable state", each level file is either full (there) or empty (not there); so if there are e.g. 20 records stored, then there are only data in filed `A-2.data` (4 records) and `A-4.data` (16 records).
+In "stable state", each level file is either full (there) or empty (not there);
+so if there are e.g. 20 records stored, then there are only 2 data files
+`A-2.data` (4 records) and `A-4.data` (16 records) required.
-OK, I've told you a lie. In practice, it is not practical to create a new file for each insert (injection at level #0), so we allows you to define the "top level" to be a number higher that #0; currently defaulting to #5 (32 records). That means that you take the amortization "hit" for ever 32 inserts.
+In practice, it is not practical to create a new file for each insert (injection
+at level #0), so we maintain a "top level" to be a number higher than #0;
+currently defaulting to #5 (32 records). That means that you take the
+amortization "hit" for every 32 inserts. This first combined level is the
+"Nursery".
### Lookup
-Lookup is quite simple: starting at `A-0.data`, the sought for Key is searched in the B-tree there. If nothing is found, search continues to the next data file. So if there are *N* levels, then *N* disk-based B-tree lookups are performed. Each lookup is "guarded" by a bloom filter to improve the likelihood that disk-based searches are only done when likely to succeed.
+Lookup is quite simple: starting at `A-0.data`, the sought for key is searched
+in the B-tree there. Finding nothing, the search continues to the next data
+file. So if there are *N* levels, then *N* disk-based B-tree lookups are
+performed. Each lookup is "guarded" by a bloom filter to improve the likelihood
+that disk-based searches are only done when likely to succeed.
### Insertion
-Insertion works by a mechanism known as B-tree injection. Insertion always starts by constructing a fresh B-tree with 1 element in it, and "injecting" that B-tree into level #0. So you always inject a B-tree of the same size as the size of the level you're injecting it into.
+Insertion works by a mechanism known as B-tree injection. Insertion always
+starts by constructing a fresh B-tree with 1 element in it, and "injecting" that
+B-tree into level #0. So you always inject a B-tree of the same size as the
+size of the level you're injecting it into.
- If the level being injected into empty (there is no A-*level*.data file), then the injected B-tree becomes the contents for that level (we just rename the file).
- Otherwise,
diff --git a/flake.lock b/flake.lock
index 6d87522..f0ad25e 100644
--- a/flake.lock
+++ b/flake.lock
@@ -22,13 +22,31 @@
"utils": "utils"
}
},
- "utils": {
+ "systems": {
"locked": {
- "lastModified": 1623875721,
- "narHash": "sha256-A8BU7bjS5GirpAUv4QA+QnJ4CceLHkcXdRp4xITDB0s=",
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1710146030,
+ "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
- "rev": "f7e004a55b120c02ecb6219596820fcd32ca8772",
+ "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
diff --git a/flake.nix b/flake.nix
index cb736df..d586be6 100644
--- a/flake.nix
+++ b/flake.nix
@@ -37,6 +37,7 @@
# Build time and Run time dependencies
boost
+ cryptopp
c-ares
fmt
gnutls
@@ -57,6 +58,10 @@
icon = "f121";
in ''
export PS1="$(echo -e '\u${icon}') {\[$(tput sgr0)\]\[\033[38;5;228m\]\w\[$(tput sgr0)\]\[\033[38;5;15m\]} (${name}) \\$ \[$(tput sgr0)\]"
+ export CMAKE_GENERATOR=Ninja
+ export CMAKE_MAKE_PROGRAM=Ninja
+ export CC=clang
+ export CXX=clang++
'';
};
});
diff --git a/seastar b/seastar
index a965080..2b43417 160000
--- a/seastar
+++ b/seastar
@@ -1 +1 @@
-Subproject commit a965080ec0bb895e5c6196d3b082fa8d8f49b512
+Subproject commit 2b43417d210edbd7a3c3065bcfe3c0a9aea27f75
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 87e8437..0042c69 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,3 +1,3 @@
add_executable(noidb)
-target_sources(noidb PRIVATE noidb.cc)
+target_sources(noidb PRIVATE noidb.cc Database.cc)
target_link_libraries(noidb PRIVATE Seastar::seastar)
diff --git a/src/noidb.cc b/src/noidb.cc
index 408a956..5d0d631 100644
--- a/src/noidb.cc
+++ b/src/noidb.cc
@@ -1,36 +1,85 @@
+
+#include "Database.hh"
+
+#include
#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
#include
-// using namespace seastar;
+#include
-seastar::logger lg("hanoidb");
+using namespace seastar;
+using namespace httpd;
-static seastar::future<> hello_from_all_cores_serial() {
- for (unsigned i = 0; i < seastar::smp::count; ++i) {
- co_await seastar::smp::submit_to(
- i, [] { lg.info("serial - Hello from every core"); });
- };
- co_return;
-}
+logger lg("noidb");
-static seastar::future<> hello_from_all_cores_parallel() {
- co_await seastar::smp::invoke_on_all([]() -> seastar::future<> {
- auto memory = seastar::memory::get_memory_layout();
- lg.info(
- "parallel - memory layout start={} end={} size={}", memory.start, memory.end, memory.end - memory.start);
- co_return;
- });
- co_return;
-}
+namespace bpo = boost::program_options;
int main(int argc, char** argv) {
seastar::app_template app;
- return app.run(argc, argv, [&]() -> seastar::future {
- co_await hello_from_all_cores_serial();
- co_await hello_from_all_cores_parallel();
- co_return 0;
- });
+ // Options
+ app.add_options()("address", bpo::value()->default_value("0.0.0.0"), "HTTP Server address");
+ app.add_options()("port", bpo::value()->default_value(8080), "HTTP Server port");
+ app.add_options()("data", bpo::value()->required(), "Data directory");
+
+ try {
+ return app.run(argc, argv, [&app] {
+ return seastar::async([&app] {
+ seastar_apps_lib::stop_signal stop_signal;
+ const auto& config = app.configuration();
+
+ // Start Server
+ seastar::net::inet_address addr(config["address"].as());
+ uint16_t port = config["port"].as();
+
+ seastar::httpd::http_server_control srv;
+ srv.start().get();
+
+ Database db(srv);
+
+ srv
+ .set_routes([](seastar::httpd::routes& r) {
+ r.add(
+ seastar::httpd::operation_type::GET,
+ seastar::httpd::url("/hello"),
+ new seastar::httpd::function_handler(
+ []([[maybe_unused]] seastar::httpd::const_req req) { return "hi"; }));
+ })
+ .get();
+
+ srv
+ .set_routes([](seastar::httpd::routes& r) {
+ r.add(
+ seastar::httpd::operation_type::GET,
+ seastar::httpd::url("").remainder("path"),
+ new seastar::httpd::directory_handler("./public/"));
+ })
+ .get();
+
+ srv.listen(seastar::socket_address{addr, port}).get();
+ lg.info("NoiDB HTTP server listening on {}:{}\n", addr, port);
+
+ seastar::engine().at_exit([&srv, &db]() -> seastar::future<> {
+ lg.info("Stopping NoiDB HTTP server");
+ auto status = co_await db.stop();
+ if (status) lg.info("Stopped NoiDB Database");
+ co_await srv.stop();
+ co_return;
+ });
+
+ stop_signal.wait().get(); // block waiting for SIGINT or SIGTERM signal
+ });
+ });
+ } catch (...) {
+ lg.error("Failed to start NoiDB: {}\n", std::current_exception());
+ return 1;
+ }
}