feat(db): Add mdbx-rs Apache licensed code 55e234 (#132)

Author: rakita
Date: 2022-10-25 11:41:04 +02:00
Committed by: GitHub
Parent: 1fe0affa4b
Commit: 4d2e67e9f5
48 changed files with 83412 additions and 0 deletions


@@ -0,0 +1,45 @@
[package]
name = "libmdbx"
version = "0.1.6"
edition = "2021"
license = "Apache-2.0"
description = "Idiomatic and safe MDBX wrapper."
documentation = "https://docs.rs/libmdbx"
homepage = "https://github.com/vorot93/libmdbx-rs"
repository = "https://github.com/vorot93/libmdbx-rs"
readme = "README.md"
keywords = ["LMDB", "MDBX", "database", "storage-engine", "bindings"]
categories = ["database"]
[lib]
name = "libmdbx"
[workspace]
members = ["mdbx-sys"]
[dependencies]
bitflags = "1"
byteorder = "1"
derive_more = "0.99"
indexmap = "1"
libc = "0.2"
parking_lot = "0.12"
thiserror = "1"
ffi = { package = "mdbx-sys", version = "=0.11.8-0", path = "./mdbx-sys" }
lifetimed-bytes = { version = "0.1", optional = true }
[dev-dependencies]
criterion = "0.3"
rand = "0.8"
rand_xorshift = "0.3"
tempfile = "3"
[[bench]]
name = "cursor"
harness = false
[[bench]]
name = "transaction"
harness = false
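
The manifest above is for the safe wrapper crate. As orientation, here is a minimal usage sketch assembled from the helper and benchmark code added later in this commit (`Environment`, `ObjectLength`, and `WriteFlags` all appear there; the temporary directory comes from the `tempfile` dev-dependency):

```rust
use libmdbx::{Environment, NoWriteMap, ObjectLength, WriteFlags};

fn main() -> libmdbx::Result<()> {
    // Create the environment in a throw-away directory.
    let dir = tempfile::tempdir().unwrap();
    let env: Environment<NoWriteMap> = Environment::new().open(dir.path())?;

    // Write a key/value pair in a read-write transaction.
    let txn = env.begin_rw_txn()?;
    let db = txn.open_db(None)?;
    txn.put(&db, b"key1", b"value1", WriteFlags::empty())?;
    txn.commit()?;

    // Read it back in a read-only transaction; `ObjectLength` decodes
    // a value into its length only.
    let txn = env.begin_ro_txn()?;
    let db = txn.open_db(None)?;
    let len = txn.get::<ObjectLength>(&db, b"key1")?.map(|l| *l);
    assert_eq!(len, Some(b"value1".len()));
    Ok(())
}
```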

crates/libmdbx-rs/LICENSE

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Dan Burkert
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,21 @@
# libmdbx-rs
Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru).
## Updating the libmdbx Version
To update the libmdbx version, clone it and copy the `dist/` folder into `mdbx-sys/`.
Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting).
```bash
# clone libmdbx into a sibling directory at a specific tag
git clone https://gitflic.ru/project/erthink/libmdbx.git ../libmdbx --branch v0.7.0
make -C ../libmdbx dist
# copy the `libmdbx/dist/` folder just created into `mdbx-sys/libmdbx`
rm -rf mdbx-sys/libmdbx
cp -R ../libmdbx/dist mdbx-sys/libmdbx
# add the changes to the next commit you will make
git add mdbx-sys/libmdbx
```


@@ -0,0 +1,120 @@
mod utils;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ffi::*;
use libmdbx::*;
use std::ptr;
use utils::*;
/// Benchmark of iterator sequential read performance.
fn bench_get_seq_iter(c: &mut Criterion) {
let n = 100;
let (_dir, env) = setup_bench_db(n);
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
c.bench_function("bench_get_seq_iter", |b| {
b.iter(|| {
let mut cursor = txn.cursor(&db).unwrap();
let mut i = 0;
let mut count = 0u32;
for (key_len, data_len) in cursor
.iter::<ObjectLength, ObjectLength>()
.map(Result::unwrap)
{
i = i + *key_len + *data_len;
count += 1;
}
for (key_len, data_len) in cursor
.iter::<ObjectLength, ObjectLength>()
.filter_map(Result::ok)
{
i = i + *key_len + *data_len;
count += 1;
}
fn iterate<K: TransactionKind>(cursor: &mut Cursor<'_, K>) -> Result<()> {
let mut i = 0;
for result in cursor.iter::<ObjectLength, ObjectLength>() {
let (key_len, data_len) = result?;
i = i + *key_len + *data_len;
}
Ok(())
}
iterate(&mut cursor).unwrap();
black_box(i);
assert_eq!(count, n);
})
});
}
/// Benchmark of cursor sequential read performance.
fn bench_get_seq_cursor(c: &mut Criterion) {
let n = 100;
let (_dir, env) = setup_bench_db(n);
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
c.bench_function("bench_get_seq_cursor", |b| {
b.iter(|| {
let (i, count) = txn
.cursor(&db)
.unwrap()
.iter::<ObjectLength, ObjectLength>()
.map(Result::unwrap)
.fold((0, 0), |(i, count), (key, val)| {
(i + *key + *val, count + 1)
});
black_box(i);
assert_eq!(count, n);
})
});
}
/// Benchmark of raw MDBX sequential read performance (control).
fn bench_get_seq_raw(c: &mut Criterion) {
let n = 100;
let (_dir, env) = setup_bench_db(n);
let dbi = env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi();
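// Keep the wrapper transaction alive for the whole benchmark; `txn` below
// is its raw `MDBX_txn` pointer, used directly through the FFI.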
let _txn = env.begin_ro_txn().unwrap();
let txn = _txn.txn();
let mut key = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut cursor: *mut MDBX_cursor = ptr::null_mut();
c.bench_function("bench_get_seq_raw", |b| {
b.iter(|| unsafe {
mdbx_cursor_open(txn, dbi, &mut cursor);
let mut i = 0;
let mut count = 0u32;
while mdbx_cursor_get(cursor, &mut key, &mut data, MDBX_NEXT) == 0 {
i += key.iov_len + data.iov_len;
count += 1;
}
black_box(i);
assert_eq!(count, n);
mdbx_cursor_close(cursor);
})
});
}
criterion_group!(
benches,
bench_get_seq_iter,
bench_get_seq_cursor,
bench_get_seq_raw
);
criterion_main!(benches);


@@ -0,0 +1,140 @@
mod utils;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ffi::*;
use libc::size_t;
use libmdbx::{ObjectLength, WriteFlags};
use rand::{prelude::SliceRandom, SeedableRng};
use rand_xorshift::XorShiftRng;
use std::ptr;
use utils::*;
fn bench_get_rand(c: &mut Criterion) {
let n = 100u32;
let (_dir, env) = setup_bench_db(n);
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut keys: Vec<String> = (0..n).map(get_key).collect();
keys.shuffle(&mut XorShiftRng::from_seed(Default::default()));
c.bench_function("bench_get_rand", |b| {
b.iter(|| {
let mut i = 0usize;
for key in &keys {
i += *txn
.get::<ObjectLength>(&db, key.as_bytes())
.unwrap()
.unwrap();
}
black_box(i);
})
});
}
fn bench_get_rand_raw(c: &mut Criterion) {
let n = 100u32;
let (_dir, env) = setup_bench_db(n);
let _txn = env.begin_ro_txn().unwrap();
let db = _txn.open_db(None).unwrap();
let mut keys: Vec<String> = (0..n).map(get_key).collect();
keys.shuffle(&mut XorShiftRng::from_seed(Default::default()));
let dbi = db.dbi();
let txn = _txn.txn();
let mut key_val: MDBX_val = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data_val: MDBX_val = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
c.bench_function("bench_get_rand_raw", |b| {
b.iter(|| unsafe {
let mut i: size_t = 0;
for key in &keys {
key_val.iov_len = key.len() as size_t;
key_val.iov_base = key.as_bytes().as_ptr() as *mut _;
mdbx_get(txn, dbi, &key_val, &mut data_val);
i += key_val.iov_len;
}
black_box(i);
})
});
}
fn bench_put_rand(c: &mut Criterion) {
let n = 100u32;
let (_dir, env) = setup_bench_db(0);
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
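// Keep the DBI handle open across the commit so the write transactions
// below can reuse it without reopening the database.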
txn.prime_for_permaopen(db);
let db = txn.commit_and_rebind_open_dbs().unwrap().1.remove(0);
let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
items.shuffle(&mut XorShiftRng::from_seed(Default::default()));
c.bench_function("bench_put_rand", |b| {
b.iter(|| {
let txn = env.begin_rw_txn().unwrap();
for &(ref key, ref data) in items.iter() {
txn.put(&db, key, data, WriteFlags::empty()).unwrap();
}
})
});
}
fn bench_put_rand_raw(c: &mut Criterion) {
let n = 100u32;
let (_dir, _env) = setup_bench_db(0);
let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
items.shuffle(&mut XorShiftRng::from_seed(Default::default()));
let dbi = _env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi();
let env = _env.env();
let mut key_val: MDBX_val = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data_val: MDBX_val = MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
c.bench_function("bench_put_rand_raw", |b| {
b.iter(|| unsafe {
let mut txn: *mut MDBX_txn = ptr::null_mut();
mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut());
let mut i: ::libc::c_int = 0;
for &(ref key, ref data) in items.iter() {
key_val.iov_len = key.len() as size_t;
key_val.iov_base = key.as_bytes().as_ptr() as *mut _;
data_val.iov_len = data.len() as size_t;
data_val.iov_base = data.as_bytes().as_ptr() as *mut _;
i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0);
}
assert_eq!(0, i);
mdbx_txn_abort(txn);
})
});
}
criterion_group!(
benches,
bench_get_rand,
bench_get_rand_raw,
bench_put_rand,
bench_put_rand_raw
);
criterion_main!(benches);


@@ -0,0 +1,26 @@
use libmdbx::{Environment, NoWriteMap, WriteFlags};
use tempfile::{tempdir, TempDir};
pub fn get_key(n: u32) -> String {
format!("key{}", n)
}
pub fn get_data(n: u32) -> String {
format!("data{}", n)
}
pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment<NoWriteMap>) {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
{
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
for i in 0..num_rows {
txn.put(&db, &get_key(i), &get_data(i), WriteFlags::empty())
.unwrap();
}
txn.commit().unwrap();
}
(dir, env)
}


@@ -0,0 +1,22 @@
[package]
name = "mdbx-sys"
version = "0.11.8-0"
edition = "2021"
license = "Apache-2.0"
description = "Rust bindings for libmdbx."
documentation = "https://docs.rs/mdbx-sys"
homepage = "https://github.com/vorot93/libmdbx-rs"
repository = "https://github.com/vorot93/libmdbx-rs"
readme = "../README.md"
keywords = ["MDBX", "database", "storage-engine", "bindings", "library"]
categories = ["database", "external-ffi-bindings"]
[lib]
name = "mdbx_sys"
[dependencies]
libc = "0.2"
[build-dependencies]
cc = "1.0"
bindgen = { version = "0.60", default-features = false, features = ["runtime"] }


@@ -0,0 +1,92 @@
use bindgen::callbacks::{IntKind, ParseCallbacks};
use std::{env, path::PathBuf};
#[derive(Debug)]
struct Callbacks;
impl ParseCallbacks for Callbacks {
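// Force libmdbx's status/error-code macros to signed `int` (several of
// them are negative); every other integer macro defaults to unsigned.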
fn int_macro(&self, name: &str, _value: i64) -> Option<IntKind> {
match name {
"MDBX_SUCCESS"
| "MDBX_KEYEXIST"
| "MDBX_NOTFOUND"
| "MDBX_PAGE_NOTFOUND"
| "MDBX_CORRUPTED"
| "MDBX_PANIC"
| "MDBX_VERSION_MISMATCH"
| "MDBX_INVALID"
| "MDBX_MAP_FULL"
| "MDBX_DBS_FULL"
| "MDBX_READERS_FULL"
| "MDBX_TLS_FULL"
| "MDBX_TXN_FULL"
| "MDBX_CURSOR_FULL"
| "MDBX_PAGE_FULL"
| "MDBX_MAP_RESIZED"
| "MDBX_INCOMPATIBLE"
| "MDBX_BAD_RSLOT"
| "MDBX_BAD_TXN"
| "MDBX_BAD_VALSIZE"
| "MDBX_BAD_DBI"
| "MDBX_LOG_DONTCHANGE"
| "MDBX_DBG_DONTCHANGE"
| "MDBX_RESULT_TRUE"
| "MDBX_UNABLE_EXTEND_MAPSIZE"
| "MDBX_PROBLEM"
| "MDBX_LAST_LMDB_ERRCODE"
| "MDBX_BUSY"
| "MDBX_EMULTIVAL"
| "MDBX_EBADSIGN"
| "MDBX_WANNA_RECOVERY"
| "MDBX_EKEYMISMATCH"
| "MDBX_TOO_LARGE"
| "MDBX_THREAD_MISMATCH"
| "MDBX_TXN_OVERLAPPING"
| "MDBX_LAST_ERRCODE" => Some(IntKind::Int),
_ => Some(IntKind::UInt),
}
}
}
fn main() {
let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap());
mdbx.push("libmdbx");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
let bindings = bindgen::Builder::default()
.header(mdbx.join("mdbx.h").to_string_lossy())
.allowlist_var("^(MDBX|mdbx)_.*")
.allowlist_type("^(MDBX|mdbx)_.*")
.allowlist_function("^(MDBX|mdbx)_.*")
.size_t_is_usize(true)
.ctypes_prefix("::libc")
.parse_callbacks(Box::new(Callbacks))
.layout_tests(false)
.prepend_enum_name(false)
.generate_comments(false)
.disable_header_comment()
.rustfmt_bindings(true)
.generate()
.expect("Unable to generate bindings");
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("Couldn't write bindings!");
let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap());
mdbx.push("libmdbx");
let mut cc_builder = cc::Build::new();
cc_builder
.flag_if_supported("-Wno-unused-parameter")
.flag_if_supported("-Wbad-function-cast")
.flag_if_supported("-Wuninitialized");
let flags = format!("{:?}", cc_builder.get_compiler().cflags_env());
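// MDBX_BUILD_FLAGS is embedded into libmdbx's build-info string;
// MDBX_TXN_CHECKOWNER=0 disables the check that a transaction is only
// used by the thread that created it.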
cc_builder
.define("MDBX_BUILD_FLAGS", flags.as_str())
.define("MDBX_TXN_CHECKOWNER", "0")
.file(mdbx.join("mdbx.c"))
.compile("libmdbx.a");
}
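
The build script above writes the generated `bindings.rs` into `OUT_DIR` and compiles the amalgamated `mdbx.c`. The crate's `lib.rs` is not part of this diff view; presumably it pulls the bindings in via the usual bindgen include pattern, roughly:

```rust
// mdbx-sys/src/lib.rs — a sketch of the conventional bindgen setup; the
// actual file is not shown in this excerpt.
#![allow(non_upper_case_globals, non_camel_case_types, non_snake_case)]

include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
```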


@@ -0,0 +1,953 @@
##
## Copyright 2020-2022 Leonid Yuriev <leo@yuriev.ru>
## and other libmdbx authors: please see AUTHORS file.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted only as authorized by the OpenLDAP
## Public License.
##
## A copy of this license is available in the file LICENSE in the
## top-level directory of the distribution or, alternatively, at
## <http://www.OpenLDAP.org/license.html>.
##
##
## libmdbx = { Revised and extended descendant of Symas LMDB. }
## Please see README.md at https://gitflic.ru/project/erthink/libmdbx
##
## Libmdbx is superior to LMDB in terms of features and reliability,
## not inferior in performance. libmdbx works on Linux, FreeBSD, MacOS X
## and other systems compliant with POSIX.1-2008, and also supports Windows
## as a complementary platform.
##
## The next version is under active non-public development and will be
## released as MithrilDB and libmithrildb for libraries & packages.
## The admittedly mythical Mithril resembles silver but is stronger and
## lighter than steel. Therefore MithrilDB is a rightly relevant name.
##
## MithrilDB will be radically different from libmdbx by the new database
## format and API based on C++17, as well as the Apache 2.0 License.
## The goal of this revolution is to provide a clearer and more robust API,
## add more features, and bring new valuable properties to the database.
##
## The Future will (be) Positive. Всё будет хорошо.
##
if(CMAKE_VERSION VERSION_LESS 3.12)
cmake_minimum_required(VERSION 3.8.2)
else()
cmake_minimum_required(VERSION 3.12)
endif()
cmake_policy(PUSH)
cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION})
if(NOT CMAKE_VERSION VERSION_LESS 3.21)
cmake_policy(SET CMP0126 NEW)
endif()
if(NOT CMAKE_VERSION VERSION_LESS 3.17)
cmake_policy(SET CMP0102 NEW)
endif()
if(NOT CMAKE_VERSION VERSION_LESS 3.15)
cmake_policy(SET CMP0091 NEW)
endif()
if(NOT CMAKE_VERSION VERSION_LESS 3.13)
cmake_policy(SET CMP0077 NEW)
endif()
if(NOT CMAKE_VERSION VERSION_LESS 3.12)
cmake_policy(SET CMP0075 NEW)
endif()
if(NOT CMAKE_VERSION VERSION_LESS 3.9)
cmake_policy(SET CMP0068 NEW)
cmake_policy(SET CMP0069 NEW)
include(CheckIPOSupported)
check_ipo_supported(RESULT CMAKE_INTERPROCEDURAL_OPTIMIZATION_AVAILABLE)
else()
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_AVAILABLE FALSE)
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/test/CMakeLists.txt" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/core.c" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/alloy.c" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/version.c.in" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/man1" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/mdbx_chk.c" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/mdbx.c++")
set(MDBX_AMALGAMATED_SOURCE FALSE)
find_program(GIT git)
if(NOT GIT)
message(SEND_ERROR "Git command-line tool not found")
endif()
set(MDBX_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src")
elseif(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION.txt" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/mdbx.c" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/mdbx.c++" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/config.h.in" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/man1" AND
EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/mdbx_chk.c")
set(MDBX_AMALGAMATED_SOURCE TRUE)
set(MDBX_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
else()
message(FATAL_ERROR "\n"
"Please don't use tarballs nor zips which are automatically provided by Github! "
"These archives do not contain version information and thus are unfit to build libmdbx. "
"You can vote for ability of disabling auto-creation such unsuitable archives at https://github.community/t/disable-tarball\n"
"Instead of above, just clone the git repository, either download a tarball or zip with the properly amalgamated source core. "
"For embedding libmdbx use a git-submodule or the amalgamated source code.\n"
"Please, avoid using any other techniques.")
endif()
if(DEFINED PROJECT_NAME)
option(MDBX_FORCE_BUILD_AS_MAIN_PROJECT "Force libmdbx to full control build options even it added as a subdirectory to your project." OFF)
endif()
if(DEFINED PROJECT_NAME AND NOT MDBX_FORCE_BUILD_AS_MAIN_PROJECT)
set(SUBPROJECT ON)
set(NOT_SUBPROJECT OFF)
if(NOT MDBX_AMALGAMATED_SOURCE AND NOT DEFINED BUILD_TESTING)
set(BUILD_TESTING OFF)
endif()
enable_language(C)
else()
set(SUBPROJECT OFF)
set(NOT_SUBPROJECT ON)
project(libmdbx C)
if(NOT MDBX_AMALGAMATED_SOURCE AND NOT DEFINED BUILD_TESTING)
set(BUILD_TESTING ON)
endif()
endif()
if(NOT MDBX_AMALGAMATED_SOURCE)
include(CTest)
option(MDBX_ENABLE_TESTS "Build libmdbx tests." ${BUILD_TESTING})
elseif(DEFINED MDBX_ENABLE_TESTS AND MDBX_ENABLE_TESTS)
message(WARNING "MDBX_ENABLE_TESTS=${MDBX_ENABLE_TESTS}: But amalgamated source code don't includes tests.")
set(MDBX_ENABLE_TESTS OFF)
endif()
# Try to find a C++ compiler unless sure that this is unnecessary.
if (NOT CMAKE_CXX_COMPILER_LOADED)
include(CheckLanguage)
if(NOT DEFINED MDBX_BUILD_CXX OR MDBX_BUILD_CXX
OR (NOT MDBX_AMALGAMATED_SOURCE AND (NOT DEFINED MDBX_ENABLE_TESTS OR MDBX_ENABLE_TESTS)))
check_language(CXX)
if(CMAKE_CXX_COMPILER)
enable_language(CXX)
endif()
endif()
else()
enable_language(CXX)
endif()
# Set default build type to Release. This is to ease a User's life.
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING
"Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel."
FORCE)
endif()
string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPERCASE)
if(NOT_SUBPROJECT AND (CMAKE_CROSSCOMPILING OR IOS))
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
endif()
if(NOT "$ENV{TEAMCITY_PROCESS_FLOW_ID}" STREQUAL "")
set(CI TEAMCITY)
message(STATUS "TeamCity CI")
elseif(NOT "$ENV{TRAVIS}" STREQUAL "")
set(CI TRAVIS)
message(STATUS "Travis CI")
elseif(NOT "$ENV{CIRCLECI}" STREQUAL "")
set(CI CIRCLE)
message(STATUS "Circle CI")
elseif(NOT "$ENV{APPVEYOR}" STREQUAL "")
set(CI APPVEYOR)
message(STATUS "AppVeyor CI")
elseif(NOT "$ENV{CI}" STREQUAL "")
set(CI "$ENV{CI}")
message(STATUS "Other CI (${CI})")
else()
message(STATUS "Assume No any CI environment")
unset(CI)
endif()
# output all mdbx-related targets in single directory
if(NOT DEFINED MDBX_OUTPUT_DIR)
set(MDBX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
endif()
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${MDBX_OUTPUT_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${MDBX_OUTPUT_DIR})
set(CMAKE_PDB_OUTPUT_DIRECTORY ${MDBX_OUTPUT_DIR})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${MDBX_OUTPUT_DIR})
include(CheckFunctionExists)
include(FindPackageMessage)
include(GNUInstallDirs)
if(CMAKE_C_COMPILER_ID STREQUAL "MSVC" AND MSVC_VERSION LESS 1900)
message(SEND_ERROR "MSVC compiler ${MSVC_VERSION} is too old for building MDBX."
" At least 'Microsoft Visual Studio 2015' is required.")
endif()
if(NOT DEFINED THREADS_PREFER_PTHREAD_FLAG)
set(THREADS_PREFER_PTHREAD_FLAG TRUE)
endif()
find_package(Threads REQUIRED)
include(cmake/utils.cmake)
include(cmake/compiler.cmake)
include(cmake/profile.cmake)
# Workaround for `-pthread` toolchain/cmake bug
if(NOT APPLE AND NOT MSVC
AND CMAKE_USE_PTHREADS_INIT AND NOT CMAKE_THREAD_LIBS_INIT
AND (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG))
check_compiler_flag("-pthread" CC_HAS_PTHREAD)
if(CC_HAS_PTHREAD AND NOT CMAKE_EXE_LINKER_FLAGS MATCHES "-pthread")
message(STATUS "Force add -pthread for linker flags to avoid troubles")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pthread")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -pthread")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -pthread")
endif()
endif()
CHECK_FUNCTION_EXISTS(pow NOT_NEED_LIBM)
if(NOT_NEED_LIBM)
set(LIB_MATH "")
else()
set(CMAKE_REQUIRED_LIBRARIES m)
CHECK_FUNCTION_EXISTS(pow HAVE_LIBM)
if(HAVE_LIBM)
set(LIB_MATH m)
else()
message(FATAL_ERROR "No libm found for math support")
endif()
endif()
if(SUBPROJECT)
if(NOT DEFINED BUILD_SHARED_LIBS)
option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)" OFF)
endif()
if(NOT DEFINED CMAKE_POSITION_INDEPENDENT_CODE)
option(CMAKE_POSITION_INDEPENDENT_CODE "Generate position independent (PIC)" ON)
endif()
else()
option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)" ON)
option(CMAKE_POSITION_INDEPENDENT_CODE "Generate position independent (PIC)" ON)
if (CC_HAS_ARCH_NATIVE)
option(BUILD_FOR_NATIVE_CPU "Generate code for the compiling machine CPU" OFF)
endif()
if(CMAKE_CONFIGURATION_TYPES OR NOT CMAKE_BUILD_TYPE_UPPERCASE STREQUAL "DEBUG")
set(INTERPROCEDURAL_OPTIMIZATION_DEFAULT ON)
else()
set(INTERPROCEDURAL_OPTIMIZATION_DEFAULT OFF)
endif()
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION_AVAILABLE
OR GCC_LTO_AVAILABLE OR MSVC_LTO_AVAILABLE OR
(CLANG_LTO_AVAILABLE AND
((DEFINED MDBX_ENABLE_TESTS AND NOT MDBX_ENABLE_TESTS)
OR NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.0)))
option(INTERPROCEDURAL_OPTIMIZATION "Enable interprocedural/LTO optimization" ${INTERPROCEDURAL_OPTIMIZATION_DEFAULT})
else()
set(INTERPROCEDURAL_OPTIMIZATION OFF)
endif()
if(INTERPROCEDURAL_OPTIMIZATION)
if(GCC_LTO_AVAILABLE)
set(LTO_ENABLED TRUE)
set(CMAKE_AR ${CMAKE_GCC_AR} CACHE PATH "Path to ar program with LTO-plugin" FORCE)
set(CMAKE_NM ${CMAKE_GCC_NM} CACHE PATH "Path to nm program with LTO-plugin" FORCE)
set(CMAKE_RANLIB ${CMAKE_GCC_RANLIB} CACHE PATH "Path to ranlib program with LTO-plugin" FORCE)
message(STATUS "MDBX indulge Link-Time Optimization by GCC")
elseif(CLANG_LTO_AVAILABLE)
set(LTO_ENABLED TRUE)
set(CMAKE_AR ${CMAKE_CLANG_AR} CACHE PATH "Path to ar program with LTO-plugin" FORCE)
set(CMAKE_NM ${CMAKE_CLANG_NM} CACHE PATH "Path to nm program with LTO-plugin" FORCE)
set(CMAKE_RANLIB ${CMAKE_CLANG_RANLIB} CACHE PATH "Path to ranlib program with LTO-plugin" FORCE)
message(STATUS "MDBX indulge Link-Time Optimization by CLANG")
elseif(MSVC_LTO_AVAILABLE)
set(LTO_ENABLED TRUE)
message(STATUS "MDBX indulge Link-Time Optimization by MSVC")
elseif(CMAKE_INTERPROCEDURAL_OPTIMIZATION_AVAILABLE)
message(STATUS "MDBX indulge Interprocedural Optimization by CMake")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
set(LTO_ENABLED TRUE)
else()
message(WARNING "Unable to engage interprocedural/LTO optimization.")
endif()
else()
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION FALSE)
set(LTO_ENABLED FALSE)
endif()
if(NOT MDBX_AMALGAMATED_SOURCE)
find_program(VALGRIND valgrind)
if(VALGRIND)
# LY: cmake is ugly and nasty.
# - therefore memcheck-options should be defined before including ctest;
# - otherwise ctest may ignore it.
set(MEMORYCHECK_SUPPRESSIONS_FILE
"${CMAKE_CURRENT_SOURCE_DIR}/test/valgrind_suppress.txt"
CACHE FILEPATH "Suppressions file for Valgrind" FORCE)
set(MEMORYCHECK_COMMAND_OPTIONS
"--trace-children=yes --leak-check=full --track-origins=yes --error-exitcode=42 --error-markers=@ --errors-for-leak-kinds=definite --fair-sched=yes --suppressions=${MEMORYCHECK_SUPPRESSIONS_FILE}"
CACHE STRING "Valgrind options" FORCE)
set(VALGRIND_COMMAND_OPTIONS "${MEMORYCHECK_COMMAND_OPTIONS}" CACHE STRING "Valgrind options" FORCE)
endif()
# Enable 'make tags' target.
find_program(CTAGS ctags)
if(CTAGS)
add_custom_target(tags COMMAND ${CTAGS} -R -f tags
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
add_custom_target(ctags DEPENDS tags)
endif(CTAGS)
if(UNIX)
find_program(CLANG_FORMAT
NAMES clang-format-13 clang-format)
if(CLANG_FORMAT)
execute_process(COMMAND ${CLANG_FORMAT} "--version" OUTPUT_VARIABLE clang_format_version_info)
string(REGEX MATCH "version ([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" clang_format_version_info CLANG_FORMAT_VERSION)
if(clang_format_version_info AND NOT CLANG_FORMAT_VERSION VERSION_LESS 13.0)
# Enable 'make reformat' target.
add_custom_target(reformat
VERBATIM
COMMAND
git ls-files |
grep -E \\.\(c|cxx|cc|cpp|h|hxx|hpp\)\(\\.in\)?\$ |
xargs ${CLANG_FORMAT} -i --style=file
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
endif()
endif()
endif()
if(NOT "${PROJECT_BINARY_DIR}" STREQUAL "${PROJECT_SOURCE_DIR}")
add_custom_target(distclean)
add_custom_command(TARGET distclean
COMMAND ${CMAKE_COMMAND} -E remove_directory "${PROJECT_BINARY_DIR}"
COMMENT "Removing the build directory and its content")
elseif(IS_DIRECTORY .git AND GIT)
add_custom_target(distclean)
add_custom_command(TARGET distclean
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMAND ${GIT} submodule foreach --recursive git clean -f -X -d
COMMAND ${GIT} clean -f -X -d
COMMENT "Removing all build files from the source directory")
endif()
endif(NOT MDBX_AMALGAMATED_SOURCE)
setup_compile_flags()
endif(SUBPROJECT)
list(FIND CMAKE_C_COMPILE_FEATURES c_std_11 HAS_C11)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_11 HAS_CXX11)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_14 HAS_CXX14)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_17 HAS_CXX17)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_20 HAS_CXX20)
list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_23 HAS_CXX23)
if(NOT DEFINED MDBX_CXX_STANDARD)
if(DEFINED ENV{CMAKE_CXX_STANDARD})
set(CMAKE_CXX_STANDARD $ENV{CMAKE_CXX_STANDARD})
endif()
if(DEFINED CMAKE_CXX_STANDARD)
set(MDBX_CXX_STANDARD ${CMAKE_CXX_STANDARD})
elseif(NOT HAS_CXX23 LESS 0
AND NOT (CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12))
set(MDBX_CXX_STANDARD 23)
elseif(NOT HAS_CXX20 LESS 0
AND NOT (CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10))
set(MDBX_CXX_STANDARD 20)
elseif(NOT HAS_CXX17 LESS 0
AND NOT (CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5))
set(MDBX_CXX_STANDARD 17)
elseif(NOT HAS_CXX14 LESS 0)
set(MDBX_CXX_STANDARD 14)
elseif(NOT HAS_CXX11 LESS 0)
set(MDBX_CXX_STANDARD 11)
else()
set(MDBX_CXX_STANDARD 98)
endif()
endif()
if(NOT DEFINED MDBX_C_STANDARD)
# MSVC >= 19.28 (Microsoft Visual Studio 16.8) is mad!
# It is unable to process Windows SDK headers in C11 mode!
if(MSVC AND MSVC_VERSION GREATER 1927 AND NOT MSVC_VERSION GREATER 1929)
set(MDBX_C_STANDARD 99)
set(C_FALLBACK_11 OFF)
set(C_FALLBACK_GNU11 OFF)
elseif(HAS_C11 LESS 0 AND NOT C_FALLBACK_GNU11 AND NOT C_FALLBACK_11)
set(MDBX_C_STANDARD 99)
else()
set(MDBX_C_STANDARD 11)
endif()
endif()
if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows" AND EXISTS "${MDBX_SOURCE_DIR}/ntdll.def")
if(MSVC)
if(NOT MSVC_LIB_EXE)
# Find lib.exe
get_filename_component(CL_NAME ${CMAKE_C_COMPILER} NAME)
string(REPLACE cl.exe lib.exe MSVC_LIB_EXE ${CL_NAME})
find_program(MSVC_LIB_EXE ${MSVC_LIB_EXE})
endif()
if(MSVC_LIB_EXE)
message(STATUS "Found MSVC's lib tool: ${MSVC_LIB_EXE}")
set(MDBX_NTDLL_EXTRA_IMPLIB "${CMAKE_CURRENT_BINARY_DIR}/mdbx_ntdll_extra.lib")
add_custom_command(OUTPUT "${MDBX_NTDLL_EXTRA_IMPLIB}"
COMMENT "Create extra-import-library for ntdll.dll"
MAIN_DEPENDENCY "${MDBX_SOURCE_DIR}/ntdll.def"
COMMAND ${MSVC_LIB_EXE} /def:"${MDBX_SOURCE_DIR}/ntdll.def" /out:"${MDBX_NTDLL_EXTRA_IMPLIB}" ${INITIAL_CMAKE_STATIC_LINKER_FLAGS})
else()
message(WARNING "MSVC's lib tool not found")
endif()
elseif(MINGW OR MINGW64)
if(NOT DLLTOOL)
# Find dlltool
get_filename_component(GCC_NAME ${CMAKE_C_COMPILER} NAME)
string(REPLACE gcc dlltool DLLTOOL_NAME ${GCC_NAME})
find_program(DLLTOOL NAMES ${DLLTOOL_NAME})
endif()
if(DLLTOOL)
message(STATUS "Found dlltool: ${DLLTOOL}")
set(MDBX_NTDLL_EXTRA_IMPLIB "${CMAKE_CURRENT_BINARY_DIR}/mdbx_ntdll_extra.a")
add_custom_command(OUTPUT "${MDBX_NTDLL_EXTRA_IMPLIB}"
COMMENT "Create extra-import-library for ntdll.dll"
MAIN_DEPENDENCY "${MDBX_SOURCE_DIR}/ntdll.def"
COMMAND ${DLLTOOL} -d "${MDBX_SOURCE_DIR}/ntdll.def" -l "${MDBX_NTDLL_EXTRA_IMPLIB}")
else()
message(WARNING "dlltool not found")
endif()
endif()
if(MDBX_NTDLL_EXTRA_IMPLIB)
# LY: Sometimes CMake requires a nightmarish magic for simple things.
# 1) create a target out of the library compilation result
add_custom_target(ntdll_extra_target DEPENDS "${MDBX_NTDLL_EXTRA_IMPLIB}")
# 2) create a library target out of the library compilation result
add_library(ntdll_extra STATIC IMPORTED GLOBAL)
add_dependencies(ntdll_extra ntdll_extra_target)
# 3) specify where the library is (and where to find the headers)
set_target_properties(ntdll_extra
PROPERTIES
IMPORTED_LOCATION "${MDBX_NTDLL_EXTRA_IMPLIB}")
endif()
endif()
################################################################################
################################################################################
#
# #### ##### ##### # #### # # ####
# # # # # # # # # ## # #
# # # # # # # # # # # # ####
# # # ##### # # # # # # # #
# # # # # # # # # ## # #
# #### # # # #### # # ####
#
set(MDBX_BUILD_OPTIONS ENABLE_UBSAN ENABLE_ASAN MDBX_USE_VALGRIND ENABLE_GPROF ENABLE_GCOV)
macro(add_mdbx_option NAME DESCRIPTION DEFAULT)
list(APPEND MDBX_BUILD_OPTIONS ${NAME})
if(NOT ${DEFAULT} STREQUAL "AUTO")
option(${NAME} "${DESCRIPTION}" ${DEFAULT})
elseif(NOT DEFINED ${NAME})
set(${NAME}_AUTO ON)
endif()
endmacro()
if(IOS)
set(MDBX_BUILD_TOOLS_DEFAULT OFF)
if(NOT_SUBPROJECT)
cmake_policy(SET CMP0006 OLD)
set(CMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO")
endif()
else()
set(MDBX_BUILD_TOOLS_DEFAULT ON)
endif()
add_mdbx_option(MDBX_INSTALL_STATIC "Build and install libmdbx for static linking" OFF)
add_mdbx_option(MDBX_BUILD_SHARED_LIBRARY "Build libmdbx as shared library (DLL)" ${BUILD_SHARED_LIBS})
add_mdbx_option(MDBX_BUILD_TOOLS "Build MDBX tools (mdbx_chk/stat/dump/load/copy)" ${MDBX_BUILD_TOOLS_DEFAULT})
CMAKE_DEPENDENT_OPTION(MDBX_INSTALL_MANPAGES "Install man-pages for MDBX tools (mdbx_chk/stat/dump/load/copy)" ON MDBX_BUILD_TOOLS OFF)
add_mdbx_option(MDBX_TXN_CHECKOWNER "Checking transaction matches the calling thread inside libmdbx's API" ON)
add_mdbx_option(MDBX_ENV_CHECKPID "Paranoid checking PID inside libmdbx's API" AUTO)
mark_as_advanced(MDBX_ENV_CHECKPID)
if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
add_mdbx_option(MDBX_DISABLE_GNU_SOURCE "Don't use GNU/Linux libc extensions" OFF)
mark_as_advanced(MDBX_DISABLE_GNU_SOURCE)
endif()
if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" OR IOS)
add_mdbx_option(MDBX_OSX_SPEED_INSTEADOF_DURABILITY "Disable use fcntl(F_FULLFSYNC) in favor of speed" OFF)
mark_as_advanced(MDBX_OSX_SPEED_INSTEADOF_DURABILITY)
endif()
if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
if(MDBX_NTDLL_EXTRA_IMPLIB)
add_mdbx_option(MDBX_WITHOUT_MSVC_CRT "Avoid dependence from MSVC CRT and use ntdll.dll instead" OFF)
endif()
else()
add_mdbx_option(MDBX_USE_OFDLOCKS "Use Open file description locks (aka OFD locks, non-POSIX)" AUTO)
mark_as_advanced(MDBX_USE_OFDLOCKS)
endif()
add_mdbx_option(MDBX_LOCKING "Locking method (Win32=-1, SysV=5, POSIX=1988, POSIX=2001, POSIX=2008, Futexes=1995)" AUTO)
mark_as_advanced(MDBX_LOCKING)
add_mdbx_option(MDBX_TRUST_RTC "Does a system have battery-backed Real-Time Clock or just a fake" AUTO)
mark_as_advanced(MDBX_TRUST_RTC)
option(MDBX_FORCE_ASSERTIONS "Force enable assertion checking" OFF)
option(MDBX_DISABLE_PAGECHECKS "Disable some checks to reduce overhead and lower the probability of detecting database corruption to values closer to LMDB's" OFF)
if(NOT MDBX_AMALGAMATED_SOURCE)
if(CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE_UPPERCASE STREQUAL "DEBUG")
set(MDBX_ALLOY_BUILD_DEFAULT OFF)
else()
set(MDBX_ALLOY_BUILD_DEFAULT ON)
endif()
add_mdbx_option(MDBX_ALLOY_BUILD "Build MDBX library through single/alloyed object file" ${MDBX_ALLOY_BUILD_DEFAULT})
endif()
if((MDBX_BUILD_TOOLS OR MDBX_ENABLE_TESTS) AND MDBX_BUILD_SHARED_LIBRARY)
add_mdbx_option(MDBX_LINK_TOOLS_NONSTATIC "Link MDBX tools with non-static libmdbx" OFF)
else()
unset(MDBX_LINK_TOOLS_NONSTATIC CACHE)
endif()
if(CMAKE_CXX_COMPILER_LOADED AND MDBX_CXX_STANDARD GREATER_EQUAL 11 AND MDBX_CXX_STANDARD LESS 83)
if(NOT MDBX_AMALGAMATED_SOURCE)
option(MDBX_ENABLE_TESTS "Build MDBX tests" ${BUILD_TESTING})
endif()
if(NOT MDBX_WITHOUT_MSVC_CRT
AND NOT (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
AND NOT (CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.9)
AND NOT (MSVC AND MSVC_VERSION LESS 1900))
option(MDBX_BUILD_CXX "Build C++ portion" ON)
else()
set(MDBX_BUILD_CXX FALSE)
endif()
else()
set(MDBX_BUILD_CXX FALSE)
set(MDBX_ENABLE_TESTS FALSE)
endif()
################################################################################
################################################################################
if(MDBX_BUILD_CXX AND NOT CMAKE_CXX_COMPILER_LOADED)
message(FATAL_ERROR "MDBX_BUILD_CXX=${MDBX_BUILD_CXX}: The C++ compiler is required to build the C++API.")
endif()
if(MDBX_BUILD_CXX)
# determine library for C++ std::filesystem
probe_libcxx_filesystem()
endif()
# Get version
fetch_version(MDBX "${CMAKE_CURRENT_SOURCE_DIR}" FALSE)
message(STATUS "libmdbx version is ${MDBX_VERSION}")
# sources list
set(LIBMDBX_PUBLIC_HEADERS mdbx.h)
set(LIBMDBX_SOURCES mdbx.h "${CMAKE_CURRENT_BINARY_DIR}/config.h")
if(MDBX_AMALGAMATED_SOURCE)
list(APPEND LIBMDBX_SOURCES mdbx.c)
else()
# generate version file
configure_file("${MDBX_SOURCE_DIR}/version.c.in"
"${CMAKE_CURRENT_BINARY_DIR}/version.c" ESCAPE_QUOTES)
file(SHA256 "${CMAKE_CURRENT_BINARY_DIR}/version.c" MDBX_SOURCERY_DIGEST)
string(MAKE_C_IDENTIFIER "${MDBX_GIT_DESCRIBE}" MDBX_SOURCERY_SUFFIX)
set(MDBX_BUILD_SOURCERY "${MDBX_SOURCERY_DIGEST}_${MDBX_SOURCERY_SUFFIX}")
if(MDBX_ALLOY_BUILD)
list(APPEND LIBMDBX_SOURCES "${MDBX_SOURCE_DIR}/alloy.c")
include_directories("${MDBX_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
else()
list(APPEND LIBMDBX_SOURCES
"${CMAKE_CURRENT_BINARY_DIR}/version.c"
"${MDBX_SOURCE_DIR}/options.h" "${MDBX_SOURCE_DIR}/base.h"
"${MDBX_SOURCE_DIR}/internals.h" "${MDBX_SOURCE_DIR}/osal.h"
"${MDBX_SOURCE_DIR}/core.c" "${MDBX_SOURCE_DIR}/osal.c"
"${MDBX_SOURCE_DIR}/lck-posix.c" "${MDBX_SOURCE_DIR}/lck-windows.c")
include_directories("${MDBX_SOURCE_DIR}")
endif()
endif(MDBX_AMALGAMATED_SOURCE)
if(MDBX_BUILD_CXX)
message(STATUS "Use C${MDBX_C_STANDARD} and C++${MDBX_CXX_STANDARD} for libmdbx")
list(APPEND LIBMDBX_PUBLIC_HEADERS mdbx.h++)
list(APPEND LIBMDBX_SOURCES "${MDBX_SOURCE_DIR}/mdbx.c++" mdbx.h++)
else()
message(STATUS "Use C${MDBX_C_STANDARD} for libmdbx but C++ portion is disabled")
endif()
if(SUBPROJECT AND MSVC)
if(MSVC_VERSION LESS 1900)
message(FATAL_ERROR "At least \"Microsoft C/C++ Compiler\" version 19.0.24234.1 (Visual Studio 2015 Update 3) is required.")
endif()
add_compile_options("/utf-8")
endif()
macro(target_setup_options TARGET)
if(DEFINED INTERPROCEDURAL_OPTIMIZATION)
set_target_properties(${TARGET} PROPERTIES
INTERPROCEDURAL_OPTIMIZATION $<BOOL:${INTERPROCEDURAL_OPTIMIZATION}>)
endif()
if(NOT C_FALLBACK_GNU11 AND NOT C_FALLBACK_11)
set_target_properties(${TARGET} PROPERTIES
C_STANDARD ${MDBX_C_STANDARD} C_STANDARD_REQUIRED ON)
endif()
if(MDBX_BUILD_CXX)
set_target_properties(${TARGET} PROPERTIES
CXX_STANDARD ${MDBX_CXX_STANDARD} CXX_STANDARD_REQUIRED ON)
if(MSVC AND NOT MSVC_VERSION LESS 1910)
target_compile_options(${TARGET} INTERFACE "/Zc:__cplusplus")
endif()
endif()
if(CC_HAS_FASTMATH
AND NOT (CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10))
target_compile_options(${TARGET} PRIVATE "-ffast-math")
endif()
if(CC_HAS_VISIBILITY)
target_compile_options(${TARGET} PRIVATE "-fvisibility=hidden")
endif()
if(BUILD_FOR_NATIVE_CPU AND CC_HAS_ARCH_NATIVE)
target_compile_options(${TARGET} PRIVATE "-march=native")
endif()
endmacro()
macro(libmdbx_setup_libs TARGET MODE)
target_link_libraries(${TARGET} ${MODE} Threads::Threads)
if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
target_link_libraries(${TARGET} ${MODE} ntdll advapi32)
if(MDBX_NTDLL_EXTRA_IMPLIB AND MDBX_WITHOUT_MSVC_CRT)
target_link_libraries(${TARGET} ${MODE} ntdll_extra)
endif()
elseif(${CMAKE_SYSTEM_NAME} STREQUAL "SunOS" OR ${CMAKE_SYSTEM_NAME} STREQUAL "Solaris")
target_link_libraries(${TARGET} ${MODE} kstat)
elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
target_link_libraries(${TARGET} ${MODE} log)
endif()
if(LIBCXX_FILESYSTEM AND MDBX_BUILD_CXX)
if(CMAKE_COMPILER_IS_ELBRUSCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 1.25.23
AND NOT CMAKE_VERSION VERSION_LESS 3.13)
target_link_options(${TARGET} PUBLIC "-Wl,--allow-multiple-definition")
endif()
target_link_libraries(${TARGET} PUBLIC ${LIBCXX_FILESYSTEM})
endif()
endmacro()
# build static library
if(MDBX_INSTALL_STATIC)
add_library(mdbx-static STATIC ${LIBMDBX_SOURCES})
else()
add_library(mdbx-static STATIC EXCLUDE_FROM_ALL ${LIBMDBX_SOURCES})
endif()
set_target_properties(mdbx-static PROPERTIES PUBLIC_HEADER "${LIBMDBX_PUBLIC_HEADERS}")
target_compile_definitions(mdbx-static PRIVATE MDBX_BUILD_SHARED_LIBRARY=0)
target_setup_options(mdbx-static)
libmdbx_setup_libs(mdbx-static INTERFACE)
if(MDBX_BUILD_SHARED_LIBRARY)
set_target_properties(mdbx-static PROPERTIES OUTPUT_NAME mdbx-static)
else()
set_target_properties(mdbx-static PROPERTIES OUTPUT_NAME mdbx)
endif()
target_include_directories(mdbx-static INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}")
################################################################################
# build shared library
if(MDBX_BUILD_SHARED_LIBRARY)
add_library(mdbx SHARED ${LIBMDBX_SOURCES})
set_target_properties(mdbx PROPERTIES PUBLIC_HEADER "${LIBMDBX_PUBLIC_HEADERS}")
target_compile_definitions(mdbx PRIVATE LIBMDBX_EXPORTS MDBX_BUILD_SHARED_LIBRARY=1 INTERFACE LIBMDBX_IMPORTS)
target_setup_options(mdbx)
libmdbx_setup_libs(mdbx PRIVATE)
if(MSVC)
if(MDBX_NTDLL_EXTRA_IMPLIB AND MDBX_WITHOUT_MSVC_CRT)
set_property(TARGET mdbx PROPERTY LINKER_FLAGS "/NODEFAULTLIB")
else()
set_property(TARGET mdbx PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL")
endif()
endif()
if(CC_HAS_VISIBILITY AND (LTO_ENABLED OR INTERPROCEDURAL_OPTIMIZATION))
set_target_properties(mdbx PROPERTIES LINK_FLAGS "-fvisibility=hidden")
endif()
list(APPEND MDBX_BUILD_FLAGS ${CMAKE_SHARED_LINKER_FLAGS})
target_include_directories(mdbx INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
if(MDBX_BUILD_SHARED_LIBRARY AND MDBX_LINK_TOOLS_NONSTATIC)
set(TOOL_MDBX_LIB mdbx)
# use, i.e. don't skip the full RPATH for the build tree
set(CMAKE_SKIP_BUILD_RPATH FALSE)
# when building, don't use the install RPATH already (but later on when installing)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
# the RPATH to be used when installing, but only if it's not a system directory
list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib" isSystemDir)
if(isSystemDir EQUAL -1)
if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
set(CMAKE_INSTALL_RPATH "@executable_path/../lib")
else()
set(CMAKE_INSTALL_RPATH "\$ORIGIN/../lib")
endif()
endif()
else()
set(TOOL_MDBX_LIB mdbx-static)
endif()
# build mdbx-tools
if(MDBX_BUILD_TOOLS)
if(NOT MDBX_AMALGAMATED_SOURCE AND ${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
set(WINGETOPT_SRC ${MDBX_SOURCE_DIR}/wingetopt.c ${MDBX_SOURCE_DIR}/wingetopt.h)
else()
set(WINGETOPT_SRC "")
endif()
foreach(TOOL mdbx_chk mdbx_copy mdbx_stat mdbx_dump mdbx_load mdbx_drop)
add_executable(${TOOL} mdbx.h ${MDBX_SOURCE_DIR}/${TOOL}.c ${WINGETOPT_SRC})
if(NOT C_FALLBACK_GNU11 AND NOT C_FALLBACK_11)
set_target_properties(${TOOL} PROPERTIES
C_STANDARD ${MDBX_C_STANDARD} C_STANDARD_REQUIRED ON)
endif()
target_setup_options(${TOOL})
target_link_libraries(${TOOL} ${TOOL_MDBX_LIB})
endforeach()
if(LIB_MATH)
target_link_libraries(mdbx_chk ${LIB_MATH})
target_link_libraries(mdbx_stat ${LIB_MATH})
endif()
endif()
################################################################################
# mdbx-shared-lib installation
if(NOT DEFINED MDBX_DLL_INSTALL_DESTINATION)
if(WIN32)
set(MDBX_DLL_INSTALL_DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
set(MDBX_DLL_INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
endif()
if(MDBX_BUILD_SHARED_LIBRARY)
if(CMAKE_VERSION VERSION_LESS 3.12)
install(TARGETS mdbx EXPORT libmdbx
LIBRARY DESTINATION ${MDBX_DLL_INSTALL_DESTINATION} COMPONENT runtime
OBJECTS DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel)
else()
install(TARGETS mdbx EXPORT libmdbx
LIBRARY DESTINATION ${MDBX_DLL_INSTALL_DESTINATION} COMPONENT runtime
NAMELINK_COMPONENT devel
OBJECTS DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel)
endif()
endif(MDBX_BUILD_SHARED_LIBRARY)
# mdbx-tools installation
if(MDBX_BUILD_TOOLS)
if(NOT DEFINED MDBX_TOOLS_INSTALL_DESTINATION)
set(MDBX_TOOLS_INSTALL_DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
install(
TARGETS
mdbx_chk
mdbx_stat
mdbx_copy
mdbx_dump
mdbx_load
mdbx_drop
RUNTIME
DESTINATION ${MDBX_TOOLS_INSTALL_DESTINATION}
COMPONENT runtime)
if(MDBX_INSTALL_MANPAGES)
if(NOT DEFINED MDBX_MAN_INSTALL_DESTINATION)
set(MDBX_MAN_INSTALL_DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
endif()
install(
FILES
"${MDBX_SOURCE_DIR}/man1/mdbx_chk.1"
"${MDBX_SOURCE_DIR}/man1/mdbx_stat.1"
"${MDBX_SOURCE_DIR}/man1/mdbx_copy.1"
"${MDBX_SOURCE_DIR}/man1/mdbx_dump.1"
"${MDBX_SOURCE_DIR}/man1/mdbx_load.1"
"${MDBX_SOURCE_DIR}/man1/mdbx_drop.1"
DESTINATION ${MDBX_MAN_INSTALL_DESTINATION}
COMPONENT doc)
endif()
endif(MDBX_BUILD_TOOLS)
# mdbx-static-lib installation
if(MDBX_INSTALL_STATIC)
if(CMAKE_VERSION VERSION_LESS 3.12)
install(TARGETS mdbx-static EXPORT libmdbx
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
OBJECTS DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel)
else()
install(TARGETS mdbx-static EXPORT libmdbx
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
NAMELINK_COMPONENT devel
OBJECTS DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT devel
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} COMPONENT devel)
endif()
endif(MDBX_INSTALL_STATIC)
################################################################################
# collect options & build info
if(NOT DEFINED MDBX_BUILD_TIMESTAMP)
string(TIMESTAMP MDBX_BUILD_TIMESTAMP UTC)
endif()
set(MDBX_BUILD_FLAGS ${CMAKE_C_FLAGS})
if(MDBX_BUILD_CXX)
set(MDBX_BUILD_FLAGS ${CMAKE_CXX_FLAGS})
endif()
# append cmake's build-type flags and defines
if(NOT CMAKE_CONFIGURATION_TYPES)
list(APPEND MDBX_BUILD_FLAGS ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}})
if(MDBX_BUILD_CXX)
list(APPEND MDBX_BUILD_FLAGS ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}})
endif()
endif()
# choice target to fetch definitions and options
if(MDBX_BUILD_SHARED_LIBRARY)
set(target4fetch mdbx)
else()
set(target4fetch mdbx-static)
endif()
# get definitions
get_target_property(defs_list ${target4fetch} COMPILE_DEFINITIONS)
if(defs_list)
list(APPEND MDBX_BUILD_FLAGS ${defs_list})
endif()
# get target compile options
get_target_property(options_list ${target4fetch} COMPILE_OPTIONS)
if(options_list)
list(APPEND MDBX_BUILD_FLAGS ${options_list})
endif()
list(REMOVE_DUPLICATES MDBX_BUILD_FLAGS)
string(REPLACE ";" " " MDBX_BUILD_FLAGS "${MDBX_BUILD_FLAGS}")
if(CMAKE_CONFIGURATION_TYPES)
# add dynamic part via per-configuration define
message(STATUS "MDBX Compile Flags: ${MDBX_BUILD_FLAGS} <AND CONFIGURATION DEPENDENT>")
add_definitions(-DMDBX_BUILD_FLAGS_CONFIG="$<$<CONFIG:Debug>:${CMAKE_C_FLAGS_DEBUG} ${CMAKE_C_DEFINES_DEBUG}>$<$<CONFIG:Release>:${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_DEFINES_RELEASE}>$<$<CONFIG:RelWithDebInfo>:${CMAKE_C_FLAGS_RELWITHDEBINFO} ${CMAKE_C_DEFINES_RELWITHDEBINFO}>$<$<CONFIG:MinSizeRel>:${CMAKE_C_FLAGS_MINSIZEREL} ${CMAKE_C_DEFINES_MINSIZEREL}>")
else()
message(STATUS "MDBX Compile Flags: ${MDBX_BUILD_FLAGS}")
endif()
# get compiler info
execute_process(COMMAND sh -c "${CMAKE_C_COMPILER} --version | head -1"
OUTPUT_VARIABLE MDBX_BUILD_COMPILER
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_QUIET
RESULT_VARIABLE rc)
if(rc OR NOT MDBX_BUILD_COMPILER)
string(STRIP "${CMAKE_C_COMPILER_ID}-${CMAKE_C_COMPILER_VERSION}" MDBX_BUILD_COMPILER)
endif()
# make a build-target triplet
if(CMAKE_C_COMPILER_TARGET)
set(MDBX_BUILD_TARGET "${CMAKE_C_COMPILER_TARGET}")
else()
if(CMAKE_C_COMPILER_ARCHITECTURE_ID)
string(STRIP "${CMAKE_C_COMPILER_ARCHITECTURE_ID}" MDBX_BUILD_TARGET)
elseif(CMAKE_GENERATOR_PLATFORM AND NOT CMAKE_GENERATOR_PLATFORM STREQUAL CMAKE_SYSTEM_NAME)
string(STRIP "${CMAKE_GENERATOR_PLATFORM}" MDBX_BUILD_TARGET)
elseif(CMAKE_SYSTEM_ARCH)
string(STRIP "${CMAKE_SYSTEM_ARCH}" MDBX_BUILD_TARGET)
elseif(CMAKE_LIBRARY_ARCHITECTURE)
string(STRIP "${CMAKE_LIBRARY_ARCHITECTURE}" MDBX_BUILD_TARGET)
elseif(CMAKE_SYSTEM_PROCESSOR)
string(STRIP "${CMAKE_SYSTEM_PROCESSOR}" MDBX_BUILD_TARGET)
else()
set(MDBX_BUILD_TARGET "unknown")
endif()
if(CMAKE_C_COMPILER_ABI
AND NOT (CMAKE_C_COMPILER_ABI MATCHES ".*${MDBX_BUILD_TARGET}.*" OR MDBX_BUILD_TARGET MATCHES ".*${CMAKE_C_COMPILER_ABI}.*"))
string(APPEND MDBX_BUILD_TARGET "-${CMAKE_C_COMPILER_ABI}")
endif()
if(CMAKE_C_PLATFORM_ID
AND NOT (CMAKE_SYSTEM_NAME
AND (CMAKE_C_PLATFORM_ID MATCHES ".*${CMAKE_SYSTEM_NAME}.*" OR CMAKE_SYSTEM_NAME MATCHES ".*${CMAKE_C_PLATFORM_ID}.*"))
AND NOT (CMAKE_C_PLATFORM_ID MATCHES ".*${CMAKE_C_PLATFORM_ID}.*" OR MDBX_BUILD_TARGET MATCHES ".*${CMAKE_C_PLATFORM_ID}.*"))
string(APPEND MDBX_BUILD_TARGET "-${CMAKE_C_COMPILER_ABI}")
endif()
if(CMAKE_SYSTEM_NAME)
string(APPEND MDBX_BUILD_TARGET "-${CMAKE_SYSTEM_NAME}")
endif()
endif()
# provide build-type
if(CMAKE_CONFIGURATION_TYPES)
# via per-configuration define
add_definitions(-DMDBX_BUILD_TYPE="$<CONFIG>")
set(MDBX_BUILD_TYPE "<CONFIGURATION DEPENDENT>")
else()
set(MDBX_BUILD_TYPE ${CMAKE_BUILD_TYPE})
endif()
# options
set(options VERSION C_COMPILER CXX_COMPILER MDBX_BUILD_TARGET MDBX_BUILD_TYPE ${MDBX_BUILD_OPTIONS})
foreach(item IN LISTS options)
if(DEFINED ${item})
set(value "${${item}}")
elseif(DEFINED MDBX_${item})
set(item MDBX_${item})
set(value "${${item}}")
elseif(DEFINED CMAKE_${item})
set(item CMAKE_${item})
set(value "${${item}}")
else()
set(value "AUTO (not pre-defined explicitly)")
endif()
message(STATUS "${item}: ${value}")
endforeach(item)
# provide config.h for library build info
configure_file("${MDBX_SOURCE_DIR}/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/config.h" ESCAPE_QUOTES)
add_definitions(-DMDBX_CONFIG_H="${CMAKE_CURRENT_BINARY_DIR}/config.h")
################################################################################
if(NOT MDBX_AMALGAMATED_SOURCE AND MDBX_ENABLE_TESTS)
if(NOT CMAKE_CXX_COMPILER_LOADED)
message(FATAL_ERROR "MDBX_ENABLE_TESTS=${MDBX_ENABLE_TESTS}: The C++ compiler is required to build the tests.")
endif()
add_subdirectory(test)
endif()
################################################################################
if (NOT SUBPROJECT)
set(PACKAGE "libmdbx")
set(CPACK_PACKAGE_VERSION_MAJOR ${MDBX_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${MDBX_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${MDBX_VERSION_RELEASE})
set(CPACK_PACKAGE_VERSION_COMMIT ${MDBX_VERSION_REVISION})
set(PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}.${CPACK_PACKAGE_VERSION_COMMIT}")
message(STATUS "libmdbx package version is ${PACKAGE_VERSION}")
endif()
cmake_policy(POP)


@@ -0,0 +1,835 @@
ChangeLog
---------
## v0.11.8 at 2022-06-12
Acknowledgements:
- [Masatoshi Fukunaga](https://github.com/mah0x211) for [Lua bindings](https://github.com/mah0x211/lua-libmdbx).
New:
- Added most of the transaction flags to the public API.
- Added `MDBX_NOSUCCESS_EMPTY_COMMIT` build option to return a non-success result (`MDBX_RESULT_TRUE`) on an empty commit.
- Reworked validation and import of DBI-handles into a transaction.
  It is assumed these changes will be invisible to most users, but they will cause fewer surprises in complex DBI cases.
- Added the ability to open a DB in without-LCK (exclusive read-only) mode when there are no permissions to create/write the LCK-file (see the sketch after this list).
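To illustrate the without-LCK mode mentioned above, here is a minimal C sketch (not part of this changelog's sources; the helper name and error handling are illustrative) that opens an environment read-only and exclusively:

```c
#include "mdbx.h"
#include <stdio.h>

/* Open an existing database read-only and exclusively, which allows
 * operation even when the LCK-file cannot be created or written. */
int open_readonly_exclusive(const char *path) {
  MDBX_env *env = NULL;
  int rc = mdbx_env_create(&env);
  if (rc != MDBX_SUCCESS)
    return rc;
  rc = mdbx_env_open(env, path, MDBX_RDONLY | MDBX_EXCLUSIVE, 0);
  if (rc != MDBX_SUCCESS)
    fprintf(stderr, "mdbx_env_open: %s\n", mdbx_strerror(rc));
  mdbx_env_close(env);
  return rc;
}
```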
Fixes:
- A series of fixes and improvements for automatically generated documentation (Doxygen).
- Fixed a copy&paste bug which could lead to `SIGSEGV` (nullptr dereference) in the exclusive/no-lck mode.
- Fixed minor warnings from modern Apple's CLANG 13.
- Fixed minor warnings from CLANG 14 and in-development CLANG 15.
- Fixed `SIGSEGV` regression in without-LCK (exclusive read-only) mode.
- Fixed `mdbx_check_fs_local()` for CDROM case on Windows.
- Fixed a nasty typename typo which caused a false `MDBX_CORRUPTED` error in a rare execution path,
  when the size of the thread-ID type is not equal to 8.
- Fixed write-after-free memory corruption on latest `macOS` during finalization/cleanup of thread(s) that executed read transaction(s); see the hazard sketch after this list.
  > The issue was suddenly discovered by [CI](https://en.wikipedia.org/wiki/Continuous_integration)
  > after adding an iteration with macOS 11 "Big Sur", and then reproduced on a recent release of macOS 12 "Monterey".
  > The issue was never noticed nor reported on macOS 10 "Catalina" nor others.
  > Analysis showed that the problem is caused by a change in the behavior of the system library (internals of dyld and pthread)
  > during thread finalization/cleanup: now the memory allocated for `__thread` variable(s) is released
  > before execution of the registered Thread-Local-Storage destructor(s),
  > thus a TLS destructor will write-after-free just by legitimately dereferencing any `__thread` variable.
  > This is unexpected crazy-like behavior, since the order of releasing/destroying resources
  > is not the reverse of the acquiring/construction order. Nonetheless such a surprise
  > is now worked around by using atomic compare-and-swap operations on 64-bit signatures/cookies.
- Fixed Elbrus/E2K LCC 1.26 compiler warnings (memory model for atomic operations, etc).
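For clarity, the hazard described in the note above can be reproduced by a pattern like the following standalone C sketch; it is not libmdbx's internal code, just an illustration of a `__thread` variable being touched from a TLS destructor:

```c
#include <pthread.h>
#include <stddef.h>

static __thread int tls_counter;  /* backing storage may already be freed */
static pthread_key_t key;

static void destructor(void *arg) {
  (void)arg;
  tls_counter = 0; /* write-after-free on the affected macOS releases */
}

static void *worker(void *arg) {
  (void)arg;
  pthread_setspecific(key, &tls_counter); /* ensure the destructor runs */
  tls_counter = 42;
  return NULL;
}

int main(void) {
  pthread_t thread;
  pthread_key_create(&key, destructor);
  pthread_create(&thread, NULL, worker, NULL);
  pthread_join(thread, NULL);
  return 0;
}
```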
Minors:
- Refined `release-assets` GNU Make target.
- Added logging to `mdbx_fetch_sdb()` to help debugging complex DBI-handles use cases.
- Added an explicit error message when probing finds no support for `std::filesystem`.
- Added contributors "score" table by `git fame` to generated docs.
- Added `mdbx_assert_fail()` to public API (mostly for backtracing).
- Now C++20 concepts are used/enabled only when `__cpp_lib_concepts >= 202002`.
- Don't provide nor report package information if used as a CMake subproject.
-------------------------------------------------------------------------------
## v0.11.7 at 2022-04-22
The stable risen release after Github's intentional malicious disaster.
#### We have migrated to a reliable trusted infrastructure
The origin for now is at [GitFlic](https://gitflic.ru/project/erthink/libmdbx),
since on 2022-04-15 the Github administration, without any warning or
explanation, deleted _libmdbx_ along with a lot of other projects,
simultaneously blocking access for many developers.
For the same reason ~~Github~~ is blacklisted forever.
GitFlic already supports the Russian and English languages, and plans to support more,
including Chinese (中文). You are welcome!
New:
- Added the `tools-static` make target to build statically linked MDBX tools.
- Support for Microsoft Visual Studio 2022.
- Support for building with MinGW's make from the command line without CMake.
- Added `mdbx::filesystem` C++ API namespace that corresponds to `std::filesystem` or `std::experimental::filesystem`.
- Created [website](https://libmdbx.dqdkfa.ru/) for online auto-generated documentation.
- Used `https://web.archive.org/web/20220414235959/https://github.com/erthink/` for dead (or temporarily lost) resources deleted by ~~Github~~.
- Added `--loglevel=` command-line option to the `mdbx_test` tool.
- Added a few fast smoke-like tests to CMake builds.
Fixes:
- Fixed a race between starting a transaction and creating a DBI descriptor that could lead to `SIGSEGV` in the cursor tracking code.
- Clarified description of `MDBX_EPERM` error returned from `mdbx_env_set_geometry()`.
- Fixed failure to promote the parent transaction to dirty when the undo of a geometry update failed during abort of a nested transaction.
- Resolved linking issues with `libstdc++fs`/`libc++fs`/`libc++experimental` for C++ `std::filesystem` or `std::experimental::filesystem` for legacy compilers.
- Added workaround for GNU Make 3.81 and earlier.
- Added workaround for Elbrus/LCC 1.25 compiler bug of class inline `static constexpr` member field.
- [Fixed](https://github.com/ledgerwatch/erigon/issues/3874) minor assertion regression (only debug builds were affected).
- Fixed detection of `C++20` concepts accessibility.
- Fixed detection of Clang's LTO availability for Android.
- Fixed extra definition of `_FILE_OFFSET_BITS=64` for Android that is problematic for 32-bit Bionic.
- Fixed build for ARM/ARM64 by MSVC.
- Fixed non-x86 Windows builds with `MDBX_WITHOUT_MSVC_CRT=ON` and `MDBX_BUILD_SHARED_LIBRARY=ON`.
Minors:
- Resolved minor MSVC warnings: avoid `/INCREMENTAL[:YES]` with `/LTCG`, `/W4` with `/W3`, and the `C5105` warning.
- Switched to using `MDBX_EPERM` instead of `MDBX_RESULT_TRUE` to indicate that the geometry cannot be updated.
- Added `NULL` checking during memory allocation inside `mdbx_chk`.
- Resolved all warnings from MinGW when building without CMake.
- Added inheritable `target_include_directories()` to `CMakeLists.txt` for easy integration.
- Added build-time checks and paranoid runtime assertions for the `off_t` arguments of `fcntl()` which are used for locking.
- Added `-Wno-lto-type-mismatch` to avoid false-positive warnings from old GCC during LTO-enabled builds.
- Added checking for TID (system thread id) to avoid hang on 32-bit Bionic/Android within `pthread_mutex_lock()`.
- Reworked `MDBX_BUILD_TARGET` of CMake builds.
- Added `CMAKE_HOST_ARCH` and `CMAKE_HOST_CAN_RUN_EXECUTABLES_BUILT_FOR_TARGET`.
-------------------------------------------------------------------------------
## v0.11.6 at 2022-03-24
The stable release with the complete workaround for an incoherence flaw of the Linux unified page/buffer cache.
Nonetheless, the root cause of this trouble may be an issue of the Intel CPU cache/MESI.
See [issue#269](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/269) for more information.
Acknowledgements:
- [David Bouyssié](https://github.com/david-bouyssie) for [Scala bindings](https://github.com/david-bouyssie/mdbx4s).
- [Michelangelo Riccobene](https://github.com/mriccobene) for reporting and testing.
Fixes:
- [Added complete workaround](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/269) for an incoherence flaw of Linux unified page/buffer cache.
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/272) cursor reusing for read-only transactions.
- Fixed copy&paste typo inside `mdbx::cursor::find_multivalue()`.
Minors:
- Minor refinements of the C++ API for convenience.
- Minor internal refinements.
- Added `lib-static` and `lib-shared` targets for make.
- Added minor workaround for AppleClang 13.3 bug.
- Clarified error messages of a signature/version mismatch.
## v0.11.5 at 2022-02-23
The release with the temporary hotfix for a flaw of Linux unified page/buffer cache.
See [issue#269](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/269) for more information.
Acknowledgements:
- [Simon Leier](https://github.com/leisim) for reporting and testing.
- [Kai Wetlesen](https://github.com/kaiwetlesen) for [RPMs](http://copr.fedorainfracloud.org/coprs/kwetlesen/libmdbx/).
- [Tullio Canepa](https://github.com/canepat) for reporting C++ API issue and contributing.
Fixes:
- [Added hotfix](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/269) for a flaw of Linux unified page/buffer cache.
- [Fixed/Reworked](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/270) move-assignment operators for "managed" classes of C++ API.
- Fixed a potential `SIGSEGV` while opening a DB with an overridden non-default page size.
- [Made](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/267) `mdbx_env_open()` idempotent in failure cases.
- Refined/Fixed page reservation inside `mdbx_update_gc()` to avoid non-reclamation in rare cases.
- Fixed a typo in the retained-space calculation for the hsr-callback.
Minors:
- Reworked functions for meta-pages, split off the non-volatile ones.
- Disentangled C11-atomic fences/barriers and pure-functions (with `__attribute__((__pure__))`) to avoid compiler misoptimization.
- Fixed hypothetical unaligned access to 64-bit dwords on ARM with `__ARM_FEATURE_UNALIGNED` defined.
- Reasonable paranoia that brings clarity for code readers.
- Minor fixes to Doxygen references, comments, descriptions, etc.
## v0.11.4 at 2022-02-02
The stable release with fixes for large and huge databases sized 4..128 TiB.
Acknowledgements:
- [Ledgerwatch](https://github.com/ledgerwatch), [Binance](https://github.com/binance-chain) and [Positive Technologies](https://www.ptsecurity.com/) teams for reporting, assistance in investigation and testing.
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation.
- [Kris Zyp](https://github.com/kriszyp) for [Deno](https://deno.land/) support.
New features, extensions and improvements:
- Added treating the `UINT64_MAX` value as the maximum for the given option inside `mdbx_env_set_option()` (see the sketch after this list).
- Added `to_hex/to_base58/to_base64::output(std::ostream&)` overloads without using temporary string objects as buffers.
- Added `--geometry-jitter=YES|no` option to the test framework.
- Added support for [Deno](https://deno.land/) by [Kris Zyp](https://github.com/kriszyp).
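A minimal sketch of the `UINT64_MAX` convention noted above, assuming an already-created `env` (the helper name is illustrative, not from these sources):

```c
#include "mdbx.h"
#include <stdint.h>

/* Request the maximum supported value for a runtime option by passing
 * UINT64_MAX, per the convention described above. */
int raise_rp_augment_limit(MDBX_env *env) {
  return mdbx_env_set_option(env, MDBX_opt_rp_augment_limit, UINT64_MAX);
}
```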
Fixes:
- Fixed handling `MDBX_opt_rp_augment_limit` for GC's records from huge transactions (Erigon/Akula/Ethereum).
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/258) build on Android (avoid including `sys/sem.h`).
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/261) missing copy assignment operator for `mdbx::move_result`.
- Fixed missing `&` for `std::ostream &operator<<()` overloads.
- Fixed unexpected `EXDEV` (Cross-device link) error from `mdbx_env_copy()`.
- Fixed base64 encoding/decoding bugs in the auxiliary C++ API.
- Fixed overflow of `pgno_t` during checking PNL on 64-bit platforms.
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/260) excessive PNL checking after sort for spilling.
- Reworked checking `MAX_PAGENO` and DB upper-size geometry limit.
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/265) build for some combinations of versions of MSVC and Windows SDK.
Minors:
- Added workaround for CLANG bug [D79919/PR42445](https://reviews.llvm.org/D79919).
- Fixed build test on Android (using `pthread_barrier_t` stub).
- Disabled C++20 concepts for CLANG < 14 on Android.
- Fixed minor `unused parameter` warning.
- Added CI for Android.
- Refine/cleanup internal logging.
- Refined line splitting inside hex/base58/base64 encoding to avoid `\n` at the end.
- Added workaround for modern libstdc++ with CLANG < 4.x
- Relaxed txn-check rules for auxiliary functions.
- Clarified comments and descriptions, etc.
- Using the `-fno-semantic-interposition` option to reduce the overhead of calling our own public functions.
## v0.11.3 at 2021-12-31
Acknowledgements:
- [gcxfd <i@rmw.link>](https://github.com/gcxfd) for reporting, contributing and testing.
- [장세연 (Чан Се Ен)](https://github.com/sasgas) for reporting and testing.
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation.
New features, extensions and improvements:
- [Added](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/236) `mdbx_cursor_get_batch()`.
- [Added](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/250) `MDBX_SET_UPPERBOUND` (see the sketch after this list).
- C++ API is finalized now.
- The GC update stage has been [significantly sped up](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/254) when fixing huge Erigon transactions (Ethereum ecosystem).
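A hedged sketch of the new `MDBX_SET_UPPERBOUND` operation noted above; the cursor is assumed to already be bound to a live transaction and DBI, and the exact return-code convention should be checked against `mdbx.h`:

```c
#include "mdbx.h"
#include <string.h>

/* Position the cursor at the first key-value pair above the probe key
 * using MDBX_SET_UPPERBOUND; on success the key/data arguments are
 * updated to the pair actually found. */
int seek_upper_bound(MDBX_cursor *cursor, const char *probe,
                     MDBX_val *key, MDBX_val *data) {
  key->iov_base = (void *)probe;
  key->iov_len = strlen(probe);
  return mdbx_cursor_get(cursor, key, data, MDBX_SET_UPPERBOUND);
}
```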
Fixes:
- Disabled C++20 concepts for stupid AppleClang 13.x
- Fixed internal collision of `MDBX_SHRINK_ALLOWED` with `MDBX_ACCEDE`.
Minors:
- Fixed returning `MDBX_RESULT_TRUE` (unexpected -1) from `mdbx_env_set_option()`.
- Added `mdbx_env_get_syncbytes()` and `mdbx_env_get_syncperiod()`.
- [Clarified](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/249) description of `MDBX_INTEGERKEY`.
- Reworked/simplified `mdbx_env_sync_internal()`.
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/248) extra assertion inside `mdbx_cursor_put()` for `MDBX_DUPFIXED` cases.
- Avoiding extra looping inside `mdbx_env_info_ex()`.
- Explicitly enabled core dumps from stochastic tests scripts on Linux.
- [Fixed](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/253) `mdbx_override_meta()` to avoid false-positive assertions.
- For compatibility, reverted returning `MDBX_ENODATA` for some cases.
## v0.11.2 at 2021-12-02
Acknowledgements:
- [장세연 (Чан Се Ен)](https://github.com/sasgas) for contributing to C++ API.
- [Alain Picard](https://github.com/castortech) for [Java bindings](https://github.com/castortech/mdbxjni).
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
- [Kris Zyp](https://github.com/kriszyp) for reporting and testing.
- [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs).
Fixes:
- [Fixed compilation](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/239) with `devtoolset-9` on CentOS/RHEL 7.
- [Fixed unexpected `MDBX_PROBLEM` error](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/242) caused by updating an obsolete meta-page.
- [Fixed returning `MDBX_NOTFOUND` error](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/243) when an inexact value is found for the `MDBX_GET_BOTH` operation.
- [Fixed compilation](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/245) without kernel/libc-devel headers.
Minors:
- Fixed `constexpr`-related macros for legacy compilers.
- Allowed defining `CMAKE_CXX_STANDARD` using an environment variable.
- Simplified collection of page-operation statistics.
- Added `MDBX_FORCE_BUILD_AS_MAIN_PROJECT` cmake option.
- Removed unneeded `#undef P_DIRTY`.
## v0.11.1 at 2021-10-23
### Backward compatibility break:
The database format signature has been changed to prevent
forward-interoperability with previous releases, which could otherwise lead to a
[false positive diagnosis of database corruption](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/238)
due to flaws of old library versions.
This change is mostly invisible:
- previous versions are unable to read/write the new DBs;
- but the new release is able to handle old DBs and will silently upgrade them.
Acknowledgements:
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
-------------------------------------------------------------------------------
## v0.10.5 at 2021-10-13 (obsolete, please use v0.11.1)
Unfortunately, `v0.10.5` accidentally turned out not to be fully compatible with previous releases:
- `v0.10.5` can read/process DBs created by previous releases, i.e. backward compatibility is provided;
- however, previous releases may end up in a false-corrupted state with a DB that was touched by `v0.10.5`, i.e. forward compatibility is broken for `v0.10.4` and earlier.
This cannot be fixed, as it would require fixing past versions, which would in effect just reproduce the current version.
Therefore, it is recommended to use `v0.11.1` instead of `v0.10.5`.
Acknowledgements:
- [Noel Kuntze](https://github.com/Thermi) for immediate bug reporting.
Fixes:
- Fixed unaligned access regression after the `#pragma pack` fix for modern compilers.
- Added a UBSAN test to CI to avoid regressions similar to the lately fixed one.
- Fixed the possibility of meta-pages clashing after manually turning to a particular meta-page using the `mdbx_chk` utility.
Minors:
- Refined handling of weak or invalid meta-pages while opening a DB.
- Refined providing the ID of the last committed modification transaction for the `@MAIN` and `@GC` sub-databases.
## v0.10.4 at 2021-10-10
Acknowledgements:
- [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs).
- [Andrew Ashikhmin](https://github.com/yperbasis) for contributing to C++ API.
Fixes:
- Fixed possibility of looping update GC during transaction commit (no public issue since the problem was discovered inside [Positive Technologies](https://www.ptsecurity.ru)).
- Fixed `#pragma pack` to avoid provoking some compilers to generate code with [unaligned access](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/235).
- Fixed `noexcept` for potentially throwing `txn::put()` of C++ API.
Minors:
- Added stochastic test script for checking small transactions cases.
- Removed extra transaction commit/restart inside test framework.
- Fixed a too-small (single page) default DB shrink threshold in debugging builds.
## v0.10.3 at 2021-08-27
Acknowledgements:
- [Francisco Vallarino](https://github.com/fjvallarino) for [Haskell bindings for libmdbx](https://hackage.haskell.org/package/libmdbx).
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
- [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for contributing.
Extensions and improvements:
- Added `cursor::erase()` overloads for `key` and for `key-value`.
- Resolved minor Coverity Scan issues (no fixes, but some hints/comments were added).
- Resolved minor UndefinedBehaviorSanitizer issues (no fixes, but some workarounds were added).
Fixes:
- Always setup `madvise` while opening DB (fixes https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/231).
- Fixed checking legacy `P_DIRTY` flag (`0x10`) for nested/sub-pages.
Minors:
- Fixed getting revision number from middle of history during amalgamation (GNU Makefile).
- Fixed searching for GCC tools for LTO (CMake scripts).
- Fixed/reordered the directory list for searching CLANG tools for LTO (CMake scripts).
- Fixed/workarounds for CLANG < 9.x
- Fixed CMake warning about compatibility with 3.8.2
## v0.10.2 at 2021-07-26
Acknowledgements:
- [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
- [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for reporting bugs.
- [Lionel Debroux](https://github.com/debrouxl) for fuzzing tests and reporting bugs.
- [Sergey Fedotov](https://github.com/SergeyFromHell/) for [`node-mdbx` NodeJS bindings](https://www.npmjs.com/package/node-mdbx).
- [Kris Zyp](https://github.com/kriszyp) for [`lmdbx-store` NodeJS bindings](https://github.com/kriszyp/lmdbx-store).
- [Noel Kuntze](https://github.com/Thermi) for [draft Python bindings](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/commits/python-bindings).
New features, extensions and improvements:
- [Allow to predefine/override `MDBX_BUILD_TIMESTAMP` for builds reproducibility](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/201).
- Added options support for `long-stochastic` script.
- Avoided `MDBX_TXN_FULL` error for large transactions when possible.
- The `MDBX_READERS_LIMIT` increased to `32767`.
- Raise `MDBX_TOO_LARGE` under Valgrind/ASAN if the DB being opened is 100 times larger than RAM (to avoid hangs and OOM).
- Minimized the size of poisoned/unpoisoned regions to avoid Valgrind/ASAN getting stuck.
- Added more workarounds for QEMU for testing builds for 32-bit platforms, Alpha and Sparc architectures.
- `mdbx_chk` now skips iteration & checking of DB records if the corresponding page-tree is corrupted (to avoid `SIGSEGV`, ASAN failures, etc).
- Added more checks for [rare/fuzzing corruption cases](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/217).
Backward compatibility break:
- Use file `VERSION.txt` for version information instead of `VERSION` to avoid collision with `#include <version>`.
- Rename `slice::from/to_FOO_bytes()` to `slice::envisage_from/to_FOO_length()`.
- Rename `MDBX_TEST_EXTRA` make's variable to `MDBX_SMOKE_EXTRA`.
- Some details of the C++ API have been changed for subsequent freezing.
Fixes:
- Fixed excess meta-page checks when `mdbx_chk` is called to check the DB against a specific meta-page, which could prevent switching to the selected meta-page even if the check passed without errors.
- Fixed [recursive use of SRW-lock on Windows caused by the `MDBX_NOTLS` option](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/203).
- Fixed [log a warning during a new DB creation](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/205).
- Fixed [false-negative `mdbx_cursor_eof()` result](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/207).
- Fixed [`make install` with non-GNU `install` utility (OSX, BSD)](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/208).
- Fixed [installation by `CMake` in special cases by complete use `GNUInstallDirs`'s variables](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/209).
- Fixed [C++ Buffer issue with `std::string` and alignment](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/191).
- Fixed `safe64_reset()` for platforms without atomic 64-bit compare-and-swap.
- Fixed hang/shutdown on big-endian platforms without `__cxa_thread_atexit()`.
- Fixed [using bad meta-pages if DB was partially/recoverable corrupted](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/217).
- Fixed extra `noexcept` for `buffer::assign_reference()`.
- Fixed `bootid` generation on Windows for the case of a changed system time.
- Fixed [test framework keygen-related issue](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/127).
## v0.10.1 at 2021-06-01
Acknowledgements:
- [Alexey Akhunov](https://github.com/AlexeyAkhunov) and [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting and testing.
- [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for bug reporting and testing related to WSL2.
New features:
- Added `-p` option to `mdbx_stat` utility for printing page operations statistic.
- Added explicit checking for and warning about using unfit github's archives.
- Added fallback from [OFD locking](https://bit.ly/3yFRtYC) to legacy non-OFD POSIX file locks on an `EINVAL` error.
- Added [Plan 9](https://en.wikipedia.org/wiki/9P_(protocol)) network file system to the whitelist for an ability to open a DB in exclusive mode.
- Support for opening, from a WSL2 environment, a DB hosted on a Windows drive and mounted via [DrvFs](https://docs.microsoft.com/it-it/archive/blogs/wsl/wsl-file-system-support#drvfs) (i.e. by Plan 9 as noted above).
Fixes:
- Fixed minor "foo not used" warnings from modern C++ compilers when building the C++ part of the library.
- Fixed confusing/messy errors when building the library from unfit github archives (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/197).
- Fixed `#elsif` typo.
- Fixed a rare unexpected `MDBX_PROBLEM` error during altering data in huge transactions due to wrong spilling/ousting of dirty pages (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/195).
- Re-Fixed WSL1/WSL2 detection with distinguishing (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/97).
## v0.10.0 at 2021-05-09
Acknowledgements:
- [Mahlon E. Smith](https://github.com/mahlonsmith) for [Ruby bindings](https://rubygems.org/gems/mdbx/).
- [Alex Sharov](https://github.com/AskAlexSharov) for [mdbx-go](https://github.com/torquem-ch/mdbx-go), bug reporting and testing.
- [Artem Vorotnikov](https://github.com/vorot93) for bug reporting and PR.
- [Paolo Rebuffo](https://www.linkedin.com/in/paolo-rebuffo-8255766/), [Alexey Akhunov](https://github.com/AlexeyAkhunov) and Mark Grosberg for donations.
- [Noel Kuntze](https://github.com/Thermi) for preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings)
New features:
- Added `mdbx_env_set_option()` and `mdbx_env_get_option()` for controlling
  various runtime options of an environment (the announcement of this feature was missed in previous news); see the sketch after this list.
- Added `MDBX_DISABLE_PAGECHECKS` build option to disable some checks, reducing the overhead
  and the probability of detecting database corruption to values closer to LMDB.
  `MDBX_DISABLE_PAGECHECKS=1` provides a performance boost of about 10% in CRUD scenarios,
  and together with the `MDBX_ENV_CHECKPID=0` and `MDBX_TXN_CHECKOWNER=0` options can yield
  up to 30% more performance compared to LMDB.
- Using a floating-point (exponentially quantized) representation for the internal 16-bit values
  of the grow step and shrink threshold when these are huge (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/166).
  To minimize the impact on compatibility, only the odd values inside the upper half
  of the range (i.e. 32769..65533) are used for the new representation.
- Added the `mdbx_drop` command-line tool, similar to LMDB's, to purge or delete (sub)database(s).
- [Ruby bindings](https://rubygems.org/gems/mdbx/) are available now from [Mahlon E. Smith](https://github.com/mahlonsmith).
- Added `MDBX_ENABLE_MADVISE` build option which controls the use of POSIX `madvise()` hints and friends.
- The internal node sizes were refined, resulting in a reduction of large/overflow pages in some use cases
  and a slight increase of the key-size limit to ≈½ of the page size.
- Added the number of keys/items on pages to the `mdbx_chk` output.
- Added explicit `install-strip` and `install-no-strip` targets to the `Makefile` (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/180).
- Major rework of page splitting (af9b7b560505684249b76730997f9e00614b8113) for
  - an "auto-appending" feature upon insertion for both ascending and
    descending key sequences. As a result, the optimality of page filling
    increases significantly (more densely, less slackness) while
    inserting ordered sequences of keys;
  - a "splitting at middle" to make the page tree more balanced on average.
- Added `mdbx_get_sysraminfo()` to the API.
- Added guessing a reasonable maximum DB size for the default upper limit of geometry (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/183).
- Major rework of the internal labeling of dirty pages (958fd5b9479f52f2124ab7e83c6b18b04b0e7dda) for
  a "transparent spilling" feature, the gist of which is to make dirty pages
  ready for spilling (writing to disk) without further altering them.
  Thus, in the `MDBX_WRITEMAP` mode the OS kernel is able to oust dirty pages
  to the DB file without further penalty during transaction commit.
  As a result, page swapping and I/O could be significantly reduced during extra-large transactions and/or lack of memory.
- Minimized reading of leaf-pages during dropping of subDB(s) and nested trees.
- Major rework of dirty-page spilling to support an [LRU](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU))
  policy and prioritization of large/overflow pages.
- Statistics of page operations (split, merge, copy, spill, etc) now available through `mdbx_env_info_ex()`.
- Auto-setup of the limit for the dirty-page list length (`MDBX_opt_txn_dp_limit` option).
- Support `make options` to list available build options.
- Support `make help` to list available make targets.
- Silent `make` build by default.
- Preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings) are available now
  from [Noel Kuntze](https://github.com/Thermi) (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/147).
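A small sketch of the runtime-option API noted above (the helper name is illustrative and `env` is assumed to be already open):

```c
#include "mdbx.h"
#include <stdint.h>
#include <stdio.h>

/* Adjust the dirty-page list limit at runtime and read back the value
 * actually in effect. */
int tune_txn_dp_limit(MDBX_env *env, uint64_t limit) {
  int rc = mdbx_env_set_option(env, MDBX_opt_txn_dp_limit, limit);
  if (rc != MDBX_SUCCESS)
    return rc;
  uint64_t effective = 0;
  rc = mdbx_env_get_option(env, MDBX_opt_txn_dp_limit, &effective);
  if (rc == MDBX_SUCCESS)
    printf("MDBX_opt_txn_dp_limit = %llu\n", (unsigned long long)effective);
  return rc;
}
```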
Backward compatibility break:
- The `MDBX_AVOID_CRT` build option was renamed to `MDBX_WITHOUT_MSVC_CRT`.
This option is only relevant when building for Windows.
- The `mdbx_env_stat()` always, and `mdbx_env_stat_ex()` when called with a zeroed transaction parameter,
  now internally start a temporary read transaction and thus may return the `MDBX_BAD_RSLOT` error.
  So, just never use the deprecated `mdbx_env_stat()` and call `mdbx_env_stat_ex()` with a transaction parameter.
- The build option `MDBX_CONFIG_MANUAL_TLS_CALLBACK` was removed; now a non-zero value of
  the `MDBX_MANUAL_MODULE_HANDLER` macro indicates the requirement to manually call `mdbx_module_handler()`
  when loading libraries and applications that use statically linked libmdbx on obsolete Windows versions.
Fixes:
- Fixed a performance regression due to non-optimal C11 atomics usage (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/160).
- Fixed "reincarnation" of a subDB after its deletion (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/168).
- Fixed (disallowing) implicit subDB deletion via operations on `@MAIN`'s DBI-handle.
- Fixed a crash of `mdbx_env_info_ex()` in case of a call for a non-open environment (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/171).
- Fixed the selection/adjustment of values inside `mdbx_env_set_geometry()` for implicit out-of-range cases (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/170).
- Fixed `mdbx_env_set_option()` for setting the initial and limit size of the dirty-page list (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/179).
- Fixed an unreasonably huge default upper limit for DB geometry (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/183).
- Fixed `constexpr` specifier for the `slice::invalid()`.
- Fixed (no)readahead auto-handling (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/164).
- Fixed non-alloy build for Windows.
- Switched to using Heap-functions instead of LocalAlloc/LocalFree on Windows.
- Fixed `mdbx_env_stat_ex()` to return statistics of the whole environment instead of MainDB only (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/190).
- Fixed building by GCC 4.8.5 (added workaround for a preprocessor's bug).
- Fixed building C++ part for iOS <= 13.0 (unavailability of `std::filesystem::path`).
- Fixed building for Windows target versions prior to Windows Vista (`WIN32_WINNT < 0x0600`).
- Fixed building by MinGW for Windows (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/155).
-------------------------------------------------------------------------------
## v0.9.3 at 2021-02-02
Acknowledgements:
- [Mahlon E. Smith](http://www.martini.nu/) for [FreeBSD port of libmdbx](https://svnweb.freebsd.org/ports/head/databases/mdbx/).
- [장세연](http://www.castis.com) for bug fixing and PR.
- [Clément Renault](https://github.com/Kerollmops/heed) for [Heed](https://github.com/Kerollmops/heed) fully typed Rust wrapper.
- [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting.
- [Noel Kuntze](https://github.com/Thermi) for bug reporting.
Removed options and features:
- Drop `MDBX_HUGE_TRANSACTIONS` build-option (now no longer required).
New features:
- Package for FreeBSD is available now by Mahlon E. Smith.
- New API functions to get/set various options (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/128):
- the maximum number of named databases for the environment;
- the maximum number of threads/reader slots;
- threshold (since the last unsteady commit) to force flush the data buffers to disk;
- relative period (since the last unsteady commit) to force flush the data buffers to disk;
- limit to grow a list of reclaimed/recycled page numbers for finding a sequence of contiguous pages for large data items;
- limit to grow a cache of dirty pages for reuse in the current transaction;
- limit of pre-allocated memory items for dirty pages;
- limit of dirty pages for a write transaction;
- initial allocation size for the dirty-page list of a write transaction;
- maximal part of the dirty pages that may be spilled when necessary;
- minimal part of the dirty pages that should be spilled when necessary;
- how much of the parent transaction's dirty pages will be spilled while starting each child transaction;
- Unlimited/Dynamic size of retired and dirty page lists (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/123).
- Added `-p` option (purge subDB before loading) to `mdbx_load` tool.
- Reworked spilling of large transaction and committing of nested transactions:
- page spilling code reworked to avoid the flaws and bugs inherited from LMDB;
- limit for number of dirty pages now is controllable at runtime;
- spilled pages, including overflow/large pages, can now be reused and refunded/compactified in nested transactions;
- more effective refunding/compactification, especially for the loose page cache.
- Added `MDBX_ENABLE_REFUND` and `MDBX_PNL_ASCENDING` internal/advanced build options.
- Added `mdbx_default_pagesize()` function.
- Better support architectures with a weak/relaxed memory consistency model (ARM, AARCH64, PPC, MIPS, RISC-V, etc) by means [C11 atomics](https://en.cppreference.com/w/c/atomic).
- Speed up page number lists and dirty page lists (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/132).
- Added `LIBMDBX_NO_EXPORTS_LEGACY_API` build option.
Fixes:
- Fixed missing cleanup (null assigned) in the C++ commit/abort (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/pull/143).
- Fixed `mdbx_realloc()` for case of nullptr and `MDBX_WITHOUT_MSVC_CRT=ON` for Windows.
- Fixed the possibility to use invalid and renewed (closed & re-opened, dropped & re-created) DBI-handles (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/146).
- Fixed 4-byte aligned access to 64-bit integers, including access to the `bootid` meta-page's field (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/153).
- Fixed minor/potential memory leak during page flushing and unspilling.
- Fixed handling of cursors' and subDBs' states for nested transactions.
- Fixed a page leak in the extra-rare case where the list of retired pages changed during GC update on transaction commit.
- Fixed assertions to avoid false-positive UB detection by CLANG/LLVM (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/153).
- Fixed `MDBX_TXN_FULL` and regressive `MDBX_KEYEXIST` during large transaction commit with `MDBX_LIFORECLAIM` (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/123).
- Fixed auto-recovery (`weak->steady` with the same boot-id) when the database size at the last weak checkpoint is larger than at the last steady checkpoint.
- Fixed operation on systems with unusual small/large page size, including PowerPC (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/157).
## v0.9.2 at 2020-11-27
Acknowledgements:
- Jens Alfke (Mobile Architect at [Couchbase](https://www.couchbase.com/)) for [NimDBX](https://github.com/snej/nimdbx).
- Clément Renault (CTO at [MeiliSearch](https://www.meilisearch.com/)) for [mdbx-rs](https://github.com/Kerollmops/mdbx-rs).
- Alex Sharov (Go-Lang Tech Lead at [TurboGeth/Ethereum](https://ethereum.org/)) for extreme test cases and bug reporting.
- George Hazan (CTO at [Miranda NG](https://www.miranda-ng.org/)) for bug reporting.
- [Positive Technologies](https://www.ptsecurity.com/) for funding and [The Standoff](https://standoff365.com/).
Added features:
- Provided package for [buildroot](https://buildroot.org/).
- Binding for Nim is [available](https://github.com/snej/nimdbx) now by Jens Alfke.
- Added `mdbx_env_delete()` for deleting an environment's files in a proper and multiprocess-safe way (see the sketch after this list).
- Added `mdbx_txn_commit_ex()` with collecting latency information.
- Fast completion of pure nested transactions.
- Added `LIBMDBX_INLINE_API` macro and inline versions of some API functions.
- Added `mdbx_cursor_copy()` function.
- Extended tests for checking cursor tracking.
- Added `MDBX_SET_LOWERBOUND` operation for `mdbx_cursor_get()`.
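A hedged sketch of `mdbx_env_delete()` noted above; the deletion-mode constant shown is recalled from the public header and should be verified against `mdbx.h`:

```c
#include "mdbx.h"
#include <stdio.h>

/* Remove an environment's files, refusing if another process still uses it.
 * MDBX_ENV_ENSURE_UNUSED is assumed here; MDBX_RESULT_TRUE means there was
 * nothing to delete. */
int remove_environment(const char *path) {
  int rc = mdbx_env_delete(path, MDBX_ENV_ENSURE_UNUSED);
  if (rc != MDBX_SUCCESS && rc != MDBX_RESULT_TRUE)
    fprintf(stderr, "mdbx_env_delete: %s\n", mdbx_strerror(rc));
  return rc;
}
```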
Fixes:
- Fixed missing installation of `mdbx.h++`.
- Fixed use of obsolete `__noreturn`.
- Fixed use of `yield` instruction on ARM if unsupported.
- Added pthread workaround for buggy toolchain/cmake/buildroot.
- Fixed use of `pthread_yield()` for non-GLIBC.
- Fixed use of `RegGetValueA()` on Windows 2000/XP.
- Fixed use of `GetTickCount64()` on Windows 2000/XP.
- Fixed opening a DB on network shares (in the exclusive mode).
- Fixed copy&paste typos.
- Fixed minor false-positive GCC warning.
- Added workaround for broken `DEFINE_ENUM_FLAG_OPERATORS` from Windows SDK.
- Fixed cursor state after multimap/dupsort repeated deletes (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/121).
- Added `SIGPIPE` suppression for internal thread during `mdbx_env_copy()`.
- Fixed extra-rare `MDBX_KEY_EXIST` error during `mdbx_commit()` (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/131).
- Fixed spilled pages checking (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/126).
- Fixed `mdbx_load` for 'plain text' and without `-s name` cases (https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/136).
- Fixed save/restore/commit of cursors for nested transactions.
- Fixed cursors state in rare/special cases (move next beyond end-of-data, after deletion and so on).
- Added workaround for MSVC 19.28 (Visual Studio 16.8) (but may still hang during compilation).
- Fixed paranoid Clang C++ UB for bitwise operations with flags defined by enums.
- Fixed large pages checking (for compatibility and to avoid false-positive errors from `mdbx_chk`).
- Added workaround for Wine (https://github.com/miranda-ng/miranda-ng/issues/1209).
- Fixed `ERROR_NOT_SUPPORTED` while opening DB by UNC pathnames (https://github.com/miranda-ng/miranda-ng/issues/2627).
- Added handling `EXCEPTION_POSSIBLE_DEADLOCK` condition for Windows.
## v0.9.1 2020-09-30
Added features:
- Preliminary C++ API with support for C++17 polymorphic allocators.
- [Online C++ API reference](https://libmdbx.dqdkfa.ru/) by Doxygen.
- Quick reference for Insert/Update/Delete operations.
- Explicit `MDBX_SYNC_DURABLE` to sync modes for API clarity.
- Explicit `MDBX_ALLDUPS` and `MDBX_UPSERT` for API clarity.
- Support for read transactions preparation (`MDBX_TXN_RDONLY_PREPARE` flag).
- Support for cursor preparation/(pre)allocation and reuse (`mdbx_cursor_create()` and `mdbx_cursor_bind()` functions); see the sketch after this list.
- Support for checking database using specified meta-page (see `mdbx_chk -h`).
- Support for turn to the specific meta-page after checking (see `mdbx_chk -h`).
- Support for explicit reader threads (de)registration.
- The `mdbx_txn_break()` function to explicitly mark a transaction as broken.
- Improved handling of corrupted databases by `mdbx_chk` utility and `mdbx_walk_tree()` function.
- Improved DB corruption detection by checking parent-page-txnid.
- Improved opening large DB (> 4Gb) from 32-bit code.
- Provided `pure-function` and `const-function` attributes to C API.
- Support for user-settable context for transactions & cursors.
- Revised API and documentation related to Handle-Slow-Readers callback feature.
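A minimal sketch of the cursor preallocation/reuse feature noted above; transaction and DBI setup are assumed, and the static cursor is for illustration only (not thread-safe):

```c
#include "mdbx.h"
#include <stddef.h>

/* Count entries using a preallocated cursor that is re-bound to each
 * transaction instead of being created and destroyed every time. */
int count_entries(MDBX_txn *txn, MDBX_dbi dbi, size_t *count) {
  static MDBX_cursor *reusable; /* created once, reused across transactions */
  if (!reusable) {
    reusable = mdbx_cursor_create(NULL);
    if (!reusable)
      return MDBX_ENOMEM;
  }
  int rc = mdbx_cursor_bind(txn, reusable, dbi);
  if (rc != MDBX_SUCCESS)
    return rc;
  MDBX_val key, data;
  *count = 0;
  while ((rc = mdbx_cursor_get(reusable, &key, &data, MDBX_NEXT)) == MDBX_SUCCESS)
    ++*count;
  return rc == MDBX_NOTFOUND ? MDBX_SUCCESS : rc;
}
```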
Deprecated functions and flags:
- For clarity and API simplification the `MDBX_MAPASYNC` flag is deprecated.
Just use `MDBX_SAFE_NOSYNC` or `MDBX_UTTERLY_NOSYNC` instead of it.
- `MDBX_oom_func`, `mdbx_env_set_oomfunc()` and `mdbx_env_get_oomfunc()`
  are replaced with `MDBX_hsr_func`, `mdbx_env_set_hsr()` and `mdbx_env_get_hsr()`.
Fixes:
- Fix `mdbx_strerror()` for `MDBX_BUSY` error (no error description is returned).
- Fix updating internal meta-geo information in read-only mode (`EACCESS` or `EBADFD` error).
- Fix `mdbx_page_get()` null-deref when the DB is corrupted (crash by `SIGSEGV`).
- Fix `mdbx_env_open()` for re-opening after non-fatal errors (`mdbx_chk` unexpected failures).
- Workaround for MSVC 19.27 `static_assert()` bug.
- Doxygen descriptions and refinement.
- Update Valgrind's suppressions.
- Workaround to avoid infinite loop of 'nested' testcase on MIPS under QEMU.
- Fix a lot of typos & spelling (Thanks to Josh Soref for PR).
- Fix `getopt()` messages for Windows (Thanks to Andrey Sporaw for reporting).
- Fix MSVC compiler version requirements (Thanks to Andrey Sporaw for reporting).
- Workarounds for QEMU's bugs to run tests for cross-builded library under QEMU.
- Now C++ compiler optional for building by CMake.
## v0.9.0 2020-07-31 (not a release, but API changes)
Added features:
- [Online C API reference](https://libmdbx.dqdkfa.ru/) by Doxygen.
- Separated enums for environment, sub-databases, transactions, copying and data-update flags.
Deprecated functions and flags:
- Usage of custom comparators and the `mdbx_dbi_open_ex()` are deprecated, since such databases couldn't be checked by the `mdbx_chk` utility.
Please use the value-to-key functions to provide keys that are compatible with the built-in libmdbx comparators.
-------------------------------------------------------------------------------
## 2020-07-06
- Added support for multi-opening the same DB in a process with SysV locking (BSD).
- Fixed warnings & minors for LCC compiler (E2K).
- Enabled to simultaneously open the same database from processes with and without the `MDBX_WRITEMAP` option.
- Added key-to-value, `mdbx_get_keycmp()` and `mdbx_get_datacmp()` functions (helpful to avoid using custom comparators).
- Added `ENABLE_UBSAN` CMake option to enabling the UndefinedBehaviorSanitizer from GCC/CLANG.
- Workaround for [CLANG bug](https://bugs.llvm.org/show_bug.cgi?id=43275).
- Returning `MDBX_CORRUPTED` in case all meta-pages are weak and no other error.
- Refined mode bits while auto-creating LCK-file.
- Avoids unnecessary database file re-mapping in case the geometry was changed by another process(es).
  From the user's point of view, the `MDBX_UNABLE_EXTEND_MAPSIZE` error will now be returned less frequently and only when using the DB in the current process really requires it to be reopened.
- On-the-fly remapping of the database file was implemented.
Now remapping with a change of address is performed automatically if there are no dependent readers in the current process.
## 2020-06-12
- Minor versioning change: the last number in the version now means the number of commits since the last release/tag.
- Provide ChangeLog file.
- Fix for using libmdbx as a C-only sub-project with CMake.
- Fix `mdbx_env_set_geometry()` for case it is called from an opened environment outside of a write transaction.
- Add support for huge transactions and `MDBX_HUGE_TRANSACTIONS` build-option (default `OFF`).
- Refine LTO (link time optimization) for clang.
- Force enabling exceptions handling for MSVC (`/EHsc` option).
## 2020-06-05
- Support for Android/Bionic.
- Support for iOS.
- Auto-handling `MDBX_NOSUBDIR` while opening for any existing database.
- Engage github-actions to make release-assets.
- Clarify API description.
- Extended keygen-cases in stochastic test.
- Fix fetching of first/lower key from LEAF2-page during page merge.
- Fix missing comma in array of error messages.
- Fix div-by-zero while copy-with-compaction for non-resizable environments.
- Fixes & enhancements for custom-comparators.
- Fix `MDBX_WITHOUT_MSVC_CRT` option and missing `ntdll.def`.
- Fix `mdbx_env_close()` to work correctly called concurrently from several threads.
- Fix null-deref in an ASAN-enabled builds while opening the environment with error and/or read-only.
- Fix AddressSanitizer errors after closing the environment.
- Fix/workaround to avoid GCC 10.x pedantic warnings.
- Fix using `ENODATA` for FreeBSD.
- Avoid invalidation of DBI-handle(s) when they are just closed.
- Avoid using `pwritev()` for single-writes (up to 10% speedup for some kernels & scenarios).
- Avoiding `MDBX_UTTERLY_NOSYNC` as result of flags merge.
- Add `mdbx_dbi_dupsort_depthmask()` function.
- Add `MDBX_CP_FORCE_RESIZEABLE` option.
- Add deprecated `MDBX_MAP_RESIZED` for compatibility.
- Add `MDBX_BUILD_TOOLS` option (default `ON`).
- Refine `mdbx_dbi_open_ex()` to safe concurrently opening the same handle from different threads.
- Truncate the lck-file during environment closing. So a zero-length lck-file indicates that the environment was closed properly.
- Refine `mdbx_update_gc()` for huge transactions with small database page sizes.
- Extends dump/load to support all MDBX attributes.
- Avoid upserting the same key-value data, fix related assertions.
- Rework min/max length checking for keys & values.
- Checking the order of keys on all pages during checking.
- Support `CFLAGS_EXTRA` make-option for convenience.
- Preserve the last txnid while copying with compactification.
- Auto-reset running transaction in mdbx_txn_renew().
- Automatically abort errored transaction in mdbx_txn_commit().
- Auto-choose page size for large databases.
- Rearrange source files, rework build, options-support by CMake.
- Crutch for WSL1 (Windows subsystem for Linux).
- Refine install/uninstall targets.
- Support for Valgrind 3.14 and later.
- Add check-analyzer check-ubsan check-asan check-leak targets to Makefile.
- Minor fix/workaround to avoid UBSAN traps for `memcpy(ptr, NULL, 0)`.
- Avoid some GCC-analyzer false-positive warnings.
## 2020-03-18
- Workarounds for Wine (Windows compatibility layer for Linux).
- `MDBX_MAP_RESIZED` renamed to `MDBX_UNABLE_EXTEND_MAPSIZE`.
- Clarify API description, fix typos.
- Speedup runtime checks in debug/checked builds.
- Added checking for read/write transactions overlapping for the same thread, added `MDBX_TXN_OVERLAPPING` error and `MDBX_DBG_LEGACY_OVERLAP` option.
- Added `mdbx_key_from_jsonInteger()`, `mdbx_key_from_double()`, `mdbx_key_from_float()`, `mdbx_key_from_int64()` and `mdbx_key_from_int32()` functions. See `mdbx.h` for descriptions, and the sketch after this list.
- Fix compatibility (use zero for invalid DBI).
- Refine/clarify error messages.
- Avoids extra error messages "bad txn" from mdbx_chk when DB is corrupted.
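A brief sketch of the key-from helpers noted above for `MDBX_INTEGERKEY` tables (the helper name is illustrative):

```c
#include "mdbx.h"
#include <stdint.h>

/* Build a key for an MDBX_INTEGERKEY table from a signed 64-bit value;
 * mdbx_key_from_int64() maps the value so the built-in unsigned ordering
 * matches the signed ordering, avoiding the need for a custom comparator. */
MDBX_val int64_key(int64_t value, uint64_t *storage) {
  *storage = mdbx_key_from_int64(value);
  MDBX_val key = {storage, sizeof(*storage)};
  return key;
}
```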
## 2020-01-21
- Fix `mdbx_load` utility for custom comparators.
- Fix checks related to `MDBX_APPEND` flag inside `mdbx_cursor_put()`.
- Refine/fix dbi_bind() internals.
- Refine/fix handling `STATUS_CONFLICTING_ADDRESSES`.
- Rework `MDBX_DBG_DUMP` option to avoid disk I/O performance degradation.
- Add built-in help to test tool.
- Fix `mdbx_env_set_geometry()` for large page size.
- Clarify API description & comments, fix typos.
## 2019-12-31
- Fix returning MDBX_RESULT_TRUE from page_alloc().
- Fix false-positive ASAN issue.
- Fix assertion for `MDBX_NOTLS` option.
- Rework `MADV_DONTNEED` threshold.
- Fix `mdbx_chk` utility to not check some numbers if walking the B-tree was disabled.
- Use page's mp_txnid for basic integrity checking.
- Add `MDBX_FORCE_ASSERTIONS` built-time option.
- Rework `MDBX_DBG_DUMP` to avoid performance degradation.
- Rename `MDBX_NOSYNC` to `MDBX_SAFE_NOSYNC` for clarity.
- Interpret `ERROR_ACCESS_DENIED` from `OpenProcess()` as 'process exists'.
- Avoid using `FILE_FLAG_NO_BUFFERING` for compatibility with small database pages.
- Added install section for CMake.
## 2019-12-02
- Support for Mac OSX, FreeBSD, NetBSD, OpenBSD, DragonFly BSD, OpenSolaris, OpenIndiana (AIX and HP-UX pending).
- Use bootid for decisions of rollback.
- Counting retired pages and extended transaction info.
- Add `MDBX_ACCEDE` flag for database opening.
- Using OFD-locks and tracking for in-process multi-opening.
- Hot backup into pipe.
- Support for cmake & amalgamated sources.
- Fastest internal sort implementation.
- New internal dirty-list implementation with lazy sorting.
- Support for lazy-sync-to-disk with polling.
- Extended key length.
- Last update transaction number for each sub-database.
- Automatic read ahead enabling/disabling.
- More auto-compactification.
- Using -fsanitize=undefined and -Wpedantic options.
- Rework page merging.
- Nested transactions.
- API description.
- Checking for non-local filesystems to avoid DB corruption.
-------------------------------------------------------------------------------
For early changes see the git commit history.

View File

@ -0,0 +1,375 @@
# This makefile is for GNU Make 3.80 or above, and nowadays provided
# just for compatibility and preservation of traditions.
#
# Please use CMake in case of any difficulties or
# problems with this old-school's magic.
#
################################################################################
#
# Basic internal definitions. For customizable variables and options see below.
#
$(info // The GNU Make $(MAKE_VERSION))
SHELL := $(shell env bash -c 'echo $$BASH')
MAKE_VERx3 := $(shell printf "%3s%3s%3s" $(subst ., ,$(MAKE_VERSION)))
make_lt_3_81 := $(shell expr "$(MAKE_VERx3)" "<" " 3 81")
ifneq ($(make_lt_3_81),0)
$(error Please use GNU Make 3.81 or above)
endif
make_ge_4_1 := $(shell expr "$(MAKE_VERx3)" ">=" " 4 1")
SRC_PROBE_C := $(shell [ -f mdbx.c ] && echo mdbx.c || echo src/osal.c)
SRC_PROBE_CXX := $(shell [ -f mdbx.c++ ] && echo mdbx.c++ || echo src/mdbx.c++)
UNAME := $(shell uname -s 2>/dev/null || echo Unknown)
define cxx_filesystem_probe
int main(int argc, const char*argv[]) {
mdbx::filesystem::path probe(argv[0]);
if (argc != 1) throw mdbx::filesystem::filesystem_error(std::string("fake"), std::error_code());
return mdbx::filesystem::is_directory(probe.relative_path());
}
endef
#
################################################################################
#
# Use `make options` to list the available libmdbx build options.
#
# Note that the defaults should already be correct for most platforms;
# you should not need to change any of these. Read their descriptions
# in README and source code (see src/options.h) if you do.
#
# install sandbox
DESTDIR ?=
INSTALL ?= install
# install prefixes (inside sandbox)
prefix ?= /usr/local
mandir ?= $(prefix)/man
# lib/bin suffix for multiarch/biarch, e.g. '.x86_64'
suffix ?=
# toolchain
CC ?= gcc
CXX ?= g++
CFLAGS_EXTRA ?=
LD ?= ld
# build options
MDBX_BUILD_OPTIONS ?=-DNDEBUG=1
MDBX_BUILD_TIMESTAMP ?=$(shell date +%Y-%m-%dT%H:%M:%S%z)
# probe and compose common compiler flags with a variable expansion trick (it seems this works twice per session for GNU Make 3.81)
CFLAGS ?= $(strip $(eval CFLAGS := -std=gnu11 -O2 -g -Wall -Werror -Wextra -Wpedantic -ffunction-sections -fPIC -fvisibility=hidden -pthread -Wno-error=attributes $$(shell for opt in -fno-semantic-interposition -Wno-unused-command-line-argument -Wno-tautological-compare; do [ -z "$$$$($(CC) '-DMDBX_BUILD_FLAGS="probe"' $$$${opt} -c $(SRC_PROBE_C) -o /dev/null >/dev/null 2>&1 || echo failed)" ] && echo "$$$${opt} "; done)$(CFLAGS_EXTRA))$(CFLAGS))
# choosing the C++ standard with a variable expansion trick (it seems this works twice per session for GNU Make 3.81)
CXXSTD ?= $(eval CXXSTD := $$(shell for std in gnu++23 c++23 gnu++2b c++2b gnu++20 c++20 gnu++2a c++2a gnu++17 c++17 gnu++1z c++1z gnu++14 c++14 gnu++1y c++1y gnu+11 c++11 gnu++0x c++0x; do $(CXX) -std=$$$${std} -c $(SRC_PROBE_CXX) -o /dev/null 2>probe4std-$$$${std}.err >/dev/null && echo "-std=$$$${std}" && exit; done))$(CXXSTD)
CXXFLAGS ?= $(strip $(CXXSTD) $(filter-out -std=gnu11,$(CFLAGS)))
# libraries and options for linking
EXE_LDFLAGS ?= -pthread
ifneq ($(make_ge_4_1),1)
# don't use variable expansion trick as workaround for bugs of GNU Make before 4.1
LIBS ?= $(shell $(uname2libs))
LDFLAGS ?= $(shell $(uname2ldflags))
LIB_STDCXXFS ?= $(shell echo '$(cxx_filesystem_probe)' | cat mdbx.h++ - | sed $$'1s/\xef\xbb\xbf//' | $(CXX) -x c++ $(CXXFLAGS) -Wno-error - -Wl,--allow-multiple-definition -lstdc++fs $(LIBS) $(LDFLAGS) $(EXE_LDFLAGS) -o /dev/null 2>probe4lstdfs.err >/dev/null && echo '-Wl,--allow-multiple-definition -lstdc++fs')
else
# using the variable expansion trick to avoid repeated probes
LIBS ?= $(eval LIBS := $$(shell $$(uname2libs)))$(LIBS)
LDFLAGS ?= $(eval LDFLAGS := $$(shell $$(uname2ldflags)))$(LDFLAGS)
LIB_STDCXXFS ?= $(eval LIB_STDCXXFS := $$(shell echo '$$(cxx_filesystem_probe)' | cat mdbx.h++ - | sed $$$$'1s/\xef\xbb\xbf//' | $(CXX) -x c++ $(CXXFLAGS) -Wno-error - -Wl,--allow-multiple-definition -lstdc++fs $(LIBS) $(LDFLAGS) $(EXE_LDFLAGS) -o /dev/null 2>probe4lstdfs.err >/dev/null && echo '-Wl,--allow-multiple-definition -lstdc++fs'))$(LIB_STDCXXFS)
endif
################################################################################
define uname2sosuffix
case "$(UNAME)" in
Darwin*|Mach*) echo dylib;;
CYGWIN*|MINGW*|MSYS*|Windows*) echo dll;;
*) echo so;;
esac
endef
define uname2ldflags
case "$(UNAME)" in
CYGWIN*|MINGW*|MSYS*|Windows*)
echo '-Wl,--gc-sections,-O1';
;;
*)
$(LD) --help 2>/dev/null | grep -q -- --gc-sections && echo '-Wl,--gc-sections,-z,relro,-O1';
$(LD) --help 2>/dev/null | grep -q -- -dead_strip && echo '-Wl,-dead_strip';
;;
esac
endef
# TIP: try adding '-Wl,--no-as-needed,-lrt' for the ability to build with a modern glibc but then run with an old one.
define uname2libs
case "$(UNAME)" in
CYGWIN*|MINGW*|MSYS*|Windows*)
echo '-lm -lntdll -lwinmm';
;;
*SunOS*|*Solaris*)
echo '-lm -lkstat -lrt';
;;
*Darwin*|OpenBSD*)
echo '-lm';
;;
*)
echo '-lm -lrt';
;;
esac
endef
SO_SUFFIX := $(shell $(uname2sosuffix))
HEADERS := mdbx.h mdbx.h++
LIBRARIES := libmdbx.a libmdbx.$(SO_SUFFIX)
TOOLS := mdbx_stat mdbx_copy mdbx_dump mdbx_load mdbx_chk mdbx_drop
MANPAGES := mdbx_stat.1 mdbx_copy.1 mdbx_dump.1 mdbx_load.1 mdbx_chk.1 mdbx_drop.1
TIP := // TIP:
.PHONY: all help options lib libs tools clean install uninstall check_buildflags_tag tools-static
.PHONY: install-strip install-no-strip strip libmdbx mdbx show-options lib-static lib-shared
ifeq ("$(origin V)", "command line")
MDBX_BUILD_VERBOSE := $(V)
endif
ifndef MDBX_BUILD_VERBOSE
MDBX_BUILD_VERBOSE := 0
endif
ifeq ($(MDBX_BUILD_VERBOSE),1)
QUIET :=
HUSH :=
$(info $(TIP) Use `make V=0` for quiet.)
else
QUIET := @
HUSH := >/dev/null
$(info $(TIP) Use `make V=1` for verbose.)
endif
all: show-options $(LIBRARIES) $(TOOLS)
help:
@echo " make all - build libraries and tools"
@echo " make help - print this help"
@echo " make options - list build options"
@echo " make lib - build libraries, also lib-static and lib-shared"
@echo " make tools - build the tools"
@echo " make tools-static - build the tools with statically linking with system libraries and compiler runtime"
@echo " make clean "
@echo " make install "
@echo " make uninstall "
@echo ""
@echo " make strip - strip debug symbols from binaries"
@echo " make install-no-strip - install explicitly without strip"
@echo " make install-strip - install explicitly with strip"
@echo ""
@echo " make bench - run ioarena-benchmark"
@echo " make bench-couple - run ioarena-benchmark for mdbx and lmdb"
@echo " make bench-triplet - run ioarena-benchmark for mdbx, lmdb, sqlite3"
@echo " make bench-quartet - run ioarena-benchmark for mdbx, lmdb, rocksdb, wiredtiger"
@echo " make bench-clean - remove temp database(s) after benchmark"
show-options:
@echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)"
@echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)"
@echo '$(TIP) Use `make options` to listing available build options.'
@echo " CC =`which $(CC)` | `$(CC) --version | head -1`"
@echo " CFLAGS =$(CFLAGS)"
@echo " CXXFLAGS =$(CXXFLAGS)"
@echo " LDFLAGS =$(LDFLAGS) $(LIB_STDCXXFS) $(LIBS) $(EXE_LDFLAGS)"
@echo '$(TIP) Use `make help` to listing available targets.'
options:
@echo " INSTALL =$(INSTALL)"
@echo " DESTDIR =$(DESTDIR)"
@echo " prefix =$(prefix)"
@echo " mandir =$(mandir)"
@echo " suffix =$(suffix)"
@echo ""
@echo " CC =$(CC)"
@echo " CFLAGS_EXTRA =$(CFLAGS_EXTRA)"
@echo " CFLAGS =$(CFLAGS)"
@echo " CXX =$(CXX)"
@echo " CXXSTD =$(CXXSTD)"
@echo " CXXFLAGS =$(CXXFLAGS)"
@echo ""
@echo " LD =$(LD)"
@echo " LDFLAGS =$(LDFLAGS)"
@echo " EXE_LDFLAGS =$(EXE_LDFLAGS)"
@echo " LIBS =$(LIBS)"
@echo ""
@echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)"
@echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)"
@echo ""
@echo "## Assortment items for MDBX_BUILD_OPTIONS:"
@echo "## Note that the defaults should already be correct for most platforms;"
@echo "## you should not need to change any of these. Read their descriptions"
@echo "## in README and source code (see mdbx.c) if you do."
@grep -h '#ifndef MDBX_' mdbx.c | grep -v BUILD | uniq | sed 's/#ifndef / /'
lib libs libmdbx mdbx: libmdbx.a libmdbx.$(SO_SUFFIX)
tools: $(TOOLS)
tools-static: $(addsuffix .static,$(TOOLS)) $(addsuffix .static-lto,$(TOOLS))
strip: all
@echo ' STRIP libmdbx.$(SO_SUFFIX) $(TOOLS)'
$(TRACE )strip libmdbx.$(SO_SUFFIX) $(TOOLS)
clean:
@echo ' REMOVE ...'
$(QUIET)rm -rf $(TOOLS) mdbx_test @* *.[ao] *.[ls]o *.$(SO_SUFFIX) *.dSYM *~ tmp.db/* \
*.gcov *.log *.err src/*.o test/*.o mdbx_example dist \
config.h src/config.h src/version.c *.tar* buildflags.tag \
mdbx_*.static mdbx_*.static-lto
MDBX_BUILD_FLAGS =$(strip $(MDBX_BUILD_OPTIONS) $(CXXSTD) $(CFLAGS) $(LDFLAGS) $(LIBS))
check_buildflags_tag:
$(QUIET)if [ "$(MDBX_BUILD_FLAGS)" != "$$(cat buildflags.tag 2>&1)" ]; then \
echo -n " CLEAN for build with specified flags..." && \
$(MAKE) IOARENA=false CXXSTD= -s clean >/dev/null && echo " Ok" && \
echo '$(MDBX_BUILD_FLAGS)' > buildflags.tag; \
fi
buildflags.tag: check_buildflags_tag
lib-static libmdbx.a: mdbx-static.o mdbx++-static.o
@echo ' AR $@'
$(QUIET)$(AR) rcs $@ $? $(HUSH)
lib-shared libmdbx.$(SO_SUFFIX): mdbx-dylib.o mdbx++-dylib.o
@echo ' LD $@'
$(QUIET)$(CXX) $(CXXFLAGS) $^ -pthread -shared $(LDFLAGS) $(LIB_STDCXXFS) $(LIBS) -o $@
################################################################################
# Amalgamated source code, i.e. distributed after `make dist`
MAN_SRCDIR := man1/
config.h: buildflags.tag mdbx.c $(lastword $(MAKEFILE_LIST))
@echo ' MAKE $@'
$(QUIET)(echo '#define MDBX_BUILD_TIMESTAMP "$(MDBX_BUILD_TIMESTAMP)"' \
&& echo "#define MDBX_BUILD_FLAGS \"$$(cat buildflags.tag)\"" \
&& echo '#define MDBX_BUILD_COMPILER "$(shell (LC_ALL=C $(CC) --version || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \
&& echo '#define MDBX_BUILD_TARGET "$(shell set -o pipefail; (LC_ALL=C $(CC) -v 2>&1 | grep -i '^Target:' | cut -d ' ' -f 2- || (LC_ALL=C $(CC) --version | grep -qi e2k && echo E2K) || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \
) >$@
mdbx-dylib.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST))
@echo ' CC $@'
$(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c -o $@
mdbx-static.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST))
@echo ' CC $@'
$(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c -o $@
mdbx++-dylib.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST))
@echo ' CC $@'
$(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c++ -o $@
mdbx++-static.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST))
@echo ' CC $@'
$(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c++ -o $@
mdbx_%: mdbx_%.c mdbx-static.o
@echo ' CC+LD $@'
$(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' $^ $(EXE_LDFLAGS) $(LIBS) -o $@
mdbx_%.static: mdbx_%.c mdbx-static.o
@echo ' CC+LD $@'
$(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' $^ $(EXE_LDFLAGS) -static -Wl,--strip-all -o $@
mdbx_%.static-lto: mdbx_%.c config.h mdbx.c mdbx.h
@echo ' CC+LD $@'
$(QUIET)$(CC) $(CFLAGS) -Os -flto $(MDBX_BUILD_OPTIONS) '-DLIBMDBX_API=' '-DMDBX_CONFIG_H="config.h"' \
$< mdbx.c $(EXE_LDFLAGS) $(LIBS) -static -Wl,--strip-all -o $@
install: $(LIBRARIES) $(TOOLS) $(HEADERS)
@echo ' INSTALLING...'
$(QUIET)mkdir -p $(DESTDIR)$(prefix)/bin$(suffix) && \
$(INSTALL) -p $(EXE_INSTALL_FLAGS) $(TOOLS) $(DESTDIR)$(prefix)/bin$(suffix)/ && \
mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \
$(INSTALL) -p $(EXE_INSTALL_FLAGS) $(filter-out libmdbx.a,$(LIBRARIES)) $(DESTDIR)$(prefix)/lib$(suffix)/ && \
mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \
$(INSTALL) -p libmdbx.a $(DESTDIR)$(prefix)/lib$(suffix)/ && \
mkdir -p $(DESTDIR)$(prefix)/include/ && \
$(INSTALL) -p -m 444 $(HEADERS) $(DESTDIR)$(prefix)/include/ && \
mkdir -p $(DESTDIR)$(mandir)/man1/ && \
$(INSTALL) -p -m 444 $(addprefix $(MAN_SRCDIR), $(MANPAGES)) $(DESTDIR)$(mandir)/man1/
install-strip: EXE_INSTALL_FLAGS = -s
install-strip: install
install-no-strip: EXE_INSTALL_FLAGS =
install-no-strip: install
uninstall:
@echo ' UNINSTALLING/REMOVE...'
$(QUIET)rm -f $(addprefix $(DESTDIR)$(prefix)/bin$(suffix)/,$(TOOLS)) \
$(addprefix $(DESTDIR)$(prefix)/lib$(suffix)/,$(LIBRARIES)) \
$(addprefix $(DESTDIR)$(prefix)/include/,$(HEADERS)) \
$(addprefix $(DESTDIR)$(mandir)/man1/,$(MANPAGES))
################################################################################
# Benchmarking by ioarena
ifeq ($(origin IOARENA),undefined)
IOARENA := $(shell \
(test -x ../ioarena/@BUILD/src/ioarena && echo ../ioarena/@BUILD/src/ioarena) || \
(test -x ../../@BUILD/src/ioarena && echo ../../@BUILD/src/ioarena) || \
(test -x ../../src/ioarena && echo ../../src/ioarena) || which ioarena 2>&- || \
(echo false && echo '$(TIP) Clone and build the https://github.com/pmwkaa/ioarena.git within a neighbouring directory for availability of benchmarking.' >&2))
endif
NN ?= 25000000
BENCH_CRUD_MODE ?= nosync
bench-clean:
@echo ' REMOVE bench-*.txt _ioarena/*'
$(QUIET)rm -rf bench-*.txt _ioarena/*
re-bench: bench-clean bench
ifeq ($(or $(IOARENA),false),false)
bench bench-quartet bench-triplet bench-couple:
$(QUIET)echo 'The `ioarena` benchmark is required.' >&2 && \
echo 'Please clone and build the https://github.com/pmwkaa/ioarena.git within a neighbouring `ioarena` directory.' >&2 && \
false
else
.PHONY: bench bench-clean bench-couple re-bench bench-quartet bench-triplet
define bench-rule
bench-$(1)_$(2).txt: $(3) $(IOARENA) $(lastword $(MAKEFILE_LIST))
@echo ' RUNNING ioarena for $1/$2...'
$(QUIET)LD_LIBRARY_PATH="./:$$$${LD_LIBRARY_PATH}" \
$(IOARENA) -D $(1) -B crud -m $(BENCH_CRUD_MODE) -n $(2) \
| tee $$@ | grep throughput && \
LD_LIBRARY_PATH="./:$$$${LD_LIBRARY_PATH}" \
$(IOARENA) -D $(1) -B get,iterate -m $(BENCH_CRUD_MODE) -r 4 -n $(2) \
| tee -a $$@ | grep throughput \
|| mv -f $$@ $$@.error
endef
$(eval $(call bench-rule,mdbx,$(NN),libmdbx.$(SO_SUFFIX)))
$(eval $(call bench-rule,sophia,$(NN)))
$(eval $(call bench-rule,leveldb,$(NN)))
$(eval $(call bench-rule,rocksdb,$(NN)))
$(eval $(call bench-rule,wiredtiger,$(NN)))
$(eval $(call bench-rule,forestdb,$(NN)))
$(eval $(call bench-rule,lmdb,$(NN)))
$(eval $(call bench-rule,nessdb,$(NN)))
$(eval $(call bench-rule,sqlite3,$(NN)))
$(eval $(call bench-rule,ejdb,$(NN)))
$(eval $(call bench-rule,vedisdb,$(NN)))
$(eval $(call bench-rule,dummy,$(NN)))
bench: bench-mdbx_$(NN).txt
bench-quartet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-rocksdb_$(NN).txt bench-wiredtiger_$(NN).txt
bench-triplet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-sqlite3_$(NN).txt
bench-couple: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt
# $(eval $(call bench-rule,debug,10))
# .PHONY: bench-debug
# bench-debug: bench-debug_10.txt
endif
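# Benchmark usage sketch (an assumption for illustration: ioarena has been
# cloned and built in a neighbouring directory as expected above, or is
# otherwise available in PATH):
#   make bench NN=1000000
#   make bench-couple BENCH_CRUD_MODE=sync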

View File

@ -0,0 +1,47 @@
The OpenLDAP Public License
Version 2.8, 17 August 2003
Redistribution and use of this software and associated documentation
("Software"), with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions in source form must retain copyright statements
and notices,
2. Redistributions in binary form must reproduce applicable copyright
statements and notices, this list of conditions, and the following
disclaimer in the documentation and/or other materials provided
with the distribution, and
3. Redistributions must contain a verbatim copy of this document.
The OpenLDAP Foundation may revise this license from time to time.
Each revision is distinguished by a version number. You may use
this Software under terms of this license revision or under the
terms of any subsequent revision of the license.
THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS
CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S)
OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The names of the authors and copyright holders must not be used in
advertising or otherwise to promote the sale, use or other dealing
in this Software without specific, written prior permission. Title
to copyright in this Software shall at all times remain with copyright
holders.
OpenLDAP is a registered trademark of the OpenLDAP Foundation.
Copyright 1999-2003 The OpenLDAP Foundation, Redwood City,
California, USA. All Rights Reserved. Permission to copy and
distribute verbatim copies of this document is granted.

View File

@ -0,0 +1,15 @@
# This is a thunk-Makefile for invoking GNU Make 3.80 or above
all help options \
clean install install-no-strip install-strip strip tools uninstall \
bench bench-clean bench-couple bench-quartet bench-triplet re-bench \
lib libs lib-static lib-shared tools-static \
libmdbx mdbx mdbx_chk mdbx_copy mdbx_drop mdbx_dump mdbx_load mdbx_stat \
check dist memcheck cross-gcc cross-qemu doxygen gcc-analyzer reformat \
release-assets tags test build-test mdbx_test smoke smoke-fault smoke-singleprocess \
smoke-assertion test-assertion long-test-assertion \
test-asan test-leak test-singleprocess test-ubsan test-valgrind:
@CC=$(CC) \
CXX=`if test -n "$(CXX)" && which "$(CXX)" > /dev/null; then echo "$(CXX)"; elif test -n "$(CCC)" && which "$(CCC)" > /dev/null; then echo "$(CCC)"; else echo "c++"; fi` \
`which gmake || which gnumake || echo 'echo "GNU Make 3.80 or above is required"; exit 2;'` \
$(MAKEFLAGS) -f GNUmakefile $@

View File

@ -0,0 +1,750 @@
<!-- Required extensions: pymdownx.betterem, pymdownx.tilde, pymdownx.emoji, pymdownx.tasklist, pymdownx.superfences -->
### The origin has been migrated to [GitFlic](https://gitflic.ru/project/erthink/libmdbx)
since on 2022-04-15 the Github administration, without any warning
or explanation, deleted _libmdbx_ along with a lot of other projects,
simultaneously blocking access for many developers.
For the same reason ~~Github~~ is blacklisted forever.
GitFlic's developers plan to support other languages,
including English and Chinese, in the near future.
### The main repository has been moved to [GitFlic](https://gitflic.ru/project/erthink/libmdbx)
since on 2022-04-15 the Github administration, without any warning
or explanation, deleted _libmdbx_ along with a lot of other projects,
simultaneously blocking access for many developers.
For the same reason ~~Github~~ is blacklisted forever.
--------------------------------------------------------------------------------
*The Future will (be) [Positive](https://www.ptsecurity.com). Everything will be fine.*
> Please refer to the online [documentation](https://libmdbx.dqdkfa.ru)
> with [`C` API description](https://libmdbx.dqdkfa.ru/group__c__api.html)
> and pay attention to the [`C++` API](https://gitflic.ru/project/erthink/libmdbx/blob?file=mdbx.h%2B%2B#line-num-1).
> Questions, feedback and suggestions are welcome in the [Telegram group](https://t.me/libmdbx).
> For NEWS take a look at the [ChangeLog](https://gitflic.ru/project/erthink/libmdbx/blob?file=ChangeLog.md)
> or the [TODO](https://gitflic.ru/project/erthink/libmdbx/blob?file=TODO.md).
libmdbx
========
<!-- section-begin overview -->
_libmdbx_ is an extremely fast, compact, powerful, embedded, transactional
[key-value database](https://en.wikipedia.org/wiki/Key-value_database),
with [permissive license](https://gitflic.ru/project/erthink/libmdbx/blob?file=LICENSE).
_libmdbx_ has a specific set of properties and capabilities,
focused on creating unique lightweight solutions.
1. Allows **a swarm of multi-threaded processes to
[ACID](https://en.wikipedia.org/wiki/ACID)ly read and update** several
key-value [maps](https://en.wikipedia.org/wiki/Associative_array) and
[multimaps](https://en.wikipedia.org/wiki/Multimap) in a locally-shared
database.
2. Provides **extraordinary performance**, minimal overhead through
[Memory-Mapping](https://en.wikipedia.org/wiki/Memory-mapped_file) and
`O(log N)` operation costs by virtue of the [B+
tree](https://en.wikipedia.org/wiki/B%2B_tree).
3. Requires **no maintenance and no crash recovery** since it doesn't use
[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging), but that might
be a caveat for write-intensive workloads with durability requirements.
4. **Compact and friendly for fully embedding**. Only ≈25KLOC of `C11`,
≈64K x86 binary code of the core, no internal threads nor server process(es),
but implements a simplified variant of the [Berkeley
DB](https://en.wikipedia.org/wiki/Berkeley_DB) and
[dbm](https://en.wikipedia.org/wiki/DBM_(computing)) API.
5. Enforces [serializability](https://en.wikipedia.org/wiki/Serializability) for
writers just by a single
[mutex](https://en.wikipedia.org/wiki/Mutual_exclusion) and affords
[wait-free](https://en.wikipedia.org/wiki/Non-blocking_algorithm#Wait-freedom)
access for parallel readers without atomic/interlocked operations, while
**writing and reading transactions do not block each other**.
6. **Guarantees data integrity** after a crash unless this was explicitly
neglected in favour of write performance.
7. Supports Linux, Windows, MacOS, Android, iOS, FreeBSD, DragonFly, Solaris,
OpenSolaris, OpenIndiana, NetBSD, OpenBSD and other systems compliant with
**POSIX.1-2008**.
<!-- section-end -->
Historically, _libmdbx_ is a deeply revised and extended descendant of the amazing
[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database).
_libmdbx_ inherits all benefits from _LMDB_, but resolves some issues and adds [a set of improvements](#improvements-beyond-lmdb).
<!-- section-begin mithril -->
The next version is under active non-public development from scratch and will be
released as **MithrilDB** and `libmithrildb` for libraries & packages.
The admittedly mythical [Mithril](https://en.wikipedia.org/wiki/Mithril)
resembles silver but is stronger and lighter than steel. Therefore
_MithrilDB_ is a fittingly relevant name.
> _MithrilDB_ will be radically different from _libmdbx_ by the new
> database format and API based on C++17, as well as the [Apache 2.0
> License](https://www.apache.org/licenses/LICENSE-2.0). The goal of this
> revolution is to provide a clearer and more robust API, to add more features
> and new valuable properties to the database.
<!-- section-end -->
-----
## Table of Contents
- [Characteristics](#characteristics)
- [Features](#features)
- [Limitations](#limitations)
- [Gotchas](#gotchas)
- [Comparison with other databases](#comparison-with-other-databases)
- [Improvements beyond LMDB](#improvements-beyond-lmdb)
- [History & Acknowledgments](#history)
- [Usage](#usage)
- [Building and Testing](#building-and-testing)
- [API description](#api-description)
- [Bindings](#bindings)
- [Performance comparison](#performance-comparison)
- [Integral performance](#integral-performance)
- [Read scalability](#read-scalability)
- [Sync-write mode](#sync-write-mode)
- [Lazy-write mode](#lazy-write-mode)
- [Async-write mode](#async-write-mode)
- [Cost comparison](#cost-comparison)
# Characteristics
<!-- section-begin characteristics -->
## Features
- Key-value data model, keys are always sorted.
- Fully [ACID](https://en.wikipedia.org/wiki/ACID)-compliant, through to
[MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
and [CoW](https://en.wikipedia.org/wiki/Copy-on-write).
- Multiple key-value sub-databases within a single datafile.
- Range lookups, including range query estimation.
- Efficient support for short fixed length keys, including native 32/64-bit integers.
- Ultra-efficient support for [multimaps](https://en.wikipedia.org/wiki/Multimap). Multi-values sorted, searchable and iterable. Keys stored without duplication.
- Data is [memory-mapped](https://en.wikipedia.org/wiki/Memory-mapped_file) and accessible directly/zero-copy. Traversal of database records is extremely fast.
- Transactions for readers and writers that do not block each other.
- Writes are strongly serialized. No transaction conflicts nor deadlocks.
- Readers are [non-blocking](https://en.wikipedia.org/wiki/Non-blocking_algorithm), notwithstanding [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation).
- Nested write transactions.
- Reads scale linearly across CPUs.
- Continuous zero-overhead database compactification.
- Automatic on-the-fly database size adjustment.
- Customizable database page size.
- `O(log N)` cost of lookup, insert, update, and delete operations by virtue of [B+ tree characteristics](https://en.wikipedia.org/wiki/B%2B_tree#Characteristics).
- Online hot backup.
- Append operation for efficient bulk insertion of pre-sorted data.
- No [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) nor any
transaction journal. No crash recovery needed. No maintenance is required.
- No internal cache and/or memory management, all done by basic OS services.
## Limitations
- **Page size**: a power of 2, minimum `256` (mostly for testing), maximum `65536` bytes, default `4096` bytes.
- **Key size**: minimum `0`, maximum ≈½ pagesize (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize).
- **Value size**: minimum `0`, maximum `2146435072` (`0x7FF00000`) bytes for maps, ≈½ pagesize for multimaps (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize).
- **Write transaction size**: up to `1327217884` pages (`4.944272` TiB for default 4K pagesize, `79.108351` TiB for 64K pagesize).
- **Database size**: up to `2147483648` pages (≈`8.0` TiB for default 4K pagesize, ≈`128.0` TiB for 64K pagesize).
- **Maximum sub-databases**: `32765`.
## Gotchas
1. There cannot be more than one writer at a time, i.e. no more than one write transaction at a time.
2. _libmdbx_ is based on [B+ tree](https://en.wikipedia.org/wiki/B%2B_tree), so access to database pages is mostly random.
Thus SSDs provide a significant performance boost over spinning disks for large databases.
3. _libmdbx_ uses [shadow paging](https://en.wikipedia.org/wiki/Shadow_paging) instead of [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging).
Thus syncing data to disk might be a bottleneck for write intensive workload.
4. _libmdbx_ uses [copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write) for [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) during updates,
but a read transaction prevents recycling of old retired/freed pages, since it still reads them. Thus altering data during a parallel
long-lived read operation will increase the process working set, may exhaust the entire free database space,
make the database grow quickly, and result in performance degradation.
Try to avoid long-running read transactions.
5. _libmdbx_ is extraordinarily fast and provides minimal overhead for data access,
so you should reconsider brute-force techniques and double-check your code.
On the one hand, in the case of _libmdbx_, a simple linear search may be more profitable than complex indexes.
On the other hand, if you do something suboptimally, you may notice the harm only on sufficiently large data.
## Comparison with other databases
For now please refer to the [chapter "BoltDB comparison with other
databases"](https://github.com/coreos/bbolt#comparison-with-other-databases)
which is also (mostly) applicable to _libmdbx_.
<!-- section-end -->
<!-- section-begin improvements -->
Improvements beyond LMDB
========================
_libmdbx_ is superior to the legendary _[LMDB](https://symas.com/lmdb/)_ in
terms of features and reliability, and not inferior in performance. In
comparison to _LMDB_, _libmdbx_ makes things "just work" perfectly and
out-of-the-box, rather than silently and catastrophically breaking down. The list
below is pruned down to the improvements most notable and obvious from
the user's point of view.
## Added Features
1. Keys can be more than 2 times longer than in _LMDB_.
> For a DB with the default page size _libmdbx_ supports keys up to 2022 bytes,
> and up to 32742 bytes for a 64K page size. _LMDB_ allows keys only up to
> 511 bytes and may silently lose data with larger values.
2. Up to 30% faster than _LMDB_ in [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) benchmarks.
> Benchmarks of the in-[tmpfs](https://en.wikipedia.org/wiki/Tmpfs) scenarios,
> which test the speed of the engine itself, showed that _libmdbx_ is 10-20% faster than _LMDB_,
> and up to 30% faster when _libmdbx_ is compiled with specific build options
> which downgrade several runtime checks to match LMDB's behaviour.
>
> These and other results can be easily reproduced with [ioArena](https://github.com/pmwkaa/ioarena) just by the `make bench-quartet` command (see the sketch after this list),
> including comparisons with [RocksDB](https://en.wikipedia.org/wiki/RocksDB)
> and [WiredTiger](https://en.wikipedia.org/wiki/WiredTiger).
3. Automatic on-the-fly database size adjustment, both increment and reduction.
> _libmdbx_ manages the database size according to parameters specified
> by the `mdbx_env_set_geometry()` function,
> which include the growth step and the truncation threshold.
>
> Unfortunately, on-the-fly database size adjustment doesn't work under [Wine](https://en.wikipedia.org/wiki/Wine_(software))
> due to its internal limitations and unimplemented functions, i.e. the `MDBX_UNABLE_EXTEND_MAPSIZE` error will be returned.
4. Automatic continuous zero-overhead database compactification.
> During each commit _libmdbx_ merges freed pages which are adjacent to the unallocated area
> at the end of the file, and then truncates the unused space once enough of it has accumulated.
5. The same database format for 32- and 64-bit builds.
> _libmdbx_ database format depends only on the [endianness](https://en.wikipedia.org/wiki/Endianness) but not on the [bitness](https://en.wiktionary.org/wiki/bitness).
6. LIFO policy for Garbage Collection recycling. Thanks to the write-back disk cache, this can significantly increase write performance, up to several times in the best-case scenario.
> LIFO means that the pages which became unused most recently will be taken for reuse first.
> Therefore the loop of database page circulation becomes as short as possible.
> In other words, the set of pages that are (over)written in memory and on disk during a series of write transactions will be as small as possible.
> This creates ideal conditions for the efficiency of a battery-backed or flash-backed disk cache.
7. Fast estimation of range query result volume, i.e. how many items can
be found between a `KEY1` and a `KEY2`. This is a prerequisite for building
and/or optimizing query execution plans.
> _libmdbx_ performs a rough estimate based on the common B-tree pages of the paths from the root to the corresponding keys.
8. `mdbx_chk` utility for database integrity checks.
Since version 0.9.1, the utility supports checking the database using any of the three meta pages, with the ability to switch between them.
9. Support for opening databases in the exclusive mode, including on a network share.
10. Support for zero-length keys and values.
11. Ability to determine whether particular data is on a dirty page
or not, which makes it possible to avoid a copy-out before updates.
12. Extended information about the whole database, sub-databases, transactions, and the enumeration of readers.
> _libmdbx_ provides a lot of information, including dirty and leftover pages
> for a write transaction, and the reading lag and holdover space for read transactions.
13. Extended update and delete operations.
> _libmdbx_ allows updating or deleting _and_ getting the previous value at once,
> as well as addressing a particular item among the multi-values with the same key.
14. Useful runtime options for tuning the engine to the application's requirements and specific use cases.
15. Automated steady sync-to-disk upon several thresholds and/or timeout via cheap polling.
16. Sequence generation and three persistent 64-bit markers.
17. Handle-Slow-Readers callback to resolve database full/overflow issues caused by long-lived read transaction(s).
18. Ability to determine whether the cursor points to a key-value
pair, to the first or the last one, or is not set to anything.
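For the benchmarks mentioned in item 2 above, a minimal reproduction sketch (an assumption for illustration: `ioarena` is built so that `../ioarena/@BUILD/src/ioarena` exists, as the `GNUmakefile` expects, or is otherwise available in `PATH`):

```sh
# Clone ioarena into a neighbouring directory and build it per its README,
# then run the combined benchmark from the libmdbx source directory:
git clone https://github.com/pmwkaa/ioarena.git ../ioarena
# ... build ioarena so that ../ioarena/@BUILD/src/ioarena exists ...
make bench-quartet        # mdbx, lmdb, rocksdb, wiredtiger
make bench-clean          # remove the temporary databases afterwards
```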
## Other fixes and specifics
1. Fixed more than 10 significant errors, in particular: page leaks,
wrong sub-database statistics, segfault in several conditions,
nonoptimal page merge strategy, updating an existing record with
a change in data size (including for multimap), etc.
2. All cursors can be reused and should be closed explicitly,
regardless of whether they were opened within a write or a read transaction.
3. Opening of database handles is free of race conditions, and
pre-opening is not needed.
4. Returning `MDBX_EMULTIVAL` error in case of ambiguous update or delete.
5. Guarantee of database integrity even in asynchronous unordered write-to-disk mode.
> _libmdbx_ proposes an additional trade-off via `MDBX_SAFE_NOSYNC`, which applies updates in an append-like manner
> and, contrary to LMDB, avoids database corruption after a system crash.
> Nevertheless, the `MDBX_UTTERLY_NOSYNC` mode is available to match LMDB's behaviour for `MDB_NOSYNC`.
6. On **MacOS & iOS** the `fcntl(F_FULLFSYNC)` syscall is used _by
default_ to synchronize data with the disk, as this is [the only way to
guarantee data
durability](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fsync.2.html)
in case of power failure. Unfortunately, in scenarios with high write
intensity, the use of `F_FULLFSYNC` significantly degrades performance
compared to LMDB, where the `fsync()` syscall is used. Therefore,
_libmdbx_ allows you to override this behavior by defining the
`MDBX_OSX_SPEED_INSTEADOF_DURABILITY=1` option while building the library.
7. On **Windows** the `LockFileEx()` syscall is used for locking, since
it allows placing the database on network drives, and provides protection
against incompetent user actions (aka
[poka-yoke](https://en.wikipedia.org/wiki/Poka-yoke)). Therefore
_libmdbx_ may lag slightly behind LMDB in performance tests, since LMDB
uses named mutexes there.
<!-- section-end -->
<!-- section-begin history -->
# History
Historically, _libmdbx_ is a deeply revised and extended descendant of the
[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database).
At first the development was carried out within the
[ReOpenLDAP](https://web.archive.org/web/20220414235959/https://github.com/erthink/ReOpenLDAP) project. About a
year later _libmdbx_ was separated into a standalone project, which was
[presented at Highload++ 2015
conference](http://www.highload.ru/2015/abstracts/1831.html).
Since 2017 _libmdbx_ is used in [Fast Positive Tables](https://gitflic.ru/project/erthink/libfpta),
and development is funded by [Positive Technologies](https://www.ptsecurity.com).
On 2022-04-15 the Github administration, without any warning or
explanation, deleted _libmdbx_ along with a lot of other projects,
simultaneously blocking access for many developers. Therefore on
2022-04-21 we migrated to reliable trusted infrastructure.
The origin for now is at [GitFlic](https://gitflic.ru/project/erthink/libmdbx)
with a backup at [ABF by ROSA Lab](https://abf.rosalinux.ru/erthink/libmdbx).
For the same reason ~~Github~~ is blacklisted forever.
## Acknowledgments
Howard Chu <hyc@openldap.org> is the author of LMDB, from which
_libmdbx_ originated in 2015.
Martin Hedenfalk <martin@bzero.se> is the author of `btree.c` code, which
was used to begin development of LMDB.
<!-- section-end -->
--------------------------------------------------------------------------------
Usage
=====
<!-- section-begin usage -->
Currently, libmdbx is only available in
[source code](https://en.wikipedia.org/wiki/Source_code) form.
Package support for common Linux distributions is planned for the future,
after the release of version 1.0.
## Source code embedding
_libmdbx_ provides two official ways for integration in source code form:
1. Using the amalgamated source code.
> The amalgamated source code includes all files required to build and
> use _libmdbx_, but not for testing _libmdbx_ itself.
2. Adding the complete original source code as a `git submodule`.
> This allows you to build both _libmdbx_ and its testing tool.
> On the other hand, this way requires you to pull git tags and to use a C++11 compiler for the test tool.
_**Please, avoid using any other techniques.**_ Otherwise, at least
don't ask for support and don't name such chimeras `libmdbx`.
The amalgamated source code can be created from an original clone of the git
repository on Linux by executing `make dist`. As a result, the desired
set of files will be formed in the `dist` subdirectory.
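For instance, a rough sketch of the two ways (the submodule path `libmdbx/` and the use of the GitFlic clone URL are illustrative assumptions):

```sh
# Way 2: add the complete original source code as a git submodule
git submodule add https://gitflic.ru/project/erthink/libmdbx.git libmdbx
git submodule update --init --recursive

# Way 1: produce the amalgamated source code from an original clone (on Linux)
make dist    # the result is placed into the `dist` subdirectory
```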
## Building and Testing
Both the amalgamated and the original source code can be built using
[CMake](https://cmake.org/) or [GNU
Make](https://www.gnu.org/software/make/) with
[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). All build paths
are completely traditional and have minimal prerequisites like
`build-essential`, i.e. a non-obsolete C/C++ compiler and an
[SDK](https://en.wikipedia.org/wiki/Software_development_kit) for the
target platform. Obviously you also need the build tools themselves, i.e. `git`,
`cmake` or GNU `make` with `bash`. For your convenience, `make help`
and `make options` are also available for listing the existing targets
and build options respectively.
The only significant peculiarity is that git tags are required
to build from the complete (non-amalgamated) source code.
Executing **`git fetch --tags --force --prune`** is enough to get them,
and `--unshallow` or `--update-shallow` is required for the shallow-clone case.
So just use CMake or GNU Make in your habitual manner, and feel free to
file an issue or make a pull request in case something turns out to be
unexpected or broken.
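As a minimal illustration (the GitFlic clone URL and the out-of-source `build/` directory are assumptions of this sketch):

```sh
# Get the complete source code with tags
git clone https://gitflic.ru/project/erthink/libmdbx.git
cd libmdbx
git fetch --tags --force --prune

# Either build with GNU Make ...
make all && make check

# ... or configure and build out-of-source with CMake
mkdir -p build && cd build
cmake .. && cmake --build .
```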
### Testing
The amalgamated source code does not contain any tests, for several reasons.
Please read [the explanation](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/issues/214#issuecomment-870717981) and don't ask to alter this.
So for testing _libmdbx_ itself you need the full source code, i.e. a clone of the git repository; there is no other option.
The full source code of _libmdbx_ has a [`test` subdirectory](https://gitflic.ru/project/erthink/libmdbx/tree/master/test) with a minimalistic test "framework".
It actually contains the source code of the `mdbx_test` console utility, which has a set of command-line options that allow constructing and running reasonably thorough test scenarios.
This test utility is intended for _libmdbx_'s developers for testing the library itself, not for use by end users.
Therefore, only basic information is provided:
- There are a few CRUD-based test cases (hill, TTL, nested, append, jitter, etc.),
which can be combined to test concurrent operations within a shared database in a multi-process environment.
This is the `basic` test scenario.
- The `Makefile` provides several self-described targets for testing: `smoke`, `test`, `check`, `memcheck`, `test-valgrind`,
`test-asan`, `test-leak`, `test-ubsan`, `cross-gcc`, `cross-qemu`, `gcc-analyzer`, `smoke-fault`, `smoke-singleprocess`,
`test-singleprocess`, `long-test` (see the sketch after this list). Please run `make --help` if in doubt.
- In addition to the `mdbx_test` utility, there is the script [`long_stochastic.sh`](https://gitflic.ru/project/erthink/libmdbx/blob/master/test/long_stochastic.sh),
which calls `mdbx_test`, going through a set of modes and options while gradually increasing the number of operations and the size of transactions.
This script is used for most of the automatic testing, including the `Makefile` targets and Continuous Integration.
- Brief information about the available command-line options is printed by `--help`.
However, you should dive into the source code to get the full picture; there is no other option.
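A quick sketch of kicking off the bundled tests from a full git clone (the target names come from the `Makefile` listed above; which scenarios each target runs is best checked in the `Makefile` itself):

```sh
make smoke      # quick smoke test
make check      # basic test set
make test       # a more thorough test run
```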
Anyway, no matter how thoroughly _libmdbx_ is tested, you should rely only on your own tests, for a few reasons:
1. Most use cases are unique.
So there is no warranty that your use case was properly tested, even though _libmdbx_'s tests engage a stochastic approach.
2. If there are problems, then your test will, on the one hand, help to verify whether you are using _libmdbx_ correctly,
and on the other hand allow reproducing the problem and insuring against regressions in the future.
3. Actually you should rely only on what you have checked yourself, or accept the risk.
### Common important details
#### Build reproducibility
By default _libmdbx_ tracks the build time via the `MDBX_BUILD_TIMESTAMP` build option and macro.
So for [reproducible builds](https://en.wikipedia.org/wiki/Reproducible_builds) you should predefine/override it with a known fixed string value.
For instance:
- for reproducible build with make: `make MDBX_BUILD_TIMESTAMP=unknown ` ...
- or during configure by CMake: `cmake -DMDBX_BUILD_TIMESTAMP:STRING=unknown ` ...
Of course, in addition to this, your toolchain must ensure the reproducibility of builds.
For more information please refer to [reproducible-builds.org](https://reproducible-builds.org/).
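For example, a sketch using the fixed value `unknown` mentioned above:

```sh
# Reproducible build with GNU Make
make MDBX_BUILD_TIMESTAMP=unknown all

# Reproducible configure with CMake
cmake -DMDBX_BUILD_TIMESTAMP:STRING=unknown ..
```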
#### Containers
There are no special traits nor quirks if you use libmdbx ONLY inside a single container.
But in cross-container cases, or with a mix of host and container(s), the two major things MUST be
guaranteed:
1. Coherence of the memory-mapping content and the unified page cache inside the OS kernel for the host and all container(s) operating with a DB.
Basically this means there must be only a single physical copy of each memory-mapped DB page in system memory.
2. Uniqueness of [PID](https://en.wikipedia.org/wiki/Process_identifier) values and/or a common space for them:
- for POSIX systems: PID uniqueness for all processes operating with a DB.
I.e. `--pid=host` is required to run DB-aware processes inside Docker,
or, without host interaction, `--pid=container:<name|id>` with the same name/id for all of them.
- for non-POSIX (i.e. Windows) systems: inter-visibility of process handles.
I.e. `OpenProcess(SYNCHRONIZE, ..., PID)` must return a reasonable error,
including `ERROR_ACCESS_DENIED`,
but not `ERROR_INVALID_PARAMETER` as for an invalid/non-existent PID.
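A minimal Docker sketch of the PID-space requirement (the image name `myapp` and the bind-mounted `/srv/db` path are illustrative assumptions):

```sh
# Share the host PID namespace with a DB-aware container
docker run --pid=host -v /srv/db:/db myapp

# Or give several containers a common PID namespace without host interaction
docker run --name db-writer -v /srv/db:/db myapp
docker run --pid=container:db-writer -v /srv/db:/db myapp
```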
#### DSO/DLL unloading and destructors of Thread-Local-Storage objects
When building _libmdbx_ as a shared library, or using a static _libmdbx_ as
part of another dynamic library, it is advisable to make sure that your
system correctly calls the destructors of
Thread-Local-Storage objects when unloading dynamic libraries.
If this is not the case, then unloading a dynamic-link library with
_libmdbx_ code inside can result in either a resource leak or a crash
due to calling destructors from an already unloaded DSO/DLL object. The
problem can only manifest itself in a multithreaded application which
unloads a shared dynamic library with _libmdbx_ code inside
after using _libmdbx_. It is known that TLS destructors are properly
maintained in the following cases:
- On all modern versions of Windows (Windows 7 and later).
- On systems with the
[`__cxa_thread_atexit_impl()`](https://sourceware.org/glibc/wiki/Destructor%20support%20for%20thread_local%20variables)
function in the standard C library, including systems with GNU libc
version 2.18 and later.
- On systems with libpthread/ntpl from GNU libc with bug fixes
[#21031](https://sourceware.org/bugzilla/show_bug.cgi?id=21031) and
[#21032](https://sourceware.org/bugzilla/show_bug.cgi?id=21032), or
where there are no similar bugs in the pthreads implementation.
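As a quick sanity check on GNU/Linux systems, one might inspect the glibc version and look for the symbol mentioned above (the libc path below is an assumption and varies per distribution):

```sh
# Print the glibc version (2.18 or later is sufficient)
getconf GNU_LIBC_VERSION

# Check whether __cxa_thread_atexit_impl is provided by the C library
nm -D /lib/x86_64-linux-gnu/libc.so.6 | grep __cxa_thread_atexit_impl
```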
### Linux and other platforms with GNU Make
To build the library it is enough to execute `make all` in the directory
of source code, and `make check` to execute the basic tests.
If the `make` installed on the system is not GNU Make, there will be a
lot of errors from make when trying to build. In this case, perhaps you
should use `gmake` instead of `make`, or even `gnu-make`, etc.
### FreeBSD and related platforms
As a rule, on BSD and its derivatives the default is to use Berkeley Make, and
[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is not installed.
So you need to install the required components: GNU Make, Bash, C and C++
compilers compatible with GCC or CLANG. After that, to build the
library, it is enough to execute `gmake all` (or `make all`) in the
directory with source code, and `gmake check` (or `make check`) to run
the basic tests.
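For instance, on FreeBSD the prerequisites might be installed roughly like this (the package names `gmake` and `bash` are assumptions based on the standard package collection):

```sh
# Install GNU Make and Bash from packages, then build and test
pkg install gmake bash
gmake all && gmake check
```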
### Windows
To build _libmdbx_ on Windows, the _original_ CMake and [Microsoft Visual
Studio 2019](https://en.wikipedia.org/wiki/Microsoft_Visual_Studio) are
recommended. Please use recent versions of CMake, Visual Studio and the Windows
SDK to avoid troubles with C11 support and the `alignas()` feature.
To build with MinGW, version 10.2 or more recent coupled with a modern CMake is required,
so it is recommended to use [chocolatey](https://chocolatey.org/) to install and/or update them.
Other ways to build are potentially possible, but they are not supported and will not be.
The `CMakeLists.txt` or `GNUMakefile` scripts will probably need to be modified accordingly.
When using other methods, do not forget to add `ntdll.lib` to the link step.
It should be noted that efforts have been made in _libmdbx_ to avoid
runtime dependencies on the CRT and other MSVC libraries.
For this it is enough to pass the `-DMDBX_WITHOUT_MSVC_CRT:BOOL=ON` option
when configuring with CMake.
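For example, a configuration sketch from a developer command prompt (the generator name and the `build` directory are assumptions; adjust them to your Visual Studio version):

```sh
# Configure without CRT/MSVC runtime dependencies and build the Release config
cmake -G "Visual Studio 16 2019" -DMDBX_WITHOUT_MSVC_CRT:BOOL=ON -S . -B build
cmake --build build --config Release
```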
An example of running a basic test script can be found in the
[CI-script](appveyor.yml) for [AppVeyor](https://www.appveyor.com/). To
run the [long stochastic test scenario](test/long_stochastic.sh),
[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is required, and
such testing is recommended with placing the test data on the
[RAM-disk](https://en.wikipedia.org/wiki/RAM_drive).
### Windows Subsystem for Linux
_libmdbx_ could be used in [WSL2](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_2)
but NOT in [WSL1](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_1) environment.
This is a consequence of the fundamental shortcomings of _WSL1_ and cannot be fixed.
To avoid data loss, _libmdbx_ returns the `ENOLCK` (37, "No record locks available")
error when opening the database in a _WSL1_ environment.
### MacOS
Current [native build tools](https://en.wikipedia.org/wiki/Xcode) for
MacOS include GNU Make, CLANG and an outdated version of Bash.
Therefore, to build the library, it is enough to run `make all` in the
directory with source code, and run `make check` to execute the base
tests. If something goes wrong, it is recommended to install
[Homebrew](https://brew.sh/) and try again.
To run the [long stochastic test scenario](test/long_stochastic.sh), you
will need to install the current (not outdated) version of
[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). To do this, we
recommend that you install [Homebrew](https://brew.sh/) and then execute
`brew install bash`.
### Android
We recommend using CMake to build _libmdbx_ for Android.
Please refer to the [official guide](https://developer.android.com/studio/projects/add-native-code).
### iOS
To build _libmdbx_ for iOS, we recommend using CMake with the
["toolchain file"](https://cmake.org/cmake/help/latest/variable/CMAKE_TOOLCHAIN_FILE.html)
from the [ios-cmake](https://github.com/leetal/ios-cmake) project.
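A configuration sketch (the toolchain file name `ios.toolchain.cmake` and the `PLATFORM=OS64` value are assumptions taken from the ios-cmake project's conventions):

```sh
# Configure a cross-build for iOS with the ios-cmake toolchain file
cmake -S . -B build-ios \
      -DCMAKE_TOOLCHAIN_FILE=../ios-cmake/ios.toolchain.cmake \
      -DPLATFORM=OS64
cmake --build build-ios --config Release
```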
<!-- section-end -->
## API description
Please refer to the online [_libmdbx_ API reference](https://libmdbx.dqdkfa.ru/docs)
and/or see the [mdbx.h++](mdbx.h%2B%2B) and [mdbx.h](mdbx.h) headers.
<!-- section-begin bindings -->
Bindings
========
| Runtime | Repo | Author |
| ------- | ------ | ------ |
| Scala | [mdbx4s](https://github.com/david-bouyssie/mdbx4s) | [David Bouyssié](https://github.com/david-bouyssie) |
| Haskell | [libmdbx-hs](https://hackage.haskell.org/package/libmdbx) | [Francisco Vallarino](https://github.com/fjvallarino) |
| NodeJS, [Deno](https://deno.land/) | [lmdbx-js](https://github.com/kriszyp/lmdbx-js) | [Kris Zyp](https://github.com/kriszyp/)
| NodeJS | [node-mdbx](https://www.npmjs.com/package/node-mdbx/) | [Сергей Федотов](mailto:sergey.fedotov@corp.mail.ru) |
| Ruby | [ruby-mdbx](https://rubygems.org/gems/mdbx/) | [Mahlon E. Smith](https://github.com/mahlonsmith) |
| Go | [mdbx-go](https://github.com/torquem-ch/mdbx-go) | [Alex Sharov](https://github.com/AskAlexSharov) |
| [Nim](https://en.wikipedia.org/wiki/Nim_(programming_language)) | [NimDBX](https://github.com/snej/nimdbx) | [Jens Alfke](https://github.com/snej)
| Lua | [lua-libmdbx](https://github.com/mah0x211/lua-libmdbx) | [Masatoshi Fukunaga](https://github.com/mah0x211) |
| Rust | [libmdbx-rs](https://github.com/vorot93/libmdbx-rs) | [Artem Vorotnikov](https://github.com/vorot93) |
| Rust | [mdbx](https://crates.io/crates/mdbx) | [gcxfd](https://github.com/gcxfd) |
| Java | [mdbxjni](https://github.com/castortech/mdbxjni) | [Castor Technologies](https://castortech.com/) |
| Python (draft) | [python-bindings](https://web.archive.org/web/20220414235959/https://github.com/erthink/libmdbx/commits/python-bindings) branch | [Noel Kuntze](https://github.com/Thermi)
| .NET (obsolete) | [mdbx.NET](https://github.com/wangjia184/mdbx.NET) | [Jerry Wang](https://github.com/wangjia184) |
<!-- section-end -->
--------------------------------------------------------------------------------
<!-- section-begin performance -->
Performance comparison
======================
All benchmarks were done in 2015 with [IOArena](https://github.com/pmwkaa/ioarena)
and multiple [scripts](https://github.com/pmwkaa/ioarena/tree/HL%2B%2B2015),
run on a Lenovo Carbon-2 laptop, i7-4600U 2.1 GHz (2 physical cores, 4 HyperThreading cores), 8 GB RAM,
SSD SAMSUNG MZNTD512HAGL-000L1 (DXT23L0Q) 512 GB.
## Integral performance
Shown here is the sum of the performance metrics across 3 benchmarks:
- Read/Search on the machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores);
- Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
operations in sync-write mode (fdatasync is called after each
transaction);
- Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
operations in lazy-write mode (moment to sync data to persistent storage
is decided by OS).
*Reasons why asynchronous mode isn't benchmarked here:*
1. It doesn't make sense, as this should be compared with DB engines oriented
toward keeping data in memory, e.g. [Tarantool](https://tarantool.io/),
[Redis](https://redis.io/), etc.
2. The performance gap is too high for any meaningful comparison.
![Comparison #1: Integral Performance](https://libmdbx.dqdkfa.ru/img/perf-slide-1.png)
--------------------------------------------------------------------------------
## Read Scalability
Summary performance with concurrent read/search queries in 1-2-4-8
threads on the machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores).
![Comparison #2: Read Scalability](https://libmdbx.dqdkfa.ru/img/perf-slide-2.png)
--------------------------------------------------------------------------------
## Sync-write mode
- The linear scale on the left and the dark rectangles show the arithmetic mean of
transactions per second;
- The logarithmic scale on the right is in seconds, and the yellow intervals show the
execution time of transactions. Each interval shows the minimal and maximum
execution time, and the cross marks the standard deviation.
**10,000 transactions in sync-write mode**. In case of a crash all data
is consistent and conforms to the last successful transaction. The
[fdatasync](https://linux.die.net/man/2/fdatasync) syscall is used after
each write transaction in this mode.
In the benchmark each transaction contains combined CRUD operations (2
inserts, 1 read, 1 update, 1 delete). Benchmark starts on an empty database
and after full run the database contains 10,000 small key-value records.
![Comparison #3: Sync-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-3.png)
--------------------------------------------------------------------------------
## Lazy-write mode
- The linear scale on the left and the dark rectangles show the arithmetic mean of
thousands of transactions per second;
- The logarithmic scale on the right is in seconds, and the yellow intervals show the
execution time of transactions. Each interval shows the minimal and maximum
execution time, and the cross marks the standard deviation.
**100,000 transactions in lazy-write mode**. In case of a crash all data
is consistent and conforms to one of the last successful transactions, but
the transactions after it will be lost. Other DB engines use a
[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) or transaction
journal for that, which in turn depends on the order of operations in the
journaled filesystem. _libmdbx_ doesn't use a WAL and hands I/O operations
over to the filesystem and OS kernel (mmap).
In the benchmark each transaction contains combined CRUD operations (2
inserts, 1 read, 1 update, 1 delete). Benchmark starts on an empty database
and after full run the database contains 100,000 small key-value
records.
![Comparison #4: Lazy-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-4.png)
--------------------------------------------------------------------------------
## Async-write mode
- The linear scale on the left and the dark rectangles show the arithmetic mean of
thousands of transactions per second;
- The logarithmic scale on the right is in seconds, and the yellow intervals show the
execution time of transactions. Each interval shows the minimal and maximum
execution time, and the cross marks the standard deviation.
**1,000,000 transactions in async-write mode**.
In case of a crash all data is consistent and conforms to one of the last successful transactions,
but the number of lost transactions is much higher than in
lazy-write mode. In this mode all DB engines do as few writes to
persistent storage as possible. _libmdbx_ uses
[msync(MS_ASYNC)](https://linux.die.net/man/2/msync) in this mode.
In the benchmark each transaction contains combined CRUD operations (2
inserts, 1 read, 1 update, 1 delete). Benchmark starts on an empty database
and after full run the database contains 10,000 small key-value records.
![Comparison #5: Async-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-5.png)
--------------------------------------------------------------------------------
## Cost comparison
Summary of the resources used during the lazy-write mode benchmarks:
- Read and write IOPS;
- Sum of user CPU time and sys CPU time;
- Space used on persistent storage after the test with the DB closed, but without
waiting for the end of all internal housekeeping operations (LSM
compactification, etc.).
_ForestDB_ is excluded because the benchmark showed that its consumption of each
resource (CPU, IOPS) is much higher than that of the other engines, which
prevents a meaningful comparison with them.
All benchmark data is gathered by
[getrusage()](http://man7.org/linux/man-pages/man2/getrusage.2.html)
syscall and by scanning the data directory.
![Comparison #6: Cost comparison](https://libmdbx.dqdkfa.ru/img/perf-slide-6.png)
<!-- section-end -->

View File

@ -0,0 +1 @@
0.11.8.0

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,53 @@
## Copyright (c) 2012-2022 Leonid Yuriev <leo@yuriev.ru>.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
if(CMAKE_VERSION VERSION_LESS 3.12)
cmake_minimum_required(VERSION 3.8.2)
else()
cmake_minimum_required(VERSION 3.12)
endif()
cmake_policy(PUSH)
cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION})
include(CheckLibraryExists)
check_library_exists(gcov __gcov_flush "" HAVE_GCOV)
option(ENABLE_GCOV
"Enable integration with gcov, a code coverage program" OFF)
option(ENABLE_GPROF
"Enable integration with gprof, a performance analyzing tool" OFF)
if(CMAKE_CXX_COMPILER_LOADED)
include(CheckIncludeFileCXX)
check_include_file_cxx(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H)
else()
include(CheckIncludeFile)
check_include_file(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H)
endif()
option(MDBX_USE_VALGRIND "Enable integration with valgrind, a memory analyzing tool" OFF)
if(MDBX_USE_VALGRIND AND NOT HAVE_VALGRIND_MEMCHECK_H)
message(FATAL_ERROR "MDBX_USE_VALGRIND option is set but valgrind/memcheck.h is not found")
endif()
option(ENABLE_ASAN
"Enable AddressSanitizer, a fast memory error detector based on compiler instrumentation" OFF)
option(ENABLE_UBSAN
"Enable UndefinedBehaviorSanitizer, a fast undefined behavior detector based on compiler instrumentation" OFF)
cmake_policy(POP)

View File

@ -0,0 +1,284 @@
## Copyright (c) 2012-2022 Leonid Yuriev <leo@yuriev.ru>.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
if(CMAKE_VERSION VERSION_LESS 3.12)
cmake_minimum_required(VERSION 3.8.2)
else()
cmake_minimum_required(VERSION 3.12)
endif()
cmake_policy(PUSH)
cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION})
macro(add_compile_flags languages)
foreach(_lang ${languages})
string(REPLACE ";" " " _flags "${ARGN}")
if(CMAKE_CXX_COMPILER_LOADED AND _lang STREQUAL "CXX")
set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}")
endif()
if(CMAKE_C_COMPILER_LOADED AND _lang STREQUAL "C")
set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}")
endif()
endforeach()
unset(_lang)
unset(_flags)
endmacro(add_compile_flags)
macro(remove_flag varname flag)
string(REGEX REPLACE "^(.*)( ${flag} )(.*)$" "\\1 \\3" ${varname} ${${varname}})
string(REGEX REPLACE "^((.+ )*)(${flag})(( .+)*)$" "\\1\\4" ${varname} ${${varname}})
endmacro(remove_flag)
macro(remove_compile_flag languages flag)
foreach(_lang ${languages})
if(CMAKE_CXX_COMPILER_LOADED AND _lang STREQUAL "CXX")
remove_flag(${_lang}_FLAGS ${flag})
endif()
if(CMAKE_C_COMPILER_LOADED AND _lang STREQUAL "C")
remove_flag(${_lang}_FLAGS ${flag})
endif()
endforeach()
unset(_lang)
endmacro(remove_compile_flag)
macro(set_source_files_compile_flags)
foreach(file ${ARGN})
get_filename_component(_file_ext ${file} EXT)
set(_lang "")
if("${_file_ext}" STREQUAL ".m")
set(_lang OBJC)
# CMake believes that Objective C is a flavor of C++, not C,
# and uses g++ compiler for .m files.
# LANGUAGE property forces CMake to use CC for ${file}
set_source_files_properties(${file} PROPERTIES LANGUAGE C)
elseif("${_file_ext}" STREQUAL ".mm")
set(_lang OBJCXX)
endif()
if(_lang)
get_source_file_property(_flags ${file} COMPILE_FLAGS)
if("${_flags}" STREQUAL "NOTFOUND")
set(_flags "${CMAKE_${_lang}_FLAGS}")
else()
set(_flags "${_flags} ${CMAKE_${_lang}_FLAGS}")
endif()
# message(STATUS "Set (${file} ${_flags}")
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS
"${_flags}")
endif()
endforeach()
unset(_file_ext)
unset(_lang)
endmacro(set_source_files_compile_flags)
macro(fetch_version name source_root_directory parent_scope)
set(${name}_VERSION "")
set(${name}_GIT_DESCRIBE "")
set(${name}_GIT_TIMESTAMP "")
set(${name}_GIT_TREE "")
set(${name}_GIT_COMMIT "")
set(${name}_GIT_REVISION 0)
set(${name}_GIT_VERSION "")
if(GIT AND EXISTS "${source_root_directory}/.git")
execute_process(COMMAND ${GIT} show --no-patch --format=%cI HEAD
OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_TIMESTAMP" STREQUAL "%cI")
execute_process(COMMAND ${GIT} show --no-patch --format=%ci HEAD
OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_TIMESTAMP" STREQUAL "%ci")
message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%cI HEAD` failed)")
endif()
endif()
execute_process(COMMAND ${GIT} show --no-patch --format=%T HEAD
OUTPUT_VARIABLE ${name}_GIT_TREE
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_TREE" STREQUAL "")
message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%T HEAD` failed)")
endif()
execute_process(COMMAND ${GIT} show --no-patch --format=%H HEAD
OUTPUT_VARIABLE ${name}_GIT_COMMIT
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_COMMIT" STREQUAL "")
message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%H HEAD` failed)")
endif()
execute_process(COMMAND ${GIT} rev-list --tags --count
OUTPUT_VARIABLE tag_count
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc)
message(FATAL_ERROR "Please install latest version of git (`git rev-list --tags --count` failed)")
endif()
if(tag_count EQUAL 0)
execute_process(COMMAND ${GIT} rev-list --all --count
OUTPUT_VARIABLE whole_count
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc)
message(FATAL_ERROR "Please install latest version of git (`git rev-list --all --count` failed)")
endif()
if(whole_count GREATER 42)
message(FATAL_ERROR "Please fetch tags (no any tags for ${whole_count} commits)")
endif()
set(${name}_GIT_VERSION "0;0;0")
execute_process(COMMAND ${GIT} rev-list --count --all --no-merges
OUTPUT_VARIABLE ${name}_GIT_REVISION
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_REVISION" STREQUAL "")
message(FATAL_ERROR "Please install latest version of git (`rev-list --count --all --no-merges` failed)")
endif()
else(tag_count EQUAL 0)
execute_process(COMMAND ${GIT} describe --tags --long --dirty=-dirty "--match=v[0-9]*"
OUTPUT_VARIABLE ${name}_GIT_DESCRIBE
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_DESCRIBE" STREQUAL "")
if(_whole_count GREATER 42)
message(FATAL_ERROR "Please fetch tags (`describe --tags --long --dirty --match=v[0-9]*` failed)")
else()
execute_process(COMMAND ${GIT} describe --all --long --dirty=-dirty
OUTPUT_VARIABLE ${name}_GIT_DESCRIBE
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_DESCRIBE" STREQUAL "")
message(FATAL_ERROR "Please install latest version of git (`git rev-list --tags --count` and/or `git rev-list --all --count` failed)")
endif()
endif()
endif()
execute_process(COMMAND ${GIT} describe --tags --abbrev=0 "--match=v[0-9]*"
OUTPUT_VARIABLE last_release_tag
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc)
message(FATAL_ERROR "Please install latest version of git (`describe --tags --abbrev=0 --match=v[0-9]*` failed)")
endif()
if (last_release_tag)
set(git_revlist_arg "${last_release_tag}..HEAD")
else()
execute_process(COMMAND ${GIT} tag --sort=-version:refname
OUTPUT_VARIABLE tag_list
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc)
message(FATAL_ERROR "Please install latest version of git (`tag --sort=-version:refname` failed)")
endif()
string(REGEX REPLACE "\n" ";" tag_list "${tag_list}")
set(git_revlist_arg "HEAD")
foreach(tag IN LISTS tag_list)
if(NOT last_release_tag)
string(REGEX MATCH "^v[0-9]+(\.[0-9]+)+" last_release_tag "${tag}")
set(git_revlist_arg "${tag}..HEAD")
endif()
endforeach(tag)
endif()
execute_process(COMMAND ${GIT} rev-list --count "${git_revlist_arg}"
OUTPUT_VARIABLE ${name}_GIT_REVISION
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${source_root_directory}
RESULT_VARIABLE rc)
if(rc OR "${name}_GIT_REVISION" STREQUAL "")
message(FATAL_ERROR "Please install latest version of git (`rev-list --count ${git_revlist_arg}` failed)")
endif()
string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}")
if(git_version_valid)
string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" "\\2;\\3;\\4" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE})
else()
string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}")
if(git_version_valid)
string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)(.*)?" "\\2;\\3;0" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE})
else()
message(AUTHOR_WARNING "Bad ${name} version \"${${name}_GIT_DESCRIBE}\"; falling back to 0.0.0 (have you made an initial release?)")
set(${name}_GIT_VERSION "0;0;0")
endif()
endif()
endif(tag_count EQUAL 0)
endif()
if(NOT ${name}_GIT_VERSION OR NOT ${name}_GIT_TIMESTAMP OR ${name}_GIT_REVISION STREQUAL "")
if(GIT AND EXISTS "${source_root_directory}/.git")
message(WARNING "Unable to retrieve ${name} version from git.")
endif()
set(${name}_GIT_VERSION "0;0;0;0")
set(${name}_GIT_TIMESTAMP "")
set(${name}_GIT_REVISION 0)
# Try to get version from VERSION file
set(version_file "${source_root_directory}/VERSION.txt")
if(NOT EXISTS "${version_file}")
set(version_file "${source_root_directory}/VERSION")
endif()
if(EXISTS "${version_file}")
file(STRINGS "${version_file}" ${name}_VERSION LIMIT_COUNT 1 LIMIT_INPUT 42)
endif()
if(NOT ${name}_VERSION)
message(WARNING "Unable to retrieve ${name} version from \"${version_file}\" file.")
set(${name}_VERSION_LIST ${${name}_GIT_VERSION})
string(REPLACE ";" "." ${name}_VERSION "${${name}_GIT_VERSION}")
else()
string(REPLACE "." ";" ${name}_VERSION_LIST ${${name}_VERSION})
endif()
else()
list(APPEND ${name}_GIT_VERSION ${${name}_GIT_REVISION})
set(${name}_VERSION_LIST ${${name}_GIT_VERSION})
string(REPLACE ";" "." ${name}_VERSION "${${name}_GIT_VERSION}")
endif()
list(GET ${name}_VERSION_LIST 0 "${name}_VERSION_MAJOR")
list(GET ${name}_VERSION_LIST 1 "${name}_VERSION_MINOR")
list(GET ${name}_VERSION_LIST 2 "${name}_VERSION_RELEASE")
list(GET ${name}_VERSION_LIST 3 "${name}_VERSION_REVISION")
if(${parent_scope})
set(${name}_VERSION_MAJOR "${${name}_VERSION_MAJOR}" PARENT_SCOPE)
set(${name}_VERSION_MINOR "${${name}_VERSION_MINOR}" PARENT_SCOPE)
set(${name}_VERSION_RELEASE "${${name}_VERSION_RELEASE}" PARENT_SCOPE)
set(${name}_VERSION_REVISION "${${name}_VERSION_REVISION}" PARENT_SCOPE)
set(${name}_VERSION "${${name}_VERSION}" PARENT_SCOPE)
set(${name}_GIT_DESCRIBE "${${name}_GIT_DESCRIBE}" PARENT_SCOPE)
set(${name}_GIT_TIMESTAMP "${${name}_GIT_TIMESTAMP}" PARENT_SCOPE)
set(${name}_GIT_TREE "${${name}_GIT_TREE}" PARENT_SCOPE)
set(${name}_GIT_COMMIT "${${name}_GIT_COMMIT}" PARENT_SCOPE)
set(${name}_GIT_REVISION "${${name}_GIT_REVISION}" PARENT_SCOPE)
set(${name}_GIT_VERSION "${${name}_GIT_VERSION}" PARENT_SCOPE)
endif()
endmacro(fetch_version)
cmake_policy(POP)

View File

@ -0,0 +1,63 @@
/* This is CMake-template for libmdbx's config.h
******************************************************************************/
/* *INDENT-OFF* */
/* clang-format off */
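/* Each `#cmakedefine FOO` below is replaced by configure_file() with `#define FOO ...`
   when FOO is set in CMake and is commented out otherwise, while the `#cmakedefine01 FOO`
   form always expands to `#define FOO 0` or `#define FOO 1`. */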
#cmakedefine LTO_ENABLED
#cmakedefine MDBX_USE_VALGRIND
#cmakedefine ENABLE_GPROF
#cmakedefine ENABLE_GCOV
#cmakedefine ENABLE_ASAN
#cmakedefine ENABLE_UBSAN
#cmakedefine01 MDBX_FORCE_ASSERTIONS
/* Common */
#cmakedefine01 MDBX_TXN_CHECKOWNER
#cmakedefine MDBX_ENV_CHECKPID_AUTO
#ifndef MDBX_ENV_CHECKPID_AUTO
#cmakedefine01 MDBX_ENV_CHECKPID
#endif
#cmakedefine MDBX_LOCKING_AUTO
#ifndef MDBX_LOCKING_AUTO
#cmakedefine MDBX_LOCKING @MDBX_LOCKING@
#endif
#cmakedefine MDBX_TRUST_RTC_AUTO
#ifndef MDBX_TRUST_RTC_AUTO
#cmakedefine01 MDBX_TRUST_RTC
#endif
#cmakedefine01 MDBX_DISABLE_PAGECHECKS
/* Windows */
#cmakedefine01 MDBX_WITHOUT_MSVC_CRT
/* MacOS & iOS */
#cmakedefine01 MDBX_OSX_SPEED_INSTEADOF_DURABILITY
/* POSIX */
#cmakedefine01 MDBX_DISABLE_GNU_SOURCE
#cmakedefine MDBX_USE_OFDLOCKS_AUTO
#ifndef MDBX_USE_OFDLOCKS_AUTO
#cmakedefine01 MDBX_USE_OFDLOCKS
#endif
/* Build Info */
#ifndef MDBX_BUILD_TIMESTAMP
#cmakedefine MDBX_BUILD_TIMESTAMP "@MDBX_BUILD_TIMESTAMP@"
#endif
#ifndef MDBX_BUILD_TARGET
#cmakedefine MDBX_BUILD_TARGET "@MDBX_BUILD_TARGET@"
#endif
#ifndef MDBX_BUILD_TYPE
#cmakedefine MDBX_BUILD_TYPE "@MDBX_BUILD_TYPE@"
#endif
#ifndef MDBX_BUILD_COMPILER
#cmakedefine MDBX_BUILD_COMPILER "@MDBX_BUILD_COMPILER@"
#endif
#ifndef MDBX_BUILD_FLAGS
#cmakedefine MDBX_BUILD_FLAGS "@MDBX_BUILD_FLAGS@"
#endif
#cmakedefine MDBX_BUILD_SOURCERY @MDBX_BUILD_SOURCERY@
/* *INDENT-ON* */
/* clang-format on */

View File

@ -0,0 +1,99 @@
.\" Copyright 2015-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_CHK 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_chk \- MDBX checking tool
.SH SYNOPSIS
.B mdbx_chk
[\c
.BR \-V ]
[\c
.BR \-v [ v [ v ]]]
[\c
.BR \-n ]
[\c
.BR \-q ]
[\c
.BR \-c ]
[\c
.BR \-w ]
[\c
.BR \-d ]
[\c
.BR \-i ]
[\c
.BI \-s \ subdb\fR]
.BR \ dbpath
.SH DESCRIPTION
The
.B mdbx_chk
utility is intended to check an MDBX database file.
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-v
Produce verbose output, including summarized space and page usage statistics.
If \fB\-vv\fP is given, be more verbose and show summarized B-tree info
and space allocation.
If \fB\-vvv\fP is given, be more verbose and include summarized statistics
of leaf B-tree pages.
If \fB\-vvvv\fP is given, be even more verbose and show info for each page
during B-tree traversal and basic info for each GC record.
If \fB\-vvvvv\fP is given, turn on maximal verbosity and display the full list
of page IDs in the GC records and the size of each key-value pair of the database(s).
.TP
.BR \-q
Be quiet; do not output anything even if an error was detected.
.TP
.BR \-c
Force cooperative mode while opening the environment, i.e. don't try to open it
in exclusive/monopolistic mode. Only the exclusive/monopolistic mode allows a complete
check, including a full check of all meta-pages and of the actual size of the database file.
.TP
.BR \-w
Open the environment in read-write mode and lock it for writing while checking.
This may be impossible if the environment is already used by another process
in an incompatible read-write mode. This allows rollback to the last steady commit
(in case the environment was not closed properly) and a subsequent check of the
transaction IDs of the meta-pages. Otherwise, without the \fB\-w\fP option, the
environment will be opened in read-only mode.
.TP
.BR \-d
Disable page-by-page traversal of the B-tree. In this case, without B-tree
traversal, it is impossible to check for lost/unused pages or for double-used
pages.
.TP
.BR \-i
Ignore wrong-order errors, which are likely false positives if custom
comparator(s) were used.
.TP
.BR \-s \ subdb
Verify and show info only for a specific subdatabase.
.TP
.BR \-0 | \-1 | \-2
Use the specified meta-page 0, 1, or 2 for checking.
.TP
.BR \-t
Turn to a specified meta-page on successful check.
.TP
.BR \-T
Turn to a specified meta-page EVEN ON UNSUCCESSFUL CHECK!
.TP
.BR \-n
Open MDBX environment(s) which do not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically.
.SH DIAGNOSTICS
Exit status is zero if no errors occur. Errors result in a non-zero exit status
and a diagnostic message being written to standard error
unless quiet mode was requested.
.SH "SEE ALSO"
.BR mdbx_stat (1),
.BR mdbx_copy (1),
.BR mdbx_dump (1),
.BR mdbx_load (1)
.BR mdbx_drop (1)
.SH AUTHOR
Leonid Yuriev <https://gitflic.ru/user/erthink>

View File

@ -0,0 +1,68 @@
.\" Copyright 2015-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved.
.\" Copyright 2015,2016 Peter-Service R&D LLC <http://billing.ru/>.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_COPY 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_copy \- MDBX environment copy tool
.SH SYNOPSIS
.B mdbx_copy
[\c
.BR \-V ]
[\c
.BR \-q ]
[\c
.BR \-c ]
[\c
.BR \-n ]
.B src_path
[\c
.BR dest_path ]
.SH DESCRIPTION
The
.B mdbx_copy
utility copies an MDBX environment. The environment can
be copied regardless of whether it is currently in use.
No lockfile is created, since it gets recreated as needed.
If
.I dest_path
is specified it must be the path of an empty directory
for storing the backup. Otherwise, the backup will be
written to stdout.
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-q
Be quiet.
.TP
.BR \-c
Compact while copying. Only current data pages will be copied; freed
or unused pages will be omitted from the copy. This option will
slow down the backup process as it is more CPU-intensive.
Currently it fails if the environment has suffered a page leak.
.TP
.BR \-n
Open MDBX environment(s) which do not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically.
.SH DIAGNOSTICS
Exit status is zero if no errors occur.
Errors result in a non-zero exit status and
a diagnostic message being written to standard error.
.SH CAVEATS
This utility can trigger significant file size growth if run
in parallel with write transactions, because pages which they
free during copying cannot be reused until the copy is done.
.SH "SEE ALSO"
.BR mdbx_dump (1),
.BR mdbx_chk (1),
.BR mdbx_stat (1),
.BR mdbx_load (1)
.BR mdbx_drop (1)
.SH AUTHOR
Howard Chu of Symas Corporation <http://www.symas.com>,
Leonid Yuriev <https://gitflic.ru/user/erthink>

View File

@ -0,0 +1,48 @@
.\" Copyright 2021-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copyright 2014-2021 Howard Chu, Symas Corp. All Rights Reserved.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_DROP 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_drop \- MDBX database delete tool
.SH SYNOPSIS
.B mdbx_drop
[\c
.BR \-V ]
[\c
.BR \-d ]
[\c
.BI \-s \ subdb\fR]
[\c
.BR \-n ]
.BR \ dbpath
.SH DESCRIPTION
The
.B mdbx_drop
utility empties or deletes a database in the specified
environment.
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-d
Delete the specified database, don't just empty it.
.TP
.BR \-s \ subdb
Operate on a specific subdatabase. If no database is specified, only the main database is dropped.
.TP
.BR \-n
Operate on an MDBX database which does not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically.
.SH DIAGNOSTICS
Exit status is zero if no errors occur.
Errors result in a non-zero exit status and
a diagnostic message being written to standard error.
.SH "SEE ALSO"
.BR mdbx_load (1),
.BR mdbx_copy (1),
.BR mdbx_chk (1),
.BR mdbx_stat (1)
.SH AUTHOR
Howard Chu of Symas Corporation <http://www.symas.com>

View File

@ -0,0 +1,94 @@
.\" Copyright 2015-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved.
.\" Copyright 2015,2016 Peter-Service R&D LLC <http://billing.ru/>.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_DUMP 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_dump \- MDBX environment export tool
.SH SYNOPSIS
.B mdbx_dump
[\c
.BR \-V ]
[\c
.BR \-q ]
[\c
.BI \-f \ file\fR]
[\c
.BR \-l ]
[\c
.BR \-p ]
[\c
.BR \-a \ |
.BI \-s \ subdb\fR]
[\c
.BR \-r ]
[\c
.BR \-n ]
.BR \ dbpath
.SH DESCRIPTION
The
.B mdbx_dump
utility reads a database and writes its contents to the
standard output using a portable flat-text format
understood by the
.BR mdbx_load (1)
utility.
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-q
Be quiet.
.TP
.BR \-f \ file
Write to the specified file instead of to the standard output.
.TP
.BR \-l
List the databases stored in the environment. Just the
names will be listed, no data will be output.
.TP
.BR \-p
If characters in either the key or data items are printing characters (as
defined by isprint(3)), output them directly. This option permits users to
use standard text editors and tools to modify the contents of databases.
Note: different systems may have different notions about what characters
are considered printing characters, and databases dumped in this manner may
be less portable to external systems.
.TP
.BR \-a
Dump all of the subdatabases in the environment.
.TP
.BR \-s \ subdb
Dump a specific subdatabase. If no database is specified, only the main database is dumped.
.TP
.BR \-r
Rescue mode. Ignore some errors in order to dump a corrupted DB.
.TP
.BR \-n
Dump an MDBX database which does not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically.
.SH DIAGNOSTICS
Exit status is zero if no errors occur.
Errors result in a non-zero exit status and
a diagnostic message being written to standard error.
Dumping and reloading databases that use user-defined comparison functions
will result in new databases that use the default comparison functions.
\fBIn this case it is quite likely that the reloaded database will be
damaged beyond repair permitting neither record storage nor retrieval.\fP
The only available workaround is to modify the source for the
.BR mdbx_load (1)
utility to load the database using the correct comparison functions.
.SH "SEE ALSO"
.BR mdbx_load (1),
.BR mdbx_copy (1),
.BR mdbx_chk (1),
.BR mdbx_stat (1)
.BR mdbx_drop (1)
.SH AUTHOR
Howard Chu of Symas Corporation <http://www.symas.com>,
Leonid Yuriev <https://gitflic.ru/user/erthink>

View File

@ -0,0 +1,105 @@
.\" Copyright 2015-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved.
.\" Copyright 2015,2016 Peter-Service R&D LLC <http://billing.ru/>.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_LOAD 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_load \- MDBX environment import tool
.SH SYNOPSIS
.B mdbx_load
[\c
.BR \-V ]
[\c
.BR \-q ]
[\c
.BR \-a ]
[\c
.BI \-f \ file\fR]
[\c
.BI \-s \ subdb\fR]
[\c
.BR \-N ]
[\c
.BR \-T ]
[\c
.BR \-r ]
[\c
.BR \-n ]
.BR \ dbpath
.SH DESCRIPTION
The
.B mdbx_load
utility reads from the standard input and loads it into the
MDBX environment
.BR dbpath .
The input to
.B mdbx_load
must be in the output format specified by the
.BR mdbx_dump (1)
utility or as specified by the
.B -T
option below.
A simple escape mechanism, where newline and backslash (\\) characters are special, is
applied to the text input. Newline characters are interpreted as record separators.
Backslash characters in the text will be interpreted in one of two ways: If the backslash
character precedes another backslash character, the pair will be interpreted as a literal
backslash. If the backslash character precedes any other character, the two characters
following the backslash will be interpreted as a hexadecimal specification of a single
character; for example, \\0a is a newline character in the ASCII character set.
For this reason, any backslash or newline characters that naturally occur in the text
input must be escaped to avoid misinterpretation by
.BR mdbx_load .
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-q
Be quiet.
.TP
.BR \-a
Append all records in the order they appear in the input. The input is assumed to already be
in correctly sorted order and no sorting or checking for redundant values will be performed.
This option must be used to reload data that was produced by running
.B mdbx_dump
on a database that uses custom compare functions.
.TP
.BR \-f \ file
Read from the specified file instead of from the standard input.
.TP
.BR \-s \ subdb
Load a specific subdatabase. If no database is specified, data is loaded into the main database.
.TP
.BR \-N
Don't overwrite existing records when loading into an already existing database; just skip them.
.TP
.BR \-T
Load data from simple text files. The input must be paired lines of text, where the first
line of the pair is the key item, and the second line of the pair is its corresponding
data item.
.TP
.BR \-r
Rescue mode. Ignore errors in order to load a corrupted DB dump.
.TP
.BR \-n
Load an MDBX database which does not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically.
.SH DIAGNOSTICS
Exit status is zero if no errors occur.
Errors result in a non-zero exit status and
a diagnostic message being written to standard error.
.SH "SEE ALSO"
.BR mdbx_dump (1),
.BR mdbx_chk (1),
.BR mdbx_stat (1),
.BR mdbx_copy (1)
.BR mdbx_drop (1)
.SH AUTHOR
Howard Chu of Symas Corporation <http://www.symas.com>,
Leonid Yuriev <https://gitflic.ru/user/erthink>

View File

@ -0,0 +1,86 @@
.\" Copyright 2015-2022 Leonid Yuriev <leo@yuriev.ru>.
.\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved.
.\" Copyright 2015,2016 Peter-Service R&D LLC <http://billing.ru/>.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.TH MDBX_STAT 1 "2022-04-22" "MDBX 0.11.8"
.SH NAME
mdbx_stat \- MDBX environment status tool
.SH SYNOPSIS
.B mdbx_stat
[\c
.BR \-V ]
[\c
.BR \-q ]
[\c
.BR \-p ]
[\c
.BR \-e ]
[\c
.BR \-f [ f [ f ]]]
[\c
.BR \-r [ r ]]
[\c
.BR \-a \ |
.BI \-s \ subdb\fR]
.BR \ dbpath
[\c
.BR \-n ]
.SH DESCRIPTION
The
.B mdbx_stat
utility displays the status of an MDBX environment.
.SH OPTIONS
.TP
.BR \-V
Write the library version number to the standard output, and exit.
.TP
.BR \-q
Be quiet.
.TP
.BR \-p
Display overall statistics of page operations of all (running, completed
and aborted) transactions in the current multi-process session (since the
first process opened the database after everyone had previously closed it).
.TP
.BR \-e
Display information about the database environment.
.TP
.BR \-f
Display information about the environment GC.
If \fB\-ff\fP is given, summarize each GC/freelist entry.
If \fB\-fff\fP is given, display the full list of page IDs in the GC/freelist.
.TP
.BR \-r
Display information about the environment reader table.
Shows the process ID, thread ID, and transaction ID for each active
reader slot. The process ID and transaction ID are in decimal, the
thread ID is in hexadecimal. The transaction ID is displayed as "-"
if the reader does not currently have a read transaction open.
If \fB\-rr\fP is given, check for stale entries in the reader
table and clear them. The reader table will be printed again
after the check is performed.
.TP
.BR \-a
Display the status of all of the subdatabases in the environment.
.TP
.BR \-s \ subdb
Display the status of a specific subdatabase.
.TP
.BR \-n
Display the status of an MDBX database which does not use subdirectories.
This is a legacy option; nowadays MDBX handles this automatically
for existing databases, but it may be required when creating a new one.
.SH DIAGNOSTICS
Exit status is zero if no errors occur.
Errors result in a non-zero exit status and
a diagnostic message being written to standard error.
.SH "SEE ALSO"
.BR mdbx_chk (1),
.BR mdbx_copy (1),
.BR mdbx_dump (1),
.BR mdbx_load (1)
.BR mdbx_drop (1)
.SH AUTHOR
Howard Chu of Symas Corporation <http://www.symas.com>,
Leonid Yuriev <https://gitflic.ru/user/erthink>

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,7 @@
#![deny(warnings)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(clippy::all)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));

View File

@ -0,0 +1,117 @@
use crate::{error::mdbx_result, Error, TransactionKind};
use derive_more::*;
use std::{borrow::Cow, slice};
use thiserror::Error;
/// Implement this to be able to decode data values
pub trait TableObject<'tx> {
fn decode(data_val: &[u8]) -> Result<Self, Error>
where
Self: Sized;
#[doc(hidden)]
unsafe fn decode_val<K: TransactionKind>(
_: *const ffi::MDBX_txn,
data_val: &ffi::MDBX_val,
) -> Result<Self, Error>
where
Self: Sized,
{
let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
TableObject::decode(s)
}
}
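// A hedged sketch of how downstream code might implement `TableObject` for a
// custom fixed-width key type; `U64BE` is a hypothetical name used only for
// illustration and is not part of this crate. It delegates to the `[u8; LEN]`
// implementation provided later in this module.
//
// ```ignore
// use libmdbx::{Error, TableObject};
//
// struct U64BE(u64);
//
// impl<'tx> TableObject<'tx> for U64BE {
//     fn decode(data_val: &[u8]) -> Result<Self, Error> {
//         // Reuse the fixed-size array impl to validate the length (8 bytes).
//         let raw = <[u8; 8]>::decode(data_val)?;
//         Ok(U64BE(u64::from_be_bytes(raw)))
//     }
// }
// ```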
impl<'tx> TableObject<'tx> for Cow<'tx, [u8]> {
fn decode(_: &[u8]) -> Result<Self, Error> {
unreachable!()
}
#[doc(hidden)]
unsafe fn decode_val<K: TransactionKind>(
txn: *const ffi::MDBX_txn,
data_val: &ffi::MDBX_val,
) -> Result<Self, Error> {
let is_dirty = (!K::ONLY_CLEAN) && mdbx_result(ffi::mdbx_is_dirty(txn, data_val.iov_base))?;
let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
Ok(if is_dirty {
Cow::Owned(s.to_vec())
} else {
Cow::Borrowed(s)
})
}
}
#[cfg(feature = "lifetimed-bytes")]
impl<'tx> TableObject<'tx> for lifetimed_bytes::Bytes<'tx> {
fn decode(_: &[u8]) -> Result<Self, Error> {
unreachable!()
}
#[doc(hidden)]
unsafe fn decode_val<K: TransactionKind>(
txn: *const ffi::MDBX_txn,
data_val: &ffi::MDBX_val,
) -> Result<Self, Error> {
Cow::<'tx, [u8]>::decode_val::<K>(txn, data_val).map(From::from)
}
}
impl<'tx> TableObject<'tx> for Vec<u8> {
fn decode(data_val: &[u8]) -> Result<Self, Error>
where
Self: Sized,
{
Ok(data_val.to_vec())
}
}
impl<'tx> TableObject<'tx> for () {
fn decode(_: &[u8]) -> Result<Self, Error> {
Ok(())
}
unsafe fn decode_val<K: TransactionKind>(
_: *const ffi::MDBX_txn,
_: &ffi::MDBX_val,
) -> Result<Self, Error> {
Ok(())
}
}
/// If you don't need the data itself, just its length.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deref, DerefMut)]
pub struct ObjectLength(pub usize);
impl<'tx> TableObject<'tx> for ObjectLength {
fn decode(data_val: &[u8]) -> Result<Self, Error>
where
Self: Sized,
{
Ok(Self(data_val.len()))
}
}
impl<'tx, const LEN: usize> TableObject<'tx> for [u8; LEN] {
fn decode(data_val: &[u8]) -> Result<Self, Error>
where
Self: Sized,
{
#[derive(Clone, Debug, Display, Error)]
struct InvalidSize<const LEN: usize> {
got: usize,
}
if data_val.len() != LEN {
return Err(Error::DecodeError(Box::new(InvalidSize::<LEN> {
got: data_val.len(),
})));
}
let mut a = [0; LEN];
a[..].copy_from_slice(data_val);
Ok(a)
}
}

View File

@ -0,0 +1,831 @@
use crate::{
database::Database,
error::{mdbx_result, Error, Result},
flags::*,
mdbx_try_optional,
transaction::{txn_execute, TransactionKind, RW},
EnvironmentKind, TableObject, Transaction,
};
use ffi::{
MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE,
MDBX_GET_CURRENT, MDBX_GET_MULTIPLE, MDBX_LAST, MDBX_LAST_DUP, MDBX_NEXT, MDBX_NEXT_DUP,
MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE,
MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE,
};
use libc::{c_uint, c_void};
use parking_lot::Mutex;
use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr, result, sync::Arc};
/// A cursor for navigating the items within a database.
pub struct Cursor<'txn, K>
where
K: TransactionKind,
{
txn: Arc<Mutex<*mut ffi::MDBX_txn>>,
cursor: *mut ffi::MDBX_cursor,
_marker: PhantomData<fn(&'txn (), K)>,
}
impl<'txn, K> Cursor<'txn, K>
where
K: TransactionKind,
{
pub(crate) fn new<E: EnvironmentKind>(
txn: &'txn Transaction<K, E>,
db: &Database<'txn>,
) -> Result<Self> {
let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut();
let txn = txn.txn_mutex();
unsafe {
mdbx_result(txn_execute(&*txn, |txn| {
ffi::mdbx_cursor_open(txn, db.dbi(), &mut cursor)
}))?;
}
Ok(Self {
txn,
cursor,
_marker: PhantomData,
})
}
fn new_at_position(other: &Self) -> Result<Self> {
unsafe {
let cursor = ffi::mdbx_cursor_create(ptr::null_mut());
let res = ffi::mdbx_cursor_copy(other.cursor(), cursor);
let s = Self {
txn: other.txn.clone(),
cursor,
_marker: PhantomData,
};
mdbx_result(res)?;
Ok(s)
}
}
/// Returns a raw pointer to the underlying MDBX cursor.
///
/// The caller **must** ensure that the pointer is not used after the
/// lifetime of the cursor.
pub fn cursor(&self) -> *mut ffi::MDBX_cursor {
self.cursor
}
/// Retrieves a key/data pair from the cursor. Depending on the cursor op,
/// the current key may be returned.
fn get<Key, Value>(
&self,
key: Option<&[u8]>,
data: Option<&[u8]>,
op: MDBX_cursor_op,
) -> Result<(Option<Key>, Value, bool)>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
unsafe {
let mut key_val = slice_to_val(key);
let mut data_val = slice_to_val(data);
let key_ptr = key_val.iov_base;
let data_ptr = data_val.iov_base;
txn_execute(&*self.txn, |txn| {
let v = mdbx_result(ffi::mdbx_cursor_get(
self.cursor,
&mut key_val,
&mut data_val,
op,
))?;
assert_ne!(data_ptr, data_val.iov_base);
let key_out = {
// MDBX wrote in new key
if key_ptr != key_val.iov_base {
Some(Key::decode_val::<K>(txn, &key_val)?)
} else {
None
}
};
let data_out = Value::decode_val::<K>(txn, &data_val)?;
Ok((key_out, data_out, v))
})
}
}
fn get_value<Value>(
&mut self,
key: Option<&[u8]>,
data: Option<&[u8]>,
op: MDBX_cursor_op,
) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
let (_, v, _) = mdbx_try_optional!(self.get::<(), Value>(key, data, op));
Ok(Some(v))
}
fn get_full<Key, Value>(
&mut self,
key: Option<&[u8]>,
data: Option<&[u8]>,
op: MDBX_cursor_op,
) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
let (k, v, _) = mdbx_try_optional!(self.get(key, data, op));
Ok(Some((k.unwrap(), v)))
}
/// Position at first key/data item.
pub fn first<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_FIRST)
}
/// [DatabaseFlags::DUP_SORT]-only: Position at first data item of current key.
pub fn first_dup<Value>(&mut self) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(None, None, MDBX_FIRST_DUP)
}
/// [DatabaseFlags::DUP_SORT]-only: Position at key/data pair.
pub fn get_both<Value>(&mut self, k: &[u8], v: &[u8]) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(Some(k), Some(v), MDBX_GET_BOTH)
}
/// [DatabaseFlags::DUP_SORT]-only: Position at given key and at first data greater than or equal to specified data.
pub fn get_both_range<Value>(&mut self, k: &[u8], v: &[u8]) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(Some(k), Some(v), MDBX_GET_BOTH_RANGE)
}
/// Return key/data at current cursor position.
pub fn get_current<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_GET_CURRENT)
}
/// DupFixed-only: Return up to a page of duplicate data items from current cursor position.
/// Move cursor to prepare for [Self::next_multiple()].
pub fn get_multiple<Value>(&mut self) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(None, None, MDBX_GET_MULTIPLE)
}
/// Position at last key/data item.
pub fn last<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_LAST)
}
/// DupSort-only: Position at last data item of current key.
pub fn last_dup<Value>(&mut self) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(None, None, MDBX_LAST_DUP)
}
/// Position at next data item
#[allow(clippy::should_implement_trait)]
pub fn next<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_NEXT)
}
/// [DatabaseFlags::DUP_SORT]-only: Position at next data item of current key.
pub fn next_dup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_NEXT_DUP)
}
/// [DatabaseFlags::DUP_FIXED]-only: Return up to a page of duplicate data items from next cursor position. Move cursor to prepare for MDBX_NEXT_MULTIPLE.
pub fn next_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_NEXT_MULTIPLE)
}
/// Position at first data item of next key.
pub fn next_nodup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_NEXT_NODUP)
}
/// Position at previous data item.
pub fn prev<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_PREV)
}
/// [DatabaseFlags::DUP_SORT]-only: Position at previous data item of current key.
pub fn prev_dup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_PREV_DUP)
}
/// Position at last data item of previous key.
pub fn prev_nodup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_PREV_NODUP)
}
/// Position at specified key.
pub fn set<Value>(&mut self, key: &[u8]) -> Result<Option<Value>>
where
Value: TableObject<'txn>,
{
self.get_value(Some(key), None, MDBX_SET)
}
/// Position at specified key, return both key and data.
pub fn set_key<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(Some(key), None, MDBX_SET_KEY)
}
/// Position at first key greater than or equal to specified key.
pub fn set_range<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(Some(key), None, MDBX_SET_RANGE)
}
/// [DatabaseFlags::DUP_FIXED]-only: Position at previous page and return up to a page of duplicate data items.
pub fn prev_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
self.get_full(None, None, MDBX_PREV_MULTIPLE)
}
/// Position at the first key-value pair greater than or equal to the specified one, return both key and data, and report whether an exact match was found.
///
/// For non-DupSort collections this works the same as [Self::set_range()], but returns [false] if the key was found exactly and [true] if a greater key was found.
///
/// For DupSort collections the data value is also taken into account, i.e. the lookup operates on key-value pairs formed from a key and each of its duplicate data values.
/// Returns [false] if the key-value pair was found exactly and [true] if the next pair was returned.
pub fn set_lowerbound<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(bool, Key, Value)>>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
let (k, v, found) = mdbx_try_optional!(self.get(Some(key), None, MDBX_SET_LOWERBOUND));
Ok(Some((found, k.unwrap(), v)))
}
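// A hedged sketch of interpreting the `set_lowerbound` result; it assumes a
// `cursor` obtained from a transaction as in the constructor above.
//
// ```ignore
// if let Some((not_exact, key, value)) = cursor.set_lowerbound::<Vec<u8>, Vec<u8>>(b"k1")? {
//     if not_exact {
//         // `key`/`value` is the next pair greater than b"k1".
//     } else {
//         // The pair for b"k1" was found exactly.
//     }
// }
// ```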
/// Iterate over database items. The iterator will begin with the item next
/// after the cursor, and continue until the end of the database. For new
/// cursors, the iterator will begin with the first item in the database.
///
/// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the
/// duplicate data items of each key will be returned before moving on to
/// the next key.
pub fn iter<Key, Value>(&mut self) -> Iter<'txn, '_, K, Key, Value>
where
Self: Sized,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT)
}
/// Iterate over database items starting from the beginning of the database.
///
/// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the
/// duplicate data items of each key will be returned before moving on to
/// the next key.
pub fn iter_start<Key, Value>(&mut self) -> Iter<'txn, '_, K, Key, Value>
where
Self: Sized,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
Iter::new(self, ffi::MDBX_FIRST, ffi::MDBX_NEXT)
}
/// Iterate over database items starting from the given key.
///
/// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the
/// duplicate data items of each key will be returned before moving on to
/// the next key.
pub fn iter_from<Key, Value>(&mut self, key: &[u8]) -> Iter<'txn, '_, K, Key, Value>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
let res: Result<Option<((), ())>> = self.set_range(key);
if let Err(error) = res {
return Iter::Err(Some(error));
};
Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT)
}
/// Iterate over duplicate database items. The iterator will begin with the
/// item next after the cursor, and continue until the end of the database.
/// Each item will be returned as an iterator of its duplicates.
pub fn iter_dup<Key, Value>(&mut self) -> IterDup<'txn, '_, K, Key, Value>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
IterDup::new(self, ffi::MDBX_NEXT as u32)
}
/// Iterate over duplicate database items starting from the beginning of the
/// database. Each item will be returned as an iterator of its duplicates.
pub fn iter_dup_start<Key, Value>(&mut self) -> IterDup<'txn, '_, K, Key, Value>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
IterDup::new(self, ffi::MDBX_FIRST as u32)
}
/// Iterate over duplicate items in the database starting from the given
/// key. Each item will be returned as an iterator of its duplicates.
pub fn iter_dup_from<Key, Value>(&mut self, key: &[u8]) -> IterDup<'txn, '_, K, Key, Value>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
let res: Result<Option<((), ())>> = self.set_range(key);
if let Err(error) = res {
return IterDup::Err(Some(error));
};
IterDup::new(self, ffi::MDBX_GET_CURRENT as u32)
}
/// Iterate over the duplicates of the item in the database with the given key.
pub fn iter_dup_of<Key, Value>(&mut self, key: &[u8]) -> Iter<'txn, '_, K, Key, Value>
where
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
let res: Result<Option<()>> = self.set(key);
match res {
Ok(Some(_)) => (),
Ok(None) => {
let _: Result<Option<((), ())>> = self.last();
return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT);
}
Err(error) => return Iter::Err(Some(error)),
};
Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT_DUP)
}
}
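// A hedged sketch of iterating a database with a read-only cursor. It assumes
// an `Environment` named `env` and that `Transaction::open_db(None)` returns a
// handle to the unnamed main database (see the constructor docs in `database.rs`).
//
// ```ignore
// let txn = env.begin_ro_txn()?;
// let db = txn.open_db(None)?;
// let mut cursor = txn.cursor(&db)?;
// for item in cursor.iter::<Vec<u8>, Vec<u8>>() {
//     let (key, value) = item?;
//     println!("{:?} => {:?}", key, value);
// }
// ```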
impl<'txn> Cursor<'txn, RW> {
/// Puts a key/data pair into the database. The cursor will be positioned at
/// the new data item, or on failure usually near it.
pub fn put(&mut self, key: &[u8], data: &[u8], flags: WriteFlags) -> Result<()> {
let key_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: key.len(),
iov_base: key.as_ptr() as *mut c_void,
};
let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: data.len(),
iov_base: data.as_ptr() as *mut c_void,
};
mdbx_result(unsafe {
txn_execute(&*self.txn, |_| {
ffi::mdbx_cursor_put(self.cursor, &key_val, &mut data_val, flags.bits())
})
})?;
Ok(())
}
/// Deletes the current key/data pair.
///
/// ### Flags
///
/// [WriteFlags::NO_DUP_DATA] may be used to delete all data items for the
/// current key, if the database was opened with [DatabaseFlags::DUP_SORT].
pub fn del(&mut self, flags: WriteFlags) -> Result<()> {
mdbx_result(unsafe {
txn_execute(&*self.txn, |_| {
ffi::mdbx_cursor_del(self.cursor, flags.bits())
})
})?;
Ok(())
}
}
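// A hedged sketch of writing through a cursor; it assumes the same `env` and
// `open_db` conventions as the read example above. `WriteFlags::empty()` is the
// bitflags-generated constructor for "no special flags".
//
// ```ignore
// let txn = env.begin_rw_txn()?;
// let db = txn.open_db(None)?;
// let mut cursor = txn.cursor(&db)?;
// cursor.put(b"key", b"value", WriteFlags::empty())?;
// // The cursor is now positioned at the new item, so `del` removes it again.
// cursor.del(WriteFlags::empty())?;
// ```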
impl<'txn, K> Clone for Cursor<'txn, K>
where
K: TransactionKind,
{
fn clone(&self) -> Self {
txn_execute(&*self.txn, |_| Self::new_at_position(self).unwrap())
}
}
impl<'txn, K> fmt::Debug for Cursor<'txn, K>
where
K: TransactionKind,
{
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
f.debug_struct("Cursor").finish()
}
}
impl<'txn, K> Drop for Cursor<'txn, K>
where
K: TransactionKind,
{
fn drop(&mut self) {
txn_execute(&*self.txn, |_| unsafe {
ffi::mdbx_cursor_close(self.cursor)
})
}
}
unsafe fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDBX_val {
match slice {
Some(slice) => ffi::MDBX_val {
iov_len: slice.len(),
iov_base: slice.as_ptr() as *mut c_void,
},
None => ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
},
}
}
unsafe impl<'txn, K> Send for Cursor<'txn, K> where K: TransactionKind {}
unsafe impl<'txn, K> Sync for Cursor<'txn, K> where K: TransactionKind {}
impl<'txn, K> IntoIterator for Cursor<'txn, K>
where
K: TransactionKind,
{
type Item = Result<(Cow<'txn, [u8]>, Cow<'txn, [u8]>)>;
type IntoIter = IntoIter<'txn, K, Cow<'txn, [u8]>, Cow<'txn, [u8]>>;
fn into_iter(self) -> Self::IntoIter {
IntoIter::new(self, MDBX_NEXT, MDBX_NEXT)
}
}
/// An iterator over the key/value pairs in an MDBX database.
#[derive(Debug)]
pub enum IntoIter<'txn, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// An iterator that returns an error on every call to [Iter::next()].
/// Cursor.iter*() creates an Iter of this type when MDBX returns an error
/// on retrieval of a cursor. Using this variant instead of returning
/// an error makes Cursor.iter*() methods infallible, so consumers only
/// need to check the result of Iter.next().
Err(Option<Error>),
/// An iterator that returns an Item on calls to [Iter::next()].
/// The Item is a [Result], so this variant
/// might still return an error, if retrieval of the key/value pair
/// fails for some reason.
Ok {
/// The MDBX cursor with which to iterate.
cursor: Cursor<'txn, K>,
/// The first operation to perform when the consumer calls [Iter::next()].
op: ffi::MDBX_cursor_op,
/// The next and subsequent operations to perform.
next_op: ffi::MDBX_cursor_op,
_marker: PhantomData<fn(&'txn (), K, Key, Value)>,
},
}
impl<'txn, K, Key, Value> IntoIter<'txn, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// Creates a new iterator backed by the given cursor.
fn new(cursor: Cursor<'txn, K>, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self {
IntoIter::Ok {
cursor,
op,
next_op,
_marker: PhantomData,
}
}
}
impl<'txn, K, Key, Value> Iterator for IntoIter<'txn, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
type Item = Result<(Key, Value)>;
fn next(&mut self) -> Option<Self::Item> {
match self {
Self::Ok {
cursor,
op,
next_op,
_marker,
} => {
let mut key = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let op = mem::replace(op, *next_op);
unsafe {
txn_execute(&*cursor.txn, |txn| {
match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
ffi::MDBX_SUCCESS => {
let key = match Key::decode_val::<K>(txn, &key) {
Ok(v) => v,
Err(e) => return Some(Err(e)),
};
let data = match Value::decode_val::<K>(txn, &data) {
Ok(v) => v,
Err(e) => return Some(Err(e)),
};
Some(Ok((key, data)))
}
// MDBX_ENODATA can occur when the cursor was previously seeked to a non-existent value,
// e.g. iter_from with a key greater than all values in the database.
ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
error => Some(Err(Error::from_err_code(error))),
}
})
}
}
Self::Err(err) => err.take().map(Err),
}
}
}
/// An iterator over the key/value pairs in an MDBX database.
#[derive(Debug)]
pub enum Iter<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// An iterator that returns an error on every call to [Iter::next()].
/// Cursor.iter*() creates an Iter of this type when MDBX returns an error
/// on retrieval of a cursor. Using this variant instead of returning
/// an error makes Cursor.iter*() methods infallible, so consumers only
/// need to check the result of Iter.next().
Err(Option<Error>),
/// An iterator that returns an Item on calls to [Iter::next()].
/// The Item is a [Result], so this variant
/// might still return an error, if retrieval of the key/value pair
/// fails for some reason.
Ok {
/// The MDBX cursor with which to iterate.
cursor: &'cur mut Cursor<'txn, K>,
/// The first operation to perform when the consumer calls [Iter::next()].
op: ffi::MDBX_cursor_op,
/// The next and subsequent operations to perform.
next_op: ffi::MDBX_cursor_op,
_marker: PhantomData<fn(&'txn (Key, Value))>,
},
}
impl<'txn, 'cur, K, Key, Value> Iter<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// Creates a new iterator backed by the given cursor.
fn new(
cursor: &'cur mut Cursor<'txn, K>,
op: ffi::MDBX_cursor_op,
next_op: ffi::MDBX_cursor_op,
) -> Self {
Iter::Ok {
cursor,
op,
next_op,
_marker: PhantomData,
}
}
}
impl<'txn, 'cur, K, Key, Value> Iterator for Iter<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
type Item = Result<(Key, Value)>;
fn next(&mut self) -> Option<Self::Item> {
match self {
Iter::Ok {
cursor,
op,
next_op,
..
} => {
let mut key = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let op = mem::replace(op, *next_op);
unsafe {
txn_execute(&*cursor.txn, |txn| {
match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
ffi::MDBX_SUCCESS => {
let key = match Key::decode_val::<K>(txn, &key) {
Ok(v) => v,
Err(e) => return Some(Err(e)),
};
let data = match Value::decode_val::<K>(txn, &data) {
Ok(v) => v,
Err(e) => return Some(Err(e)),
};
Some(Ok((key, data)))
}
// MDBX_ENODATA can occur when the cursor was previously seeked to a non-existent value,
// e.g. iter_from with a key greater than all values in the database.
ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
error => Some(Err(Error::from_err_code(error))),
}
})
}
}
Iter::Err(err) => err.take().map(Err),
}
}
}
/// An iterator over the keys and duplicate values in an MDBX database.
///
/// The yielded items of the iterator are themselves iterators over the duplicate values for a
/// specific key.
pub enum IterDup<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// An iterator that returns an error on every call to Iter.next().
/// Cursor.iter*() creates an Iter of this type when MDBX returns an error
/// on retrieval of a cursor. Using this variant instead of returning
/// an error makes Cursor.iter*() methods infallible, so consumers only
/// need to check the result of Iter.next().
Err(Option<Error>),
/// An iterator that returns an Item on calls to Iter.next().
/// The Item is a Result<(&'txn [u8], &'txn [u8])>, so this variant
/// might still return an error, if retrieval of the key/value pair
/// fails for some reason.
Ok {
/// The MDBX cursor with which to iterate.
cursor: &'cur mut Cursor<'txn, K>,
/// The first operation to perform when the consumer calls Iter.next().
op: c_uint,
_marker: PhantomData<fn(&'txn (Key, Value))>,
},
}
impl<'txn, 'cur, K, Key, Value> IterDup<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
/// Creates a new iterator backed by the given cursor.
fn new(cursor: &'cur mut Cursor<'txn, K>, op: c_uint) -> Self {
IterDup::Ok {
cursor,
op,
_marker: PhantomData,
}
}
}
impl<'txn, 'cur, K, Key, Value> fmt::Debug for IterDup<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
f.debug_struct("IterDup").finish()
}
}
impl<'txn, 'cur, K, Key, Value> Iterator for IterDup<'txn, 'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject<'txn>,
Value: TableObject<'txn>,
{
type Item = IntoIter<'txn, K, Key, Value>;
fn next(&mut self) -> Option<Self::Item> {
match self {
IterDup::Ok { cursor, op, .. } => {
let mut key = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let mut data = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
let op = mem::replace(op, ffi::MDBX_NEXT_NODUP as u32);
txn_execute(&*cursor.txn, |_| {
let err_code =
unsafe { ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) };
if err_code == ffi::MDBX_SUCCESS {
Some(IntoIter::new(
Cursor::new_at_position(&**cursor).unwrap(),
ffi::MDBX_GET_CURRENT,
ffi::MDBX_NEXT_DUP,
))
} else {
None
}
})
}
IterDup::Err(err) => err.take().map(|e| IntoIter::Err(Some(e))),
}
}
}

View File

@ -0,0 +1,66 @@
use crate::{
environment::EnvironmentKind,
error::{mdbx_result, Result},
transaction::{txn_execute, TransactionKind},
Transaction,
};
use libc::c_uint;
use std::{ffi::CString, marker::PhantomData, ptr};
/// A handle to an individual database in an environment.
///
/// A database handle denotes the name and parameters of a database in an environment.
#[derive(Debug)]
pub struct Database<'txn> {
dbi: ffi::MDBX_dbi,
_marker: PhantomData<&'txn ()>,
}
impl<'txn> Database<'txn> {
/// Opens a new database handle in the given transaction.
///
/// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`,
/// or `RwTransaction::create_db`.
pub(crate) fn new<'env, K: TransactionKind, E: EnvironmentKind>(
txn: &'txn Transaction<'env, K, E>,
name: Option<&str>,
flags: c_uint,
) -> Result<Self> {
let c_name = name.map(|n| CString::new(n).unwrap());
let name_ptr = if let Some(c_name) = &c_name {
c_name.as_ptr()
} else {
ptr::null()
};
let mut dbi: ffi::MDBX_dbi = 0;
mdbx_result(txn_execute(&*txn.txn_mutex(), |txn| unsafe {
ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi)
}))?;
Ok(Self::new_from_ptr(dbi))
}
pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi) -> Self {
Self {
dbi,
_marker: PhantomData,
}
}
pub(crate) fn freelist_db() -> Self {
Database {
dbi: 0,
_marker: PhantomData,
}
}
/// Returns the underlying MDBX database handle.
///
/// The caller **must** ensure that the handle is not used after the lifetime of the
/// environment, or after the database has been closed.
pub fn dbi(&self) -> ffi::MDBX_dbi {
self.dbi
}
}
unsafe impl<'txn> Send for Database<'txn> {}
unsafe impl<'txn> Sync for Database<'txn> {}

View File

@ -0,0 +1,633 @@
use crate::{
database::Database,
error::{mdbx_result, Error, Result},
flags::EnvironmentFlags,
transaction::{RO, RW},
Mode, Transaction, TransactionKind,
};
use byteorder::{ByteOrder, NativeEndian};
use libc::c_uint;
use mem::size_of;
use std::{
ffi::CString,
fmt,
fmt::Debug,
marker::PhantomData,
mem,
ops::{Bound, RangeBounds},
os::unix::ffi::OsStrExt,
path::Path,
ptr, result,
sync::mpsc::{sync_channel, SyncSender},
thread::sleep,
time::Duration,
};
mod private {
use super::*;
pub trait Sealed {}
impl<'env> Sealed for NoWriteMap {}
impl<'env> Sealed for WriteMap {}
}
pub trait EnvironmentKind: private::Sealed + Debug + 'static {
const EXTRA_FLAGS: ffi::MDBX_env_flags_t;
}
#[derive(Debug)]
pub struct NoWriteMap;
#[derive(Debug)]
pub struct WriteMap;
impl EnvironmentKind for NoWriteMap {
const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_ENV_DEFAULTS;
}
impl EnvironmentKind for WriteMap {
const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_WRITEMAP;
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct TxnPtr(pub *mut ffi::MDBX_txn);
unsafe impl Send for TxnPtr {}
unsafe impl Sync for TxnPtr {}
#[derive(Copy, Clone, Debug)]
pub(crate) struct EnvPtr(pub *mut ffi::MDBX_env);
unsafe impl Send for EnvPtr {}
unsafe impl Sync for EnvPtr {}
pub(crate) enum TxnManagerMessage {
Begin {
parent: TxnPtr,
flags: ffi::MDBX_txn_flags_t,
sender: SyncSender<Result<TxnPtr>>,
},
Abort {
tx: TxnPtr,
sender: SyncSender<Result<bool>>,
},
Commit {
tx: TxnPtr,
sender: SyncSender<Result<bool>>,
},
}
/// An environment supports multiple databases, all residing in the same shared-memory map.
pub struct Environment<E>
where
E: EnvironmentKind,
{
env: *mut ffi::MDBX_env,
pub(crate) txn_manager: Option<SyncSender<TxnManagerMessage>>,
_marker: PhantomData<E>,
}
impl<E> Environment<E>
where
E: EnvironmentKind,
{
/// Creates a new builder for specifying options for opening an MDBX environment.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> EnvironmentBuilder<E> {
EnvironmentBuilder {
flags: EnvironmentFlags::default(),
max_readers: None,
max_dbs: None,
rp_augment_limit: None,
loose_limit: None,
dp_reserve_limit: None,
txn_dp_limit: None,
spill_max_denominator: None,
spill_min_denominator: None,
geometry: None,
_marker: PhantomData,
}
}
/// Returns a raw pointer to the underlying MDBX environment.
///
/// The caller **must** ensure that the pointer is not dereferenced after the lifetime of the
/// environment.
pub fn env(&self) -> *mut ffi::MDBX_env {
self.env
}
/// Create a read-only transaction for use with the environment.
pub fn begin_ro_txn(&self) -> Result<Transaction<'_, RO, E>> {
Transaction::new(self)
}
/// Create a read-write transaction for use with the environment. This method will block while
/// there are any other read-write transactions open on the environment.
pub fn begin_rw_txn(&self) -> Result<Transaction<'_, RW, E>> {
let sender = self.txn_manager.as_ref().ok_or(Error::Access)?;
let txn = loop {
let (tx, rx) = sync_channel(0);
sender
.send(TxnManagerMessage::Begin {
parent: TxnPtr(ptr::null_mut()),
flags: RW::OPEN_FLAGS,
sender: tx,
})
.unwrap();
let res = rx.recv().unwrap();
if let Err(Error::Busy) = &res {
sleep(Duration::from_millis(250));
continue;
}
break res;
}?;
Ok(Transaction::new_from_ptr(self, txn.0))
}
/// Flush the environment data buffers to disk.
pub fn sync(&self, force: bool) -> Result<bool> {
mdbx_result(unsafe { ffi::mdbx_env_sync_ex(self.env(), force, false) })
}
/// Retrieves statistics about this environment.
pub fn stat(&self) -> Result<Stat> {
unsafe {
let mut stat = Stat::new();
mdbx_result(ffi::mdbx_env_stat_ex(
self.env(),
ptr::null(),
stat.mdb_stat(),
size_of::<Stat>(),
))?;
Ok(stat)
}
}
/// Retrieves info about this environment.
pub fn info(&self) -> Result<Info> {
unsafe {
let mut info = Info(mem::zeroed());
mdbx_result(ffi::mdbx_env_info_ex(
self.env(),
ptr::null(),
&mut info.0,
size_of::<Info>(),
))?;
Ok(info)
}
}
/// Retrieves the total number of pages on the freelist.
///
/// Along with [Environment::info()], this can be used to calculate the exact number
/// of used pages as well as free pages in this environment.
///
/// ```
/// # use libmdbx::Environment;
/// # use libmdbx::NoWriteMap;
/// let dir = tempfile::tempdir().unwrap();
/// let env = Environment::<NoWriteMap>::new().open(dir.path()).unwrap();
/// let info = env.info().unwrap();
/// let stat = env.stat().unwrap();
/// let freelist = env.freelist().unwrap();
/// let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
/// let total_pgs = info.map_size() / stat.page_size() as usize;
/// let pgs_in_use = last_pgno - freelist;
/// let pgs_free = total_pgs - pgs_in_use;
/// ```
///
/// Note:
///
/// * LMDB stores all the freelists in the designated database 0 in each environment,
/// and the freelist count is stored at the beginning of the value as `libc::size_t`
/// in the native byte order.
///
/// * It will create a read transaction to traverse the freelist database.
pub fn freelist(&self) -> Result<usize> {
let mut freelist: usize = 0;
let txn = self.begin_ro_txn()?;
let db = Database::freelist_db();
let cursor = txn.cursor(&db)?;
for result in cursor {
let (_key, value) = result?;
if value.len() < mem::size_of::<usize>() {
return Err(Error::Corrupted);
}
let s = &value[..mem::size_of::<usize>()];
if cfg!(target_pointer_width = "64") {
freelist += NativeEndian::read_u64(s) as usize;
} else {
freelist += NativeEndian::read_u32(s) as usize;
}
}
Ok(freelist)
}
}
/// Environment statistics.
///
/// Contains information about the size and layout of an MDBX environment or database.
#[repr(transparent)]
pub struct Stat(ffi::MDBX_stat);
impl Stat {
/// Create a new Stat with zero'd inner struct `ffi::MDB_stat`.
pub(crate) fn new() -> Stat {
unsafe { Stat(mem::zeroed()) }
}
/// Returns a mut pointer to `ffi::MDB_stat`.
pub(crate) fn mdb_stat(&mut self) -> *mut ffi::MDBX_stat {
&mut self.0
}
}
impl Stat {
/// Size of a database page. This is the same for all databases in the environment.
#[inline]
pub fn page_size(&self) -> u32 {
self.0.ms_psize
}
/// Depth (height) of the B-tree.
#[inline]
pub fn depth(&self) -> u32 {
self.0.ms_depth
}
/// Number of internal (non-leaf) pages.
#[inline]
pub fn branch_pages(&self) -> usize {
self.0.ms_branch_pages as usize
}
/// Number of leaf pages.
#[inline]
pub fn leaf_pages(&self) -> usize {
self.0.ms_leaf_pages as usize
}
/// Number of overflow pages.
#[inline]
pub fn overflow_pages(&self) -> usize {
self.0.ms_overflow_pages as usize
}
/// Number of data items.
#[inline]
pub fn entries(&self) -> usize {
self.0.ms_entries as usize
}
}
#[repr(transparent)]
pub struct GeometryInfo(ffi::MDBX_envinfo__bindgen_ty_1);
impl GeometryInfo {
pub fn min(&self) -> u64 {
self.0.lower
}
}
/// Environment information.
///
/// Contains environment information about the map size, readers, last txn id etc.
#[repr(transparent)]
pub struct Info(ffi::MDBX_envinfo);
impl Info {
pub fn geometry(&self) -> GeometryInfo {
GeometryInfo(self.0.mi_geo)
}
/// Size of memory map.
#[inline]
pub fn map_size(&self) -> usize {
self.0.mi_mapsize as usize
}
/// Last used page number
#[inline]
pub fn last_pgno(&self) -> usize {
self.0.mi_last_pgno as usize
}
/// Last transaction ID
#[inline]
pub fn last_txnid(&self) -> usize {
self.0.mi_recent_txnid as usize
}
/// Max reader slots in the environment
#[inline]
pub fn max_readers(&self) -> usize {
self.0.mi_maxreaders as usize
}
/// Max reader slots used in the environment
#[inline]
pub fn num_readers(&self) -> usize {
self.0.mi_numreaders as usize
}
}
unsafe impl<E> Send for Environment<E> where E: EnvironmentKind {}
unsafe impl<E> Sync for Environment<E> where E: EnvironmentKind {}
impl<E> fmt::Debug for Environment<E>
where
E: EnvironmentKind,
{
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
f.debug_struct("Environment").finish()
}
}
impl<E> Drop for Environment<E>
where
E: EnvironmentKind,
{
fn drop(&mut self) {
unsafe {
ffi::mdbx_env_close_ex(self.env, false);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//// Environment Builder
///////////////////////////////////////////////////////////////////////////////////////////////////
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum PageSize {
MinimalAcceptable,
Set(usize),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Geometry<R> {
pub size: Option<R>,
pub growth_step: Option<isize>,
pub shrink_threshold: Option<isize>,
pub page_size: Option<PageSize>,
}
impl<R> Default for Geometry<R> {
fn default() -> Self {
Self {
size: None,
growth_step: None,
shrink_threshold: None,
page_size: None,
}
}
}
/// Options for opening or creating an environment.
#[derive(Debug, Clone)]
pub struct EnvironmentBuilder<E>
where
E: EnvironmentKind,
{
flags: EnvironmentFlags,
max_readers: Option<c_uint>,
max_dbs: Option<u64>,
rp_augment_limit: Option<u64>,
loose_limit: Option<u64>,
dp_reserve_limit: Option<u64>,
txn_dp_limit: Option<u64>,
spill_max_denominator: Option<u64>,
spill_min_denominator: Option<u64>,
geometry: Option<Geometry<(Option<usize>, Option<usize>)>>,
_marker: PhantomData<E>,
}
impl<E> EnvironmentBuilder<E>
where
E: EnvironmentKind,
{
/// Open an environment.
///
/// Database files will be opened with 644 permissions.
pub fn open(&self, path: &Path) -> Result<Environment<E>> {
self.open_with_permissions(path, 0o644)
}
/// Open an environment with the provided UNIX permissions.
///
/// The path may not contain the null character.
pub fn open_with_permissions(
&self,
path: &Path,
mode: ffi::mdbx_mode_t,
) -> Result<Environment<E>> {
let mut env: *mut ffi::MDBX_env = ptr::null_mut();
unsafe {
mdbx_result(ffi::mdbx_env_create(&mut env))?;
if let Err(e) = (|| {
if let Some(geometry) = &self.geometry {
let mut min_size = -1;
let mut max_size = -1;
if let Some(size) = geometry.size {
if let Some(size) = size.0 {
min_size = size as isize;
}
if let Some(size) = size.1 {
max_size = size as isize;
}
}
mdbx_result(ffi::mdbx_env_set_geometry(
env,
min_size,
-1,
max_size,
geometry.growth_step.unwrap_or(-1),
geometry.shrink_threshold.unwrap_or(-1),
match geometry.page_size {
None => -1,
Some(PageSize::MinimalAcceptable) => 0,
Some(PageSize::Set(size)) => size as isize,
},
))?;
}
for (opt, v) in [
(ffi::MDBX_opt_max_db, self.max_dbs),
(ffi::MDBX_opt_rp_augment_limit, self.rp_augment_limit),
(ffi::MDBX_opt_loose_limit, self.loose_limit),
(ffi::MDBX_opt_dp_reserve_limit, self.dp_reserve_limit),
(ffi::MDBX_opt_txn_dp_limit, self.txn_dp_limit),
(
ffi::MDBX_opt_spill_max_denominator,
self.spill_max_denominator,
),
(
ffi::MDBX_opt_spill_min_denominator,
self.spill_min_denominator,
),
] {
if let Some(v) = v {
mdbx_result(ffi::mdbx_env_set_option(env, opt, v))?;
}
}
let path = match CString::new(path.as_os_str().as_bytes()) {
Ok(path) => path,
Err(..) => return Err(crate::Error::Invalid),
};
mdbx_result(ffi::mdbx_env_open(
env,
path.as_ptr(),
self.flags.make_flags() | E::EXTRA_FLAGS,
mode,
))?;
Ok(())
})() {
ffi::mdbx_env_close_ex(env, false);
return Err(e);
}
}
let mut env = Environment {
env,
txn_manager: None,
_marker: PhantomData,
};
if let Mode::ReadWrite { .. } = self.flags.mode {
let (tx, rx) = std::sync::mpsc::sync_channel(0);
let e = EnvPtr(env.env);
std::thread::spawn(move || loop {
match rx.recv() {
Ok(msg) => match msg {
TxnManagerMessage::Begin {
parent,
flags,
sender,
} => {
let e = e;
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
sender
.send(
mdbx_result(unsafe {
ffi::mdbx_txn_begin_ex(
e.0,
parent.0,
flags,
&mut txn,
ptr::null_mut(),
)
})
.map(|_| TxnPtr(txn)),
)
.unwrap()
}
TxnManagerMessage::Abort { tx, sender } => {
sender
.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) }))
.unwrap();
}
TxnManagerMessage::Commit { tx, sender } => {
sender
.send(mdbx_result(unsafe {
ffi::mdbx_txn_commit_ex(tx.0, ptr::null_mut())
}))
.unwrap();
}
},
Err(_) => return,
}
});
env.txn_manager = Some(tx);
}
Ok(env)
}
/// Sets the environment flags to be used when opening or creating the environment.
pub fn set_flags(&mut self, flags: EnvironmentFlags) -> &mut Self {
self.flags = flags;
self
}
/// Sets the maximum number of threads or reader slots for the environment.
///
/// This defines the number of slots in the lock table that is used to track readers in the
/// environment. The default is 126. Starting a read-only transaction normally ties a lock
/// table slot to the [Transaction] object until it or the [Environment] object is destroyed.
pub fn set_max_readers(&mut self, max_readers: c_uint) -> &mut Self {
self.max_readers = Some(max_readers);
self
}
/// Sets the maximum number of named databases for the environment.
///
/// This function is only needed if multiple databases will be used in the
/// environment. Simpler applications that use the environment as a single
/// unnamed database can ignore this option.
///
/// Currently, a moderate number of slots is cheap, but a huge number gets
/// expensive: 7-120 words per transaction, and every [Transaction::open_db()]
/// does a linear search of the opened slots.
pub fn set_max_dbs(&mut self, v: usize) -> &mut Self {
self.max_dbs = Some(v as u64);
self
}
pub fn set_rp_augment_limit(&mut self, v: u64) -> &mut Self {
self.rp_augment_limit = Some(v);
self
}
pub fn set_loose_limit(&mut self, v: u64) -> &mut Self {
self.loose_limit = Some(v);
self
}
pub fn set_dp_reserve_limit(&mut self, v: u64) -> &mut Self {
self.dp_reserve_limit = Some(v);
self
}
pub fn set_txn_dp_limit(&mut self, v: u64) -> &mut Self {
self.txn_dp_limit = Some(v);
self
}
pub fn set_spill_max_denominator(&mut self, v: u8) -> &mut Self {
self.spill_max_denominator = Some(v.into());
self
}
pub fn set_spill_min_denominator(&mut self, v: u8) -> &mut Self {
self.spill_min_denominator = Some(v.into());
self
}
/// Sets all size-related parameters of the environment, including the page size and the min/max size of the memory map.
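///
/// A hedged sketch (assumes a mutable `builder: EnvironmentBuilder<_>`): grow the map between
/// 1 MiB and 1 GiB in 1 MiB steps, leaving the remaining knobs at their defaults.
///
/// ```ignore
/// builder.set_geometry(Geometry {
///     size: Some(1024 * 1024..1024 * 1024 * 1024),
///     growth_step: Some(1024 * 1024),
///     ..Default::default()
/// });
/// ```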
pub fn set_geometry<R: RangeBounds<usize>>(&mut self, geometry: Geometry<R>) -> &mut Self {
let convert_bound = |bound: Bound<&usize>| match bound {
Bound::Included(v) | Bound::Excluded(v) => Some(*v),
_ => None,
};
self.geometry = Some(Geometry {
size: geometry.size.map(|range| {
(
convert_bound(range.start_bound()),
convert_bound(range.end_bound()),
)
}),
growth_step: geometry.growth_step,
shrink_threshold: geometry.shrink_threshold,
page_size: geometry.page_size,
});
self
}
}

View File

@ -0,0 +1,162 @@
use libc::c_int;
use std::{ffi::CStr, fmt, result, str};
/// An MDBX error kind.
#[derive(Debug)]
pub enum Error {
KeyExist,
NotFound,
NoData,
PageNotFound,
Corrupted,
Panic,
VersionMismatch,
Invalid,
MapFull,
DbsFull,
ReadersFull,
TxnFull,
CursorFull,
PageFull,
UnableExtendMapsize,
Incompatible,
BadRslot,
BadTxn,
BadValSize,
BadDbi,
Problem,
Busy,
Multival,
WannaRecovery,
KeyMismatch,
InvalidValue,
Access,
TooLarge,
DecodeError(Box<dyn std::error::Error + Send + Sync + 'static>),
Other(c_int),
}
impl Error {
/// Converts a raw error code to an [Error].
pub fn from_err_code(err_code: c_int) -> Error {
match err_code {
ffi::MDBX_KEYEXIST => Error::KeyExist,
ffi::MDBX_NOTFOUND => Error::NotFound,
ffi::MDBX_ENODATA => Error::NoData,
ffi::MDBX_PAGE_NOTFOUND => Error::PageNotFound,
ffi::MDBX_CORRUPTED => Error::Corrupted,
ffi::MDBX_PANIC => Error::Panic,
ffi::MDBX_VERSION_MISMATCH => Error::VersionMismatch,
ffi::MDBX_INVALID => Error::Invalid,
ffi::MDBX_MAP_FULL => Error::MapFull,
ffi::MDBX_DBS_FULL => Error::DbsFull,
ffi::MDBX_READERS_FULL => Error::ReadersFull,
ffi::MDBX_TXN_FULL => Error::TxnFull,
ffi::MDBX_CURSOR_FULL => Error::CursorFull,
ffi::MDBX_PAGE_FULL => Error::PageFull,
ffi::MDBX_UNABLE_EXTEND_MAPSIZE => Error::UnableExtendMapsize,
ffi::MDBX_INCOMPATIBLE => Error::Incompatible,
ffi::MDBX_BAD_RSLOT => Error::BadRslot,
ffi::MDBX_BAD_TXN => Error::BadTxn,
ffi::MDBX_BAD_VALSIZE => Error::BadValSize,
ffi::MDBX_BAD_DBI => Error::BadDbi,
ffi::MDBX_PROBLEM => Error::Problem,
ffi::MDBX_BUSY => Error::Busy,
ffi::MDBX_EMULTIVAL => Error::Multival,
ffi::MDBX_WANNA_RECOVERY => Error::WannaRecovery,
ffi::MDBX_EKEYMISMATCH => Error::KeyMismatch,
ffi::MDBX_EINVAL => Error::InvalidValue,
ffi::MDBX_EACCESS => Error::Access,
ffi::MDBX_TOO_LARGE => Error::TooLarge,
other => Error::Other(other),
}
}
/// Converts an [Error] to the raw error code.
fn to_err_code(&self) -> c_int {
match self {
Error::KeyExist => ffi::MDBX_KEYEXIST,
Error::NotFound => ffi::MDBX_NOTFOUND,
Error::NoData => ffi::MDBX_ENODATA,
Error::PageNotFound => ffi::MDBX_PAGE_NOTFOUND,
Error::Corrupted => ffi::MDBX_CORRUPTED,
Error::Panic => ffi::MDBX_PANIC,
Error::VersionMismatch => ffi::MDBX_VERSION_MISMATCH,
Error::Invalid => ffi::MDBX_INVALID,
Error::MapFull => ffi::MDBX_MAP_FULL,
Error::DbsFull => ffi::MDBX_DBS_FULL,
Error::ReadersFull => ffi::MDBX_READERS_FULL,
Error::TxnFull => ffi::MDBX_TXN_FULL,
Error::CursorFull => ffi::MDBX_CURSOR_FULL,
Error::PageFull => ffi::MDBX_PAGE_FULL,
Error::UnableExtendMapsize => ffi::MDBX_UNABLE_EXTEND_MAPSIZE,
Error::Incompatible => ffi::MDBX_INCOMPATIBLE,
Error::BadRslot => ffi::MDBX_BAD_RSLOT,
Error::BadTxn => ffi::MDBX_BAD_TXN,
Error::BadValSize => ffi::MDBX_BAD_VALSIZE,
Error::BadDbi => ffi::MDBX_BAD_DBI,
Error::Problem => ffi::MDBX_PROBLEM,
Error::Busy => ffi::MDBX_BUSY,
Error::Multival => ffi::MDBX_EMULTIVAL,
Error::WannaRecovery => ffi::MDBX_WANNA_RECOVERY,
Error::KeyMismatch => ffi::MDBX_EKEYMISMATCH,
Error::InvalidValue => ffi::MDBX_EINVAL,
Error::Access => ffi::MDBX_EACCESS,
Error::TooLarge => ffi::MDBX_TOO_LARGE,
Error::Other(err_code) => *err_code,
_ => unreachable!(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::DecodeError(reason) => write!(fmt, "{}", reason),
other => {
write!(fmt, "{}", unsafe {
let err = ffi::mdbx_strerror(other.to_err_code());
str::from_utf8_unchecked(CStr::from_ptr(err).to_bytes())
})
}
}
}
}
impl std::error::Error for Error {}
/// An MDBX result.
pub type Result<T> = result::Result<T, Error>;
pub fn mdbx_result(err_code: c_int) -> Result<bool> {
match err_code {
ffi::MDBX_SUCCESS => Ok(false),
ffi::MDBX_RESULT_TRUE => Ok(true),
other => Err(Error::from_err_code(other)),
}
}
#[macro_export]
macro_rules! mdbx_try_optional {
($expr:expr) => {{
match $expr {
Err(Error::NotFound | Error::NoData) => return Ok(None),
Err(e) => return Err(e),
Ok(v) => v,
}
}};
}
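// A hedged usage sketch for `mdbx_try_optional!` (the caller and its `fetch` helper are
// hypothetical, not part of this crate): the macro maps `NotFound`/`NoData` errors to
// `Ok(None)` and propagates any other error.
//
//     fn maybe_fetch(key: &[u8]) -> Result<Option<Vec<u8>>> {
//         let value = mdbx_try_optional!(fetch(key)); // `fetch` returns `Result<Vec<u8>>`
//         Ok(Some(value))
//     }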
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_description() {
assert_eq!("Permission denied", Error::from_err_code(13).to_string());
assert_eq!(
"MDBX_INVALID: File is not an MDBX file",
Error::Invalid.to_string()
);
}
}

View File

@ -0,0 +1,193 @@
use bitflags::bitflags;
use ffi::*;
use libc::c_uint;
/// MDBX sync mode
#[derive(Clone, Copy, Debug)]
pub enum SyncMode {
/// Default robust and durable sync mode.
/// Metadata is written and flushed to disk after the data is written and flushed, which guarantees the integrity of the database in the event of a crash at any time.
Durable,
/// Don't sync the meta-page after commit.
///
/// Flush system buffers to disk only once per transaction commit, omit the metadata flush.
/// Defer that until the system flushes files to disk, until the next non-read-only commit, or until [Environment::sync()](crate::Environment::sync) is called.
/// Depending on the platform and hardware, with [SyncMode::NoMetaSync] you may get a doubling of write performance.
///
/// This trade-off maintains database integrity, but a system crash may undo the last committed transaction.
/// I.e. it preserves the ACI (atomicity, consistency, isolation) but not D (durability) database property.
NoMetaSync,
/// Don't sync anything but keep previous steady commits.
///
/// Like [SyncMode::UtterlyNoSync], the [SyncMode::SafeNoSync] flag disables flushing system buffers to disk when committing a transaction.
/// But there is a huge difference in how the MVCC snapshots corresponding to previous "steady" transactions are recycled (see below).
///
/// With [crate::WriteMap], [SyncMode::SafeNoSync] instructs MDBX to use asynchronous mmap-flushes to disk.
/// Asynchronous mmap-flushes mean that all writes are scheduled and performed by the operating system in its own manner, i.e. unordered.
/// MDBX itself just notifies the operating system that it would be nice to write data to disk, but nothing more.
///
/// Depending on the platform and hardware, with [SyncMode::SafeNoSync] you may get a multiple increase of write performance, even 10 times or more.
///
/// In contrast to [SyncMode::UtterlyNoSync] mode, with the [SyncMode::SafeNoSync] flag MDBX keeps the B-tree pages of the last "steady" transaction (the last transaction completely synced to disk) untouched.
/// This has big implications for both data durability and (unfortunately) performance:
///
/// A system crash can't corrupt the database, but you will lose the last transactions, because MDBX rolls back to the last steady commit, which is kept explicitly.
/// The last steady transaction has an effect similar to a "long-lived" read transaction, since it prevents reuse of pages freed by newer write transactions; thus any data changes are placed in newly allocated pages.
/// To avoid rapid database growth, the system will sync data and issue a steady commit-point to resume page reuse each time there is insufficient space, and before increasing the size of the file on disk.
/// In other words, with the [SyncMode::SafeNoSync] flag MDBX protects you from whole-database corruption, at the cost of an increased database size and/or number of disk IOPS.
/// So, the [SyncMode::SafeNoSync] flag can be used with [Environment::sync()](crate::Environment::sync) as an alternative to batch committing or nested transactions (in some cases).
///
/// The number and volume of disk IOPS with the [SyncMode::SafeNoSync] flag will be exactly the same as without any no-sync flags.
/// However, you should expect a larger process working set and significantly worse locality of reference, due to the more intensive allocation of previously unused pages and the increase in the size of the database.
SafeNoSync,
/// Don't sync anything and wipe previous steady commits.
///
/// Don't flush system buffers to disk when committing a transaction.
/// This optimization means a system crash can corrupt the database, if buffers are not yet flushed to disk.
/// Depending on the platform and hardware, with [SyncMode::UtterlyNoSync] you may get a multiple increase of write performance, even 100 times or more.
///
/// If the filesystem preserves write order (which is rare and never provided unless explicitly noted) and the [WriteMap](crate::WriteMap) and [EnvironmentFlags::liforeclaim] flags are not used,
/// then a system crash can't corrupt the database, but you can lose the last transactions, if at least one buffer is not yet flushed to disk.
/// The risk is governed by how often the system flushes dirty buffers to disk and how often [Environment::sync()](crate::Environment::sync) is called.
/// So, transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D (durability).
/// I.e. database integrity is maintained, but a system crash may undo the final transactions.
///
/// Otherwise, if the filesystem does not preserve write order (which is typical) or the [WriteMap](crate::WriteMap) or [EnvironmentFlags::liforeclaim] flags are used, you should expect a corrupted database after a system crash.
///
/// So, the most important things about [SyncMode::UtterlyNoSync]:
///
/// A system crash immediately after committing a write transaction is highly likely to lead to database corruption.
/// Successful completion of [Environment::sync(force=true)](crate::Environment::sync) after one or more committed transactions guarantees consistency and durability.
/// BUT by committing two or more transactions you put the database back into a weak state, in which a system crash may lead to database corruption!
/// If only a single transaction was committed after [Environment::sync()](crate::Environment::sync), you may lose that transaction itself, but not the whole database.
/// Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in case of an application crash (but no durability on system failure),
/// and therefore may be very useful in scenarios where durability across a system failure is not required (e.g. for short-lived data), or if you can accept such a risk.
UtterlyNoSync,
}
impl Default for SyncMode {
fn default() -> Self {
Self::Durable
}
}
#[derive(Clone, Copy, Debug)]
pub enum Mode {
ReadOnly,
ReadWrite { sync_mode: SyncMode },
}
impl Default for Mode {
fn default() -> Self {
Self::ReadWrite {
sync_mode: SyncMode::default(),
}
}
}
impl From<Mode> for EnvironmentFlags {
fn from(mode: Mode) -> Self {
Self {
mode,
..Default::default()
}
}
}
#[derive(Clone, Copy, Debug, Default)]
pub struct EnvironmentFlags {
pub no_sub_dir: bool,
pub exclusive: bool,
pub accede: bool,
pub mode: Mode,
pub no_rdahead: bool,
pub no_meminit: bool,
pub coalesce: bool,
pub liforeclaim: bool,
}
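// A hedged construction sketch (not from the upstream docs): the flags are plain struct
// fields, so a custom combination can be built with struct-update syntax, and a bare [Mode]
// converts into flags via `From<Mode>`.
//
//     let custom = EnvironmentFlags {
//         mode: Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync },
//         no_rdahead: true,
//         ..Default::default()
//     };
//     let read_only: EnvironmentFlags = Mode::ReadOnly.into();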
impl EnvironmentFlags {
pub(crate) fn make_flags(&self) -> ffi::MDBX_env_flags_t {
let mut flags = 0;
if self.no_sub_dir {
flags |= ffi::MDBX_NOSUBDIR;
}
if self.exclusive {
flags |= ffi::MDBX_EXCLUSIVE;
}
if self.accede {
flags |= ffi::MDBX_ACCEDE;
}
match self.mode {
Mode::ReadOnly => {
flags |= ffi::MDBX_RDONLY;
}
Mode::ReadWrite { sync_mode } => {
flags |= match sync_mode {
SyncMode::Durable => ffi::MDBX_SYNC_DURABLE,
SyncMode::NoMetaSync => ffi::MDBX_NOMETASYNC,
SyncMode::SafeNoSync => ffi::MDBX_SAFE_NOSYNC,
SyncMode::UtterlyNoSync => ffi::MDBX_UTTERLY_NOSYNC,
};
}
}
if self.no_rdahead {
flags |= ffi::MDBX_NORDAHEAD;
}
if self.no_meminit {
flags |= ffi::MDBX_NOMEMINIT;
}
if self.coalesce {
flags |= ffi::MDBX_COALESCE;
}
if self.liforeclaim {
flags |= ffi::MDBX_LIFORECLAIM;
}
flags |= ffi::MDBX_NOTLS;
flags
}
}
bitflags! {
#[doc="Database options."]
#[derive(Default)]
pub struct DatabaseFlags: c_uint {
const REVERSE_KEY = MDBX_REVERSEKEY as u32;
const DUP_SORT = MDBX_DUPSORT as u32;
const INTEGER_KEY = MDBX_INTEGERKEY as u32;
const DUP_FIXED = MDBX_DUPFIXED as u32;
const INTEGER_DUP = MDBX_INTEGERDUP as u32;
const REVERSE_DUP = MDBX_REVERSEDUP as u32;
const CREATE = MDBX_CREATE as u32;
const ACCEDE = MDBX_DB_ACCEDE as u32;
}
}
bitflags! {
#[doc="Write options."]
#[derive(Default)]
pub struct WriteFlags: c_uint {
const UPSERT = MDBX_UPSERT as u32;
const NO_OVERWRITE = MDBX_NOOVERWRITE as u32;
const NO_DUP_DATA = MDBX_NODUPDATA as u32;
const CURRENT = MDBX_CURRENT as u32;
const ALLDUPS = MDBX_ALLDUPS as u32;
const RESERVE = MDBX_RESERVE as u32;
const APPEND = MDBX_APPEND as u32;
const APPEND_DUP = MDBX_APPENDDUP as u32;
const MULTIPLE = MDBX_MULTIPLE as u32;
}
}

View File

@ -0,0 +1,64 @@
#![allow(clippy::type_complexity)]
#![doc = include_str!("../README.md")]
pub use crate::{
codec::*,
cursor::{Cursor, Iter, IterDup},
database::Database,
environment::{
Environment, EnvironmentBuilder, EnvironmentKind, Geometry, Info, NoWriteMap, Stat,
WriteMap,
},
error::{Error, Result},
flags::*,
transaction::{Transaction, TransactionKind, RO, RW},
};
mod codec;
mod cursor;
mod database;
mod environment;
mod error;
mod flags;
mod transaction;
#[cfg(test)]
mod test_utils {
use super::*;
use byteorder::{ByteOrder, LittleEndian};
use tempfile::tempdir;
type Environment = crate::Environment<NoWriteMap>;
/// Regression test for https://github.com/danburkert/lmdb-rs/issues/21.
/// This test reliably segfaults when run against lmdb compiled with opt level -O3 and newer
/// GCC compilers.
#[test]
fn issue_21_regression() {
const HEIGHT_KEY: [u8; 1] = [0];
let dir = tempdir().unwrap();
let env = {
let mut builder = Environment::new();
builder.set_max_dbs(2);
builder.set_geometry(Geometry {
size: Some(1_000_000..1_000_000),
..Default::default()
});
builder.open(dir.path()).expect("open mdbx env")
};
for height in 0..1000 {
let mut value = [0u8; 8];
LittleEndian::write_u64(&mut value, height);
let tx = env.begin_rw_txn().expect("begin_rw_txn");
let index = tx
.create_db(None, DatabaseFlags::DUP_SORT)
.expect("open index db");
tx.put(&index, &HEIGHT_KEY, &value, WriteFlags::empty())
.expect("tx.put");
tx.commit().expect("tx.commit");
}
}
}

View File

@ -0,0 +1,492 @@
use crate::{
database::Database,
environment::{Environment, EnvironmentKind, NoWriteMap, TxnManagerMessage, TxnPtr},
error::{mdbx_result, Result},
flags::{DatabaseFlags, WriteFlags},
Cursor, Error, Stat, TableObject,
};
use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE};
use indexmap::IndexSet;
use libc::{c_uint, c_void};
use parking_lot::Mutex;
use std::{
fmt,
fmt::Debug,
marker::PhantomData,
mem::size_of,
ptr, result, slice,
sync::{mpsc::sync_channel, Arc},
};
mod private {
use super::*;
pub trait Sealed {}
impl<'env> Sealed for RO {}
impl<'env> Sealed for RW {}
}
pub trait TransactionKind: private::Sealed + Debug + 'static {
#[doc(hidden)]
const ONLY_CLEAN: bool;
#[doc(hidden)]
const OPEN_FLAGS: MDBX_txn_flags_t;
}
#[derive(Debug)]
pub struct RO;
#[derive(Debug)]
pub struct RW;
impl TransactionKind for RO {
const ONLY_CLEAN: bool = true;
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY;
}
impl TransactionKind for RW {
const ONLY_CLEAN: bool = false;
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE;
}
/// An MDBX transaction.
///
/// All database operations require a transaction.
pub struct Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
txn: Arc<Mutex<*mut ffi::MDBX_txn>>,
primed_dbis: Mutex<IndexSet<ffi::MDBX_dbi>>,
committed: bool,
env: &'env Environment<E>,
_marker: PhantomData<fn(K)>,
}
impl<'env, K, E> Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
pub(crate) fn new(env: &'env Environment<E>) -> Result<Self> {
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
unsafe {
mdbx_result(ffi::mdbx_txn_begin_ex(
env.env(),
ptr::null_mut(),
K::OPEN_FLAGS,
&mut txn,
ptr::null_mut(),
))?;
Ok(Self::new_from_ptr(env, txn))
}
}
pub(crate) fn new_from_ptr(env: &'env Environment<E>, txn: *mut ffi::MDBX_txn) -> Self {
Self {
txn: Arc::new(Mutex::new(txn)),
primed_dbis: Mutex::new(IndexSet::new()),
committed: false,
env,
_marker: PhantomData,
}
}
/// Returns a shared handle to the mutex guarding the raw pointer to the underlying MDBX transaction.
///
/// The caller **must** ensure that the pointer is not used after the
/// lifetime of the transaction.
pub(crate) fn txn_mutex(&self) -> Arc<Mutex<*mut ffi::MDBX_txn>> {
self.txn.clone()
}
pub fn txn(&self) -> *mut ffi::MDBX_txn {
*self.txn.lock()
}
/// Returns a reference to the [Environment] this transaction belongs to.
pub fn env(&self) -> &Environment<E> {
self.env
}
/// Returns the transaction id.
pub fn id(&self) -> u64 {
txn_execute(&self.txn, |txn| unsafe { ffi::mdbx_txn_id(txn) })
}
/// Gets an item from a database.
///
/// This function retrieves the data associated with the given key in the
/// database. If the database supports duplicate keys
/// ([DatabaseFlags::DUP_SORT]) then the first data item for the key will be
/// returned. Retrieval of other items requires the use of
/// [Cursor]. If the item is not in the database, then
/// [None] will be returned.
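///
/// A short illustrative sketch (assumes an open `env` and a caller returning [Result]):
///
/// ```ignore
/// let txn = env.begin_ro_txn()?;
/// let db = txn.open_db(None)?;
/// let value: Option<Vec<u8>> = txn.get(&db, b"key")?;
/// ```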
pub fn get<'txn, Key>(&'txn self, db: &Database<'txn>, key: &[u8]) -> Result<Option<Key>>
where
Key: TableObject<'txn>,
{
let key_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: key.len(),
iov_base: key.as_ptr() as *mut c_void,
};
let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: 0,
iov_base: ptr::null_mut(),
};
txn_execute(&self.txn, |txn| unsafe {
match ffi::mdbx_get(txn, db.dbi(), &key_val, &mut data_val) {
ffi::MDBX_SUCCESS => Key::decode_val::<K>(txn, &data_val).map(Some),
ffi::MDBX_NOTFOUND => Ok(None),
err_code => Err(Error::from_err_code(err_code)),
}
})
}
/// Commits the transaction.
///
/// Any pending operations will be saved.
pub fn commit(self) -> Result<bool> {
self.commit_and_rebind_open_dbs().map(|v| v.0)
}
pub fn prime_for_permaopen(&self, db: Database<'_>) {
self.primed_dbis.lock().insert(db.dbi());
}
/// Commits the transaction and returns table handles permanently open for the lifetime of `Environment`.
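///
/// A hedged sketch (assumes an open read-write `env`):
///
/// ```ignore
/// let txn = env.begin_rw_txn()?;
/// txn.prime_for_permaopen(txn.open_db(None)?);
/// let (_, dbs) = txn.commit_and_rebind_open_dbs()?;
/// // `dbs` now holds handles that stay valid for the lifetime of `env`.
/// ```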
pub fn commit_and_rebind_open_dbs(mut self) -> Result<(bool, Vec<Database<'env>>)> {
let txnlck = self.txn.lock();
let txn = *txnlck;
let result = if K::ONLY_CLEAN {
mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, ptr::null_mut()) })
} else {
let (sender, rx) = sync_channel(0);
self.env
.txn_manager
.as_ref()
.unwrap()
.send(TxnManagerMessage::Commit {
tx: TxnPtr(txn),
sender,
})
.unwrap();
rx.recv().unwrap()
};
self.committed = true;
result.map(|v| {
(
v,
self.primed_dbis
.lock()
.iter()
.map(|&dbi| Database::new_from_ptr(dbi))
.collect(),
)
})
}
/// Opens a handle to an MDBX database.
///
/// If `name` is [None], then the returned handle will be for the default database.
///
/// If `name` is not [None], then the returned handle will be for a named database. In this
/// case the environment must be configured to allow named databases through
/// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs).
///
/// The returned database handle may be shared among all transactions in the environment.
///
/// The database name may not contain the null character.
pub fn open_db<'txn>(&'txn self, name: Option<&str>) -> Result<Database<'txn>> {
Database::new(self, name, 0)
}
/// Gets the option flags for the given database in the transaction.
pub fn db_flags<'txn>(&'txn self, db: &Database<'txn>) -> Result<DatabaseFlags> {
let mut flags: c_uint = 0;
unsafe {
mdbx_result(txn_execute(&self.txn, |txn| {
ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut())
}))?;
}
Ok(DatabaseFlags::from_bits_truncate(flags))
}
/// Retrieves database statistics.
pub fn db_stat<'txn>(&'txn self, db: &Database<'txn>) -> Result<Stat> {
unsafe {
let mut stat = Stat::new();
mdbx_result(txn_execute(&self.txn, |txn| {
ffi::mdbx_dbi_stat(txn, db.dbi(), stat.mdb_stat(), size_of::<Stat>())
}))?;
Ok(stat)
}
}
/// Open a new cursor on the given database.
pub fn cursor<'txn>(&'txn self, db: &Database<'txn>) -> Result<Cursor<'txn, K>> {
Cursor::new(self, db)
}
}
pub(crate) fn txn_execute<F: FnOnce(*mut ffi::MDBX_txn) -> T, T>(
txn: &Mutex<*mut ffi::MDBX_txn>,
f: F,
) -> T {
let lck = txn.lock();
(f)(*lck)
}
impl<'env, E> Transaction<'env, RW, E>
where
E: EnvironmentKind,
{
fn open_db_with_flags<'txn>(
&'txn self,
name: Option<&str>,
flags: DatabaseFlags,
) -> Result<Database<'txn>> {
Database::new(self, name, flags.bits())
}
/// Opens a handle to an MDBX database, creating the database if necessary.
///
/// If the database is already created, the given option flags will be added to it.
///
/// If `name` is [None], then the returned handle will be for the default database.
///
/// If `name` is not [None], then the returned handle will be for a named database. In this
/// case the environment must be configured to allow named databases through
/// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs).
///
/// This function will fail with [Error::BadRslot](crate::error::Error::BadRslot) if called by a thread with an open
/// transaction.
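///
/// A minimal sketch (assumes an open read-write `env` configured with enough named databases):
///
/// ```ignore
/// let txn = env.begin_rw_txn()?;
/// let db = txn.create_db(Some("multi"), DatabaseFlags::DUP_SORT)?;
/// ```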
pub fn create_db<'txn>(
&'txn self,
name: Option<&str>,
flags: DatabaseFlags,
) -> Result<Database<'txn>> {
self.open_db_with_flags(name, flags | DatabaseFlags::CREATE)
}
/// Stores an item into a database.
///
/// This function stores key/data pairs in the database. The default
/// behavior is to enter the new key/data pair, replacing any previously
/// existing key if duplicates are disallowed, or adding a duplicate data
/// item if duplicates are allowed ([DatabaseFlags::DUP_SORT]).
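///
/// A short usage sketch (assumes an open read-write `env`):
///
/// ```ignore
/// let txn = env.begin_rw_txn()?;
/// let db = txn.open_db(None)?;
/// txn.put(&db, b"key", b"value", WriteFlags::empty())?;
/// txn.commit()?;
/// ```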
pub fn put<'txn>(
&'txn self,
db: &Database<'txn>,
key: impl AsRef<[u8]>,
data: impl AsRef<[u8]>,
flags: WriteFlags,
) -> Result<()> {
let key = key.as_ref();
let data = data.as_ref();
let key_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: key.len(),
iov_base: key.as_ptr() as *mut c_void,
};
let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: data.len(),
iov_base: data.as_ptr() as *mut c_void,
};
mdbx_result(txn_execute(&self.txn, |txn| unsafe {
ffi::mdbx_put(txn, db.dbi(), &key_val, &mut data_val, flags.bits())
}))?;
Ok(())
}
/// Returns a buffer which can be used to write a value into the item at the
/// given key and with the given length. The buffer must be completely
/// filled by the caller.
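///
/// A hedged sketch (assumes an open read-write `env`; the reserved length must match what is written):
///
/// ```ignore
/// let txn = env.begin_rw_txn()?;
/// let db = txn.open_db(None)?;
/// let buf = txn.reserve(&db, b"key", 4, WriteFlags::empty())?;
/// buf.copy_from_slice(b"data"); // fill all 4 reserved bytes
/// ```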
pub fn reserve<'txn>(
&'txn self,
db: &Database<'txn>,
key: impl AsRef<[u8]>,
len: usize,
flags: WriteFlags,
) -> Result<&'txn mut [u8]> {
let key = key.as_ref();
let key_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: key.len(),
iov_base: key.as_ptr() as *mut c_void,
};
let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: len,
iov_base: ptr::null_mut::<c_void>(),
};
unsafe {
mdbx_result(txn_execute(&self.txn, |txn| {
ffi::mdbx_put(
txn,
db.dbi(),
&key_val,
&mut data_val,
flags.bits() | ffi::MDBX_RESERVE,
)
}))?;
Ok(slice::from_raw_parts_mut(
data_val.iov_base as *mut u8,
data_val.iov_len,
))
}
}
/// Delete items from a database.
/// This function removes key/data pairs from the database.
///
/// The data parameter is NOT ignored, regardless of whether the database supports sorted duplicate data items.
/// If the data parameter is [Some] only the matching data item will be deleted.
/// Otherwise, if data parameter is [None], any/all value(s) for specified key will be deleted.
///
/// Returns `true` if the key/value pair was present.
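///
/// An illustrative sketch (assumes an open read-write `env`):
///
/// ```ignore
/// let txn = env.begin_rw_txn()?;
/// let db = txn.open_db(None)?;
/// let existed = txn.del(&db, b"key", None)?; // delete every value stored under `key`
/// ```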
pub fn del<'txn>(
&'txn self,
db: &Database<'txn>,
key: impl AsRef<[u8]>,
data: Option<&[u8]>,
) -> Result<bool> {
let key = key.as_ref();
let key_val: ffi::MDBX_val = ffi::MDBX_val {
iov_len: key.len(),
iov_base: key.as_ptr() as *mut c_void,
};
let data_val: Option<ffi::MDBX_val> = data.map(|data| ffi::MDBX_val {
iov_len: data.len(),
iov_base: data.as_ptr() as *mut c_void,
});
mdbx_result({
txn_execute(&self.txn, |txn| {
if let Some(d) = data_val {
unsafe { ffi::mdbx_del(txn, db.dbi(), &key_val, &d) }
} else {
unsafe { ffi::mdbx_del(txn, db.dbi(), &key_val, ptr::null()) }
}
})
})
.map(|_| true)
.or_else(|e| match e {
Error::NotFound => Ok(false),
other => Err(other),
})
}
/// Empties the given database. All items will be removed.
pub fn clear_db<'txn>(&'txn self, db: &Database<'txn>) -> Result<()> {
mdbx_result(txn_execute(&self.txn, |txn| unsafe {
ffi::mdbx_drop(txn, db.dbi(), false)
}))?;
Ok(())
}
/// Drops the database from the environment.
///
/// # Safety
/// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi BEFORE calling this function.
pub unsafe fn drop_db<'txn>(&'txn self, db: Database<'txn>) -> Result<()> {
mdbx_result(txn_execute(&self.txn, |txn| {
ffi::mdbx_drop(txn, db.dbi(), true)
}))?;
Ok(())
}
}
impl<'env, E> Transaction<'env, RO, E>
where
E: EnvironmentKind,
{
/// Closes the database handle.
///
/// # Safety
/// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi BEFORE calling this function.
pub unsafe fn close_db(&self, db: Database<'_>) -> Result<()> {
mdbx_result(ffi::mdbx_dbi_close(self.env.env(), db.dbi()))?;
Ok(())
}
}
impl<'env> Transaction<'env, RW, NoWriteMap> {
/// Begins a new nested transaction inside of this transaction.
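///
/// A brief sketch (assumes an open read-write `env` without [WriteMap](crate::WriteMap)):
///
/// ```ignore
/// let mut parent = env.begin_rw_txn()?;
/// {
///     let nested = parent.begin_nested_txn()?;
///     nested.put(&nested.open_db(None)?, b"k", b"v", WriteFlags::empty())?;
///     nested.commit()?;
/// }
/// parent.commit()?;
/// ```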
pub fn begin_nested_txn(&mut self) -> Result<Transaction<'_, RW, NoWriteMap>> {
txn_execute(&self.txn, |txn| {
let (tx, rx) = sync_channel(0);
self.env
.txn_manager
.as_ref()
.unwrap()
.send(TxnManagerMessage::Begin {
parent: TxnPtr(txn),
flags: RW::OPEN_FLAGS,
sender: tx,
})
.unwrap();
rx.recv()
.unwrap()
.map(|ptr| Transaction::new_from_ptr(self.env, ptr.0))
})
}
}
impl<'env, K, E> fmt::Debug for Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
f.debug_struct("RoTransaction").finish()
}
}
impl<'env, K, E> Drop for Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
fn drop(&mut self) {
txn_execute(&self.txn, |txn| {
if !self.committed {
if K::ONLY_CLEAN {
unsafe {
ffi::mdbx_txn_abort(txn);
}
} else {
let (sender, rx) = sync_channel(0);
self.env
.txn_manager
.as_ref()
.unwrap()
.send(TxnManagerMessage::Abort {
tx: TxnPtr(txn),
sender,
})
.unwrap();
rx.recv().unwrap().unwrap();
}
}
})
}
}
unsafe impl<'env, K, E> Send for Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
}
unsafe impl<'env, K, E> Sync for Transaction<'env, K, E>
where
K: TransactionKind,
E: EnvironmentKind,
{
}

View File

@ -0,0 +1,440 @@
use libmdbx::*;
use std::borrow::Cow;
use tempfile::tempdir;
type Environment = libmdbx::Environment<NoWriteMap>;
#[test]
fn test_get() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
assert_eq!(None, txn.cursor(&db).unwrap().first::<(), ()>().unwrap());
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val3", WriteFlags::empty()).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2")));
assert_eq!(cursor.prev().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.last().unwrap(), Some((*b"key3", *b"val3")));
assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
assert_eq!(cursor.set_key(b"key3").unwrap(), Some((*b"key3", *b"val3")));
assert_eq!(
cursor.set_range(b"key2\0").unwrap(),
Some((*b"key3", *b"val3"))
);
}
#[test]
fn test_get_dup() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val3", WriteFlags::empty()).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.first_dup().unwrap(), Some(*b"val1"));
assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.next_nodup().unwrap(), Some((*b"key2", *b"val1")));
assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2")));
assert_eq!(cursor.prev().unwrap(), Some((*b"key2", *b"val1")));
assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val2")));
assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val3")));
assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None);
assert_eq!(cursor.prev_dup().unwrap(), Some((*b"key2", *b"val2")));
assert_eq!(cursor.last_dup().unwrap(), Some(*b"val3"));
assert_eq!(cursor.prev_nodup().unwrap(), Some((*b"key1", *b"val3")));
assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None);
assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
assert_eq!(cursor.set(b"key2").unwrap(), Some(*b"val1"));
assert_eq!(
cursor.set_range(b"key1\0").unwrap(),
Some((*b"key2", *b"val1"))
);
assert_eq!(cursor.get_both(b"key1", b"val3").unwrap(), Some(*b"val3"));
assert_eq!(cursor.get_both_range::<()>(b"key1", b"val4").unwrap(), None);
assert_eq!(
cursor.get_both_range(b"key2", b"val").unwrap(),
Some(*b"val1")
);
assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val3")));
cursor.del(WriteFlags::empty()).unwrap();
assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val2")));
cursor.del(WriteFlags::empty()).unwrap();
assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val1")));
cursor.del(WriteFlags::empty()).unwrap();
assert_eq!(cursor.last().unwrap(), Some((*b"key1", *b"val3")));
}
#[test]
fn test_get_dupfixed() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn
.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED)
.unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val4", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val5", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val6", WriteFlags::empty()).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
assert_eq!(cursor.get_multiple().unwrap(), Some(*b"val1val2val3"));
assert_eq!(cursor.next_multiple::<(), ()>().unwrap(), None);
}
#[test]
fn test_iter() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let items: Vec<(_, _)> = vec![
(*b"key1", *b"val1"),
(*b"key2", *b"val2"),
(*b"key3", *b"val3"),
(*b"key5", *b"val5"),
];
{
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
for (key, data) in &items {
txn.put(&db, key, data, WriteFlags::empty()).unwrap();
}
assert!(!txn.commit().unwrap());
}
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
// Because Result implements FromIterator, we can collect the iterator
// of items of type Result<_, E> into a Result<Vec<_>, E> by specifying
// the collection type via the turbofish syntax.
assert_eq!(items, cursor.iter().collect::<Result<Vec<_>>>().unwrap());
// Alternately, we can collect it into an appropriately typed variable.
let retr: Result<Vec<_>> = cursor.iter_start().collect();
assert_eq!(items, retr.unwrap());
cursor.set::<()>(b"key2").unwrap();
assert_eq!(
items.clone().into_iter().skip(2).collect::<Vec<_>>(),
cursor.iter().collect::<Result<Vec<_>>>().unwrap()
);
assert_eq!(
items,
cursor.iter_start().collect::<Result<Vec<_>>>().unwrap()
);
assert_eq!(
items.clone().into_iter().skip(1).collect::<Vec<_>>(),
cursor
.iter_from(b"key2")
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items.into_iter().skip(3).collect::<Vec<_>>(),
cursor
.iter_from(b"key4")
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
Vec::<((), ())>::new(),
cursor
.iter_from(b"key6")
.collect::<Result<Vec<_>>>()
.unwrap()
);
}
#[test]
fn test_iter_empty_database() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert!(cursor.iter::<(), ()>().next().is_none());
assert!(cursor.iter_start::<(), ()>().next().is_none());
assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
}
#[test]
fn test_iter_empty_dup_database() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
txn.commit().unwrap();
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert!(cursor.iter::<(), ()>().next().is_none());
assert!(cursor.iter_start::<(), ()>().next().is_none());
assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
assert!(cursor.iter_dup::<(), ()>().flatten().next().is_none());
assert!(cursor.iter_dup_start::<(), ()>().flatten().next().is_none());
assert!(cursor
.iter_dup_from::<(), ()>(b"foo")
.flatten()
.next()
.is_none());
assert!(cursor.iter_dup_of::<(), ()>(b"foo").next().is_none());
}
#[test]
fn test_iter_dup() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
txn.commit().unwrap();
let items: Vec<(_, _)> = [
(b"a", b"1"),
(b"a", b"2"),
(b"a", b"3"),
(b"b", b"1"),
(b"b", b"2"),
(b"b", b"3"),
(b"c", b"1"),
(b"c", b"2"),
(b"c", b"3"),
(b"e", b"1"),
(b"e", b"2"),
(b"e", b"3"),
]
.iter()
.map(|&(&k, &v)| (k, v))
.collect();
{
let txn = env.begin_rw_txn().unwrap();
for (key, data) in items.clone() {
let db = txn.open_db(None).unwrap();
txn.put(&db, key, data, WriteFlags::empty()).unwrap();
}
txn.commit().unwrap();
}
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert_eq!(
items,
cursor
.iter_dup()
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
cursor.set::<()>(b"b").unwrap();
assert_eq!(
items.iter().copied().skip(4).collect::<Vec<_>>(),
cursor
.iter_dup()
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items,
cursor
.iter_dup_start()
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items
.iter()
.copied()
.into_iter()
.skip(3)
.collect::<Vec<_>>(),
cursor
.iter_dup_from(b"b")
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items
.iter()
.copied()
.into_iter()
.skip(3)
.collect::<Vec<_>>(),
cursor
.iter_dup_from(b"ab")
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items
.iter()
.copied()
.into_iter()
.skip(9)
.collect::<Vec<_>>(),
cursor
.iter_dup_from(b"d")
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
Vec::<([u8; 1], [u8; 1])>::new(),
cursor
.iter_dup_from(b"f")
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items.iter().copied().skip(3).take(3).collect::<Vec<_>>(),
cursor
.iter_dup_of(b"b")
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(0, cursor.iter_dup_of::<(), ()>(b"foo").count());
}
#[test]
fn test_iter_del_get() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let items = vec![(*b"a", *b"1"), (*b"b", *b"2")];
{
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
assert_eq!(
txn.cursor(&db)
.unwrap()
.iter_dup_of::<(), ()>(b"a")
.collect::<Result<Vec<_>>>()
.unwrap()
.len(),
0
);
txn.commit().unwrap();
}
{
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
for (key, data) in &items {
txn.put(&db, key, data, WriteFlags::empty()).unwrap();
}
txn.commit().unwrap();
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
assert_eq!(
items,
cursor
.iter_dup()
.flatten()
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(
items.iter().copied().take(1).collect::<Vec<(_, _)>>(),
cursor
.iter_dup_of(b"a")
.collect::<Result<Vec<_>>>()
.unwrap()
);
assert_eq!(cursor.set(b"a").unwrap(), Some(*b"1"));
cursor.del(WriteFlags::empty()).unwrap();
assert_eq!(
cursor
.iter_dup_of::<(), ()>(b"a")
.collect::<Result<Vec<_>>>()
.unwrap()
.len(),
0
);
}
#[test]
fn test_put_del() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
let mut cursor = txn.cursor(&db).unwrap();
cursor.put(b"key1", b"val1", WriteFlags::empty()).unwrap();
cursor.put(b"key2", b"val2", WriteFlags::empty()).unwrap();
cursor.put(b"key3", b"val3", WriteFlags::empty()).unwrap();
assert_eq!(
cursor.get_current().unwrap().unwrap(),
(
Cow::Borrowed(b"key3" as &[u8]),
Cow::Borrowed(b"val3" as &[u8])
)
);
cursor.del(WriteFlags::empty()).unwrap();
assert_eq!(cursor.get_current::<Vec<u8>, Vec<u8>>().unwrap(), None);
assert_eq!(
cursor.last().unwrap().unwrap(),
(
Cow::Borrowed(b"key2" as &[u8]),
Cow::Borrowed(b"val2" as &[u8])
)
);
}

View File

@ -0,0 +1,185 @@
use byteorder::{ByteOrder, LittleEndian};
use libmdbx::*;
use tempfile::tempdir;
type Environment = libmdbx::Environment<NoWriteMap>;
#[test]
fn test_open() {
let dir = tempdir().unwrap();
// opening non-existent env with read-only should fail
assert!(Environment::new()
.set_flags(Mode::ReadOnly.into())
.open(dir.path())
.is_err());
// opening non-existent env should succeed
assert!(Environment::new().open(dir.path()).is_ok());
// opening env with read-only should succeed
assert!(Environment::new()
.set_flags(Mode::ReadOnly.into())
.open(dir.path())
.is_ok());
}
#[test]
fn test_begin_txn() {
let dir = tempdir().unwrap();
{
// writable environment
let env = Environment::new().open(dir.path()).unwrap();
assert!(env.begin_rw_txn().is_ok());
assert!(env.begin_ro_txn().is_ok());
}
{
// read-only environment
let env = Environment::new()
.set_flags(Mode::ReadOnly.into())
.open(dir.path())
.unwrap();
assert!(env.begin_rw_txn().is_err());
assert!(env.begin_ro_txn().is_ok());
}
}
#[test]
fn test_open_db() {
let dir = tempdir().unwrap();
let env = Environment::new().set_max_dbs(1).open(dir.path()).unwrap();
let txn = env.begin_ro_txn().unwrap();
assert!(txn.open_db(None).is_ok());
assert!(txn.open_db(Some("testdb")).is_err());
}
#[test]
fn test_create_db() {
let dir = tempdir().unwrap();
let env = Environment::new().set_max_dbs(11).open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
assert!(txn.open_db(Some("testdb")).is_err());
assert!(txn
.create_db(Some("testdb"), DatabaseFlags::empty())
.is_ok());
assert!(txn.open_db(Some("testdb")).is_ok())
}
#[test]
fn test_close_database() {
let dir = tempdir().unwrap();
let env = Environment::new().set_max_dbs(10).open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
txn.create_db(Some("db"), DatabaseFlags::empty()).unwrap();
txn.open_db(Some("db")).unwrap();
}
#[test]
fn test_sync() {
let dir = tempdir().unwrap();
{
let env = Environment::new().open(dir.path()).unwrap();
env.sync(true).unwrap();
}
{
let env = Environment::new()
.set_flags(Mode::ReadOnly.into())
.open(dir.path())
.unwrap();
env.sync(true).unwrap_err();
}
}
#[test]
fn test_stat() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
// Stats should be empty initially.
let stat = env.stat().unwrap();
assert_eq!(stat.depth(), 0);
assert_eq!(stat.branch_pages(), 0);
assert_eq!(stat.leaf_pages(), 0);
assert_eq!(stat.overflow_pages(), 0);
assert_eq!(stat.entries(), 0);
// Write a few small values.
for i in 0..64 {
let mut value = [0u8; 8];
LittleEndian::write_u64(&mut value, i);
let tx = env.begin_rw_txn().expect("begin_rw_txn");
tx.put(
&tx.open_db(None).unwrap(),
&value,
&value,
WriteFlags::default(),
)
.expect("tx.put");
tx.commit().expect("tx.commit");
}
// Stats should now reflect inserted values.
let stat = env.stat().unwrap();
assert_eq!(stat.depth(), 1);
assert_eq!(stat.branch_pages(), 0);
assert_eq!(stat.leaf_pages(), 1);
assert_eq!(stat.overflow_pages(), 0);
assert_eq!(stat.entries(), 64);
}
#[test]
fn test_info() {
let map_size = 1024 * 1024;
let dir = tempdir().unwrap();
let env = Environment::new()
.set_geometry(Geometry {
size: Some(map_size..),
..Default::default()
})
.open(dir.path())
.unwrap();
let info = env.info().unwrap();
assert_eq!(info.geometry().min(), map_size as u64);
// assert_eq!(info.last_pgno(), 1);
// assert_eq!(info.last_txnid(), 0);
assert_eq!(info.num_readers(), 0);
}
#[test]
fn test_freelist() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let mut freelist = env.freelist().unwrap();
assert_eq!(freelist, 0);
// Write a few small values.
for i in 0..64 {
let mut value = [0u8; 8];
LittleEndian::write_u64(&mut value, i);
let tx = env.begin_rw_txn().expect("begin_rw_txn");
tx.put(
&tx.open_db(None).unwrap(),
&value,
&value,
WriteFlags::default(),
)
.expect("tx.put");
tx.commit().expect("tx.commit");
}
let tx = env.begin_rw_txn().expect("begin_rw_txn");
tx.clear_db(&tx.open_db(None).unwrap()).expect("clear");
tx.commit().expect("tx.commit");
// Freelist should not be empty after clear_db.
freelist = env.freelist().unwrap();
assert!(freelist > 0);
}

View File

@ -0,0 +1,408 @@
use libmdbx::*;
use std::{
borrow::Cow,
io::Write,
sync::{Arc, Barrier},
thread::{self, JoinHandle},
};
use tempfile::tempdir;
type Environment = libmdbx::Environment<NoWriteMap>;
#[test]
fn test_put_get_del() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val3", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
assert_eq!(txn.get(&db, b"key1").unwrap(), Some(*b"val1"));
assert_eq!(txn.get(&db, b"key2").unwrap(), Some(*b"val2"));
assert_eq!(txn.get(&db, b"key3").unwrap(), Some(*b"val3"));
assert_eq!(txn.get::<()>(&db, b"key").unwrap(), None);
txn.del(&db, b"key1", None).unwrap();
assert_eq!(txn.get::<()>(&db, b"key1").unwrap(), None);
}
#[test]
fn test_put_get_del_multi() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val3", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
{
let mut cur = txn.cursor(&db).unwrap();
let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1");
let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::<Vec<_>>();
assert_eq!(vals, vec![*b"val1", *b"val2", *b"val3"]);
}
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.del(&db, b"key1", Some(b"val2")).unwrap();
txn.del(&db, b"key2", None).unwrap();
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
{
let mut cur = txn.cursor(&db).unwrap();
let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1");
let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::<Vec<_>>();
assert_eq!(vals, vec![*b"val1", *b"val3"]);
let iter = cur.iter_dup_of::<(), ()>(b"key2");
assert_eq!(0, iter.count());
}
txn.commit().unwrap();
}
#[test]
fn test_put_get_del_empty_key() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, Default::default()).unwrap();
txn.put(&db, b"", b"hello", WriteFlags::empty()).unwrap();
assert_eq!(txn.get(&db, b"").unwrap(), Some(*b"hello"));
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
assert_eq!(txn.get(&db, b"").unwrap(), Some(*b"hello"));
txn.put(&db, b"", b"", WriteFlags::empty()).unwrap();
assert_eq!(txn.get(&db, b"").unwrap(), Some(*b""));
}
#[test]
fn test_reserve() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
{
let mut writer = txn.reserve(&db, b"key1", 4, WriteFlags::empty()).unwrap();
writer.write_all(b"val1").unwrap();
}
txn.commit().unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
assert_eq!(txn.get(&db, b"key1").unwrap(), Some(*b"val1"));
assert_eq!(txn.get::<()>(&db, b"key").unwrap(), None);
txn.del(&db, b"key1", None).unwrap();
assert_eq!(txn.get::<()>(&db, b"key1").unwrap(), None);
}
#[test]
fn test_nested_txn() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let mut txn = env.begin_rw_txn().unwrap();
txn.put(
&txn.open_db(None).unwrap(),
b"key1",
b"val1",
WriteFlags::empty(),
)
.unwrap();
{
let nested = txn.begin_nested_txn().unwrap();
let db = nested.open_db(None).unwrap();
nested
.put(&db, b"key2", b"val2", WriteFlags::empty())
.unwrap();
assert_eq!(nested.get(&db, b"key1").unwrap(), Some(*b"val1"));
assert_eq!(nested.get(&db, b"key2").unwrap(), Some(*b"val2"));
}
let db = txn.open_db(None).unwrap();
assert_eq!(txn.get(&db, b"key1").unwrap(), Some(*b"val1"));
assert_eq!(txn.get::<()>(&db, b"key2").unwrap(), None);
}
#[test]
fn test_clear_db() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
{
let txn = env.begin_rw_txn().unwrap();
txn.put(
&txn.open_db(None).unwrap(),
b"key",
b"val",
WriteFlags::empty(),
)
.unwrap();
assert!(!txn.commit().unwrap());
}
{
let txn = env.begin_rw_txn().unwrap();
txn.clear_db(&txn.open_db(None).unwrap()).unwrap();
assert!(!txn.commit().unwrap());
}
let txn = env.begin_ro_txn().unwrap();
assert_eq!(
txn.get::<()>(&txn.open_db(None).unwrap(), b"key").unwrap(),
None
);
}
#[test]
fn test_drop_db() {
let dir = tempdir().unwrap();
{
let env = Environment::new().set_max_dbs(2).open(dir.path()).unwrap();
{
let txn = env.begin_rw_txn().unwrap();
txn.put(
&txn.create_db(Some("test"), DatabaseFlags::empty()).unwrap(),
b"key",
b"val",
WriteFlags::empty(),
)
.unwrap();
// Workaround for MDBX dbi drop issue
txn.create_db(Some("canary"), DatabaseFlags::empty())
.unwrap();
assert!(!txn.commit().unwrap());
}
{
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(Some("test")).unwrap();
unsafe {
txn.drop_db(db).unwrap();
}
assert!(matches!(
txn.open_db(Some("test")).unwrap_err(),
Error::NotFound
));
assert!(!txn.commit().unwrap());
}
}
let env = Environment::new().set_max_dbs(2).open(dir.path()).unwrap();
let txn = env.begin_ro_txn().unwrap();
txn.open_db(Some("canary")).unwrap();
assert!(matches!(
txn.open_db(Some("test")).unwrap_err(),
Error::NotFound
));
}
#[test]
fn test_concurrent_readers_single_writer() {
let dir = tempdir().unwrap();
let env: Arc<Environment> = Arc::new(Environment::new().open(dir.path()).unwrap());
let n = 10usize; // Number of concurrent readers
let barrier = Arc::new(Barrier::new(n + 1));
let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
let key = b"key";
let val = b"val";
for _ in 0..n {
let reader_env = env.clone();
let reader_barrier = barrier.clone();
threads.push(thread::spawn(move || {
{
let txn = reader_env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
assert_eq!(txn.get::<()>(&db, key).unwrap(), None);
}
reader_barrier.wait();
reader_barrier.wait();
{
let txn = reader_env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.get::<[u8; 3]>(&db, key).unwrap().unwrap() == *val
}
}));
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
println!("wait2");
barrier.wait();
txn.put(&db, key, val, WriteFlags::empty()).unwrap();
txn.commit().unwrap();
println!("wait1");
barrier.wait();
assert!(threads.into_iter().all(|b| b.join().unwrap()))
}
#[test]
fn test_concurrent_writers() {
let dir = tempdir().unwrap();
let env = Arc::new(Environment::new().open(dir.path()).unwrap());
let n = 10usize; // Number of concurrent writers
let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
let key = "key";
let val = "val";
for i in 0..n {
let writer_env = env.clone();
threads.push(thread::spawn(move || {
let txn = writer_env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.put(
&db,
&format!("{}{}", key, i),
&format!("{}{}", val, i),
WriteFlags::empty(),
)
.unwrap();
txn.commit().is_ok()
}));
}
assert!(threads.into_iter().all(|b| b.join().unwrap()));
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
for i in 0..n {
assert_eq!(
Cow::<Vec<u8>>::Owned(format!("{}{}", val, i).into_bytes()),
txn.get(&db, format!("{}{}", key, i).as_bytes())
.unwrap()
.unwrap()
);
}
}
#[test]
fn test_stat() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, DatabaseFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val3", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let stat = txn.db_stat(&db).unwrap();
assert_eq!(stat.entries(), 3);
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.del(&db, b"key1", None).unwrap();
txn.del(&db, b"key2", None).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let stat = txn.db_stat(&db).unwrap();
assert_eq!(stat.entries(), 1);
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.put(&db, b"key4", b"val4", WriteFlags::empty()).unwrap();
txn.put(&db, b"key5", b"val5", WriteFlags::empty()).unwrap();
txn.put(&db, b"key6", b"val6", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let db = txn.open_db(None).unwrap();
let stat = txn.db_stat(&db).unwrap();
assert_eq!(stat.entries(), 4);
}
}
#[test]
fn test_stat_dupsort() {
let dir = tempdir().unwrap();
let env = Environment::new().open(dir.path()).unwrap();
let txn = env.begin_rw_txn().unwrap();
let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key1", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key2", b"val3", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key3", b"val3", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
assert_eq!(stat.entries(), 9);
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.del(&db, b"key1", Some(b"val2")).unwrap();
txn.del(&db, b"key2", None).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
assert_eq!(stat.entries(), 5);
}
let txn = env.begin_rw_txn().unwrap();
let db = txn.open_db(None).unwrap();
txn.put(&db, b"key4", b"val1", WriteFlags::empty()).unwrap();
txn.put(&db, b"key4", b"val2", WriteFlags::empty()).unwrap();
txn.put(&db, b"key4", b"val3", WriteFlags::empty()).unwrap();
txn.commit().unwrap();
{
let txn = env.begin_ro_txn().unwrap();
let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
assert_eq!(stat.entries(), 8);
}
}