mirror of
https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
chore(libmdbx): fmt, clippy and deny list updated (#134)
* feat(db): add mdbx-rs Apache-licensed code (55e234)
* feat(db): replace mdbx with reth-mdbx, metadata changes
* chore(db): bump mdbx-sys to 0.12.1
* remove libmdbx from cargo deny
* cargo fmt
* cargo clippy
* fix one more clippy error
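The hunks below are almost entirely mechanical: the formatting pass collapses short multi-line struct literals, builder chains, and macro argument lists onto single lines, reflows over-long doc comments, and cleans up lint findings such as redundant `&*` reborrows, `as u32` casts, and unused lifetime parameters. A minimal before/after sketch of the dominant pattern (illustrative only; `Val` is a made-up type, not part of the diff):

fn main() {
    struct Val { len: usize, base: *mut u8 }

    // Before: one field per line, as in the removed lines throughout this diff.
    let _long = Val {
        len: 0,
        base: std::ptr::null_mut(),
    };

    // After: the literal is collapsed because it fits on one line.
    let _short = Val { len: 0, base: std::ptr::null_mut() };
}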
@@ -3,11 +3,11 @@
 use std::marker::PhantomData;

 use crate::utils::*;
-use reth_libmdbx::{self, TransactionKind, WriteFlags, RO, RW};
 use reth_interfaces::db::{
     DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, DupSort, DupWalker, Encode, Error, Table,
     Walker,
 };
+use reth_libmdbx::{self, TransactionKind, WriteFlags, RO, RW};

 /// Alias type for a `(key, value)` result coming from a cursor.
 pub type PairResult<T> = Result<Option<(<T as Table>::Key, <T as Table>::Value)>, Error>;
@@ -1,14 +1,14 @@
 //! Module that interacts with MDBX.

 use crate::utils::default_page_size;
-use reth_libmdbx::{
-    DatabaseFlags, Environment, EnvironmentFlags, EnvironmentKind, Geometry, Mode, PageSize,
-    SyncMode, RO, RW,
-};
 use reth_interfaces::db::{
     tables::{TableType, TABLES},
     Database, DatabaseGAT, Error,
 };
+use reth_libmdbx::{
+    DatabaseFlags, Environment, EnvironmentFlags, EnvironmentKind, Geometry, Mode, PageSize,
+    SyncMode, RO, RW,
+};
 use std::{ops::Deref, path::Path};

 pub mod cursor;
@@ -134,11 +134,11 @@ pub mod test_utils {
 #[cfg(test)]
 mod tests {
     use super::{test_utils, Env, EnvKind};
-    use reth_libmdbx::{NoWriteMap, WriteMap};
     use reth_interfaces::db::{
         tables::{Headers, PlainAccountState, PlainStorageState},
         Database, DbCursorRO, DbDupCursorRO, DbTx, DbTxMut,
     };
+    use reth_libmdbx::{NoWriteMap, WriteMap};
     use reth_primitives::{Account, Address, Header, StorageEntry, H256, U256};
     use std::str::FromStr;
     use tempfile::TempDir;
@@ -1,8 +1,8 @@
 //! Transaction wrapper for libmdbx-sys.

 use crate::{kv::cursor::Cursor, utils::decode_one};
-use reth_libmdbx::{EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW};
 use reth_interfaces::db::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT, DupSort, Encode, Error, Table};
+use reth_libmdbx::{EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW};
 use std::marker::PhantomData;

 /// Wrapper for the libmdbx transaction.
@@ -19,16 +19,14 @@ fn bench_get_seq_iter(c: &mut Criterion) {
             let mut i = 0;
             let mut count = 0u32;

-            for (key_len, data_len) in cursor
-                .iter::<ObjectLength, ObjectLength>()
-                .map(Result::unwrap)
+            for (key_len, data_len) in
+                cursor.iter::<ObjectLength, ObjectLength>().map(Result::unwrap)
             {
                 i = i + *key_len + *data_len;
                 count += 1;
             }
-            for (key_len, data_len) in cursor
-                .iter::<ObjectLength, ObjectLength>()
-                .filter_map(Result::ok)
+            for (key_len, data_len) in
+                cursor.iter::<ObjectLength, ObjectLength>().filter_map(Result::ok)
             {
                 i = i + *key_len + *data_len;
                 count += 1;
@@ -64,9 +62,7 @@ fn bench_get_seq_cursor(c: &mut Criterion) {
                 .unwrap()
                 .iter::<ObjectLength, ObjectLength>()
                 .map(Result::unwrap)
-                .fold((0, 0), |(i, count), (key, val)| {
-                    (i + *key + *val, count + 1)
-                });
+                .fold((0, 0), |(i, count), (key, val)| (i + *key + *val, count + 1));

             black_box(i);
             assert_eq!(count, n);
@@ -83,14 +79,8 @@ fn bench_get_seq_raw(c: &mut Criterion) {
     let _txn = env.begin_ro_txn().unwrap();
     let txn = _txn.txn();

-    let mut key = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
-    let mut data = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
+    let mut key = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+    let mut data = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
     let mut cursor: *mut MDBX_cursor = ptr::null_mut();

     c.bench_function("bench_get_seq_raw", |b| {
@@ -111,10 +101,5 @@ fn bench_get_seq_raw(c: &mut Criterion) {
     });
 }

-criterion_group!(
-    benches,
-    bench_get_seq_iter,
-    bench_get_seq_cursor,
-    bench_get_seq_raw
-);
+criterion_group!(benches, bench_get_seq_iter, bench_get_seq_cursor, bench_get_seq_raw);
 criterion_main!(benches);
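In the first hunk above, the two loops differ only in how cursor read errors are surfaced: `map(Result::unwrap)` panics on the first error, while `filter_map(Result::ok)` silently drops failed reads. A standard-library-only sketch of the same two patterns, with no MDBX types involved:

fn main() {
    let items: Vec<Result<usize, String>> = vec![Ok(1), Err("read failed".into()), Ok(2)];

    // map(Result::unwrap) would panic on the Err item:
    // let _sum: usize = items.clone().into_iter().map(Result::unwrap).sum();

    // filter_map(Result::ok) skips the Err item instead.
    let sum: usize = items.into_iter().filter_map(Result::ok).sum();
    assert_eq!(sum, 3);
}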
@@ -3,9 +3,9 @@ mod utils;
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use ffi::*;
 use libc::size_t;
-use reth_libmdbx::{ObjectLength, WriteFlags};
 use rand::{prelude::SliceRandom, SeedableRng};
 use rand_xorshift::XorShiftRng;
+use reth_libmdbx::{ObjectLength, WriteFlags};
 use std::ptr;
 use utils::*;

@@ -22,10 +22,7 @@ fn bench_get_rand(c: &mut Criterion) {
         b.iter(|| {
             let mut i = 0usize;
             for key in &keys {
-                i += *txn
-                    .get::<ObjectLength>(&db, key.as_bytes())
-                    .unwrap()
-                    .unwrap();
+                i += *txn.get::<ObjectLength>(&db, key.as_bytes()).unwrap().unwrap();
             }
             black_box(i);
         })
@@ -44,14 +41,8 @@ fn bench_get_rand_raw(c: &mut Criterion) {
     let dbi = db.dbi();
     let txn = _txn.txn();

-    let mut key_val: MDBX_val = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
-    let mut data_val: MDBX_val = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
+    let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+    let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };

     c.bench_function("bench_get_rand_raw", |b| {
         b.iter(|| unsafe {
@@ -101,14 +92,8 @@ fn bench_put_rand_raw(c: &mut Criterion) {
     let dbi = _env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi();
     let env = _env.env();

-    let mut key_val: MDBX_val = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
-    let mut data_val: MDBX_val = MDBX_val {
-        iov_len: 0,
-        iov_base: ptr::null_mut(),
-    };
+    let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+    let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };

     c.bench_function("bench_put_rand_raw", |b| {
         b.iter(|| unsafe {
@@ -130,11 +115,5 @@ fn bench_put_rand_raw(c: &mut Criterion) {
     });
 }

-criterion_group!(
-    benches,
-    bench_get_rand,
-    bench_get_rand_raw,
-    bench_put_rand,
-    bench_put_rand_raw
-);
+criterion_group!(benches, bench_get_rand, bench_get_rand_raw, bench_put_rand, bench_put_rand_raw);
 criterion_main!(benches);
@@ -17,8 +17,7 @@ pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment<NoWriteMap>) {
     let txn = env.begin_rw_txn().unwrap();
     let db = txn.open_db(None).unwrap();
     for i in 0..num_rows {
-        txn.put(&db, &get_key(i), &get_data(i), WriteFlags::empty())
-            .unwrap();
+        txn.put(&db, &get_key(i), &get_data(i), WriteFlags::empty()).unwrap();
     }
     txn.commit().unwrap();
 }
@@ -7,42 +7,42 @@ struct Callbacks;
 impl ParseCallbacks for Callbacks {
     fn int_macro(&self, name: &str, _value: i64) -> Option<IntKind> {
         match name {
-            "MDBX_SUCCESS"
-            | "MDBX_KEYEXIST"
-            | "MDBX_NOTFOUND"
-            | "MDBX_PAGE_NOTFOUND"
-            | "MDBX_CORRUPTED"
-            | "MDBX_PANIC"
-            | "MDBX_VERSION_MISMATCH"
-            | "MDBX_INVALID"
-            | "MDBX_MAP_FULL"
-            | "MDBX_DBS_FULL"
-            | "MDBX_READERS_FULL"
-            | "MDBX_TLS_FULL"
-            | "MDBX_TXN_FULL"
-            | "MDBX_CURSOR_FULL"
-            | "MDBX_PAGE_FULL"
-            | "MDBX_MAP_RESIZED"
-            | "MDBX_INCOMPATIBLE"
-            | "MDBX_BAD_RSLOT"
-            | "MDBX_BAD_TXN"
-            | "MDBX_BAD_VALSIZE"
-            | "MDBX_BAD_DBI"
-            | "MDBX_LOG_DONTCHANGE"
-            | "MDBX_DBG_DONTCHANGE"
-            | "MDBX_RESULT_TRUE"
-            | "MDBX_UNABLE_EXTEND_MAPSIZE"
-            | "MDBX_PROBLEM"
-            | "MDBX_LAST_LMDB_ERRCODE"
-            | "MDBX_BUSY"
-            | "MDBX_EMULTIVAL"
-            | "MDBX_EBADSIGN"
-            | "MDBX_WANNA_RECOVERY"
-            | "MDBX_EKEYMISMATCH"
-            | "MDBX_TOO_LARGE"
-            | "MDBX_THREAD_MISMATCH"
-            | "MDBX_TXN_OVERLAPPING"
-            | "MDBX_LAST_ERRCODE" => Some(IntKind::Int),
+            "MDBX_SUCCESS" |
+            "MDBX_KEYEXIST" |
+            "MDBX_NOTFOUND" |
+            "MDBX_PAGE_NOTFOUND" |
+            "MDBX_CORRUPTED" |
+            "MDBX_PANIC" |
+            "MDBX_VERSION_MISMATCH" |
+            "MDBX_INVALID" |
+            "MDBX_MAP_FULL" |
+            "MDBX_DBS_FULL" |
+            "MDBX_READERS_FULL" |
+            "MDBX_TLS_FULL" |
+            "MDBX_TXN_FULL" |
+            "MDBX_CURSOR_FULL" |
+            "MDBX_PAGE_FULL" |
+            "MDBX_MAP_RESIZED" |
+            "MDBX_INCOMPATIBLE" |
+            "MDBX_BAD_RSLOT" |
+            "MDBX_BAD_TXN" |
+            "MDBX_BAD_VALSIZE" |
+            "MDBX_BAD_DBI" |
+            "MDBX_LOG_DONTCHANGE" |
+            "MDBX_DBG_DONTCHANGE" |
+            "MDBX_RESULT_TRUE" |
+            "MDBX_UNABLE_EXTEND_MAPSIZE" |
+            "MDBX_PROBLEM" |
+            "MDBX_LAST_LMDB_ERRCODE" |
+            "MDBX_BUSY" |
+            "MDBX_EMULTIVAL" |
+            "MDBX_EBADSIGN" |
+            "MDBX_WANNA_RECOVERY" |
+            "MDBX_EKEYMISMATCH" |
+            "MDBX_TOO_LARGE" |
+            "MDBX_THREAD_MISMATCH" |
+            "MDBX_TXN_OVERLAPPING" |
+            "MDBX_LAST_ERRCODE" => Some(IntKind::Int),
             _ => Some(IntKind::UInt),
         }
     }
@@ -70,9 +70,7 @@ fn main() {
         .generate()
         .expect("Unable to generate bindings");

-    bindings
-        .write_to_file(out_path.join("bindings.rs"))
-        .expect("Couldn't write bindings!");
+    bindings.write_to_file(out_path.join("bindings.rs")).expect("Couldn't write bindings!");

     let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap());
     mdbx.push("libmdbx");
@@ -37,11 +37,7 @@ impl<'tx> TableObject<'tx> for Cow<'tx, [u8]> {

         let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);

-        Ok(if is_dirty {
-            Cow::Owned(s.to_vec())
-        } else {
-            Cow::Borrowed(s)
-        })
+        Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) })
     }
 }

@@ -106,9 +102,7 @@ impl<'tx, const LEN: usize> TableObject<'tx> for [u8; LEN] {
         }

         if data_val.len() != LEN {
-            return Err(Error::DecodeError(Box::new(InvalidSize::<LEN> {
-                got: data_val.len(),
-            })));
+            return Err(Error::DecodeError(Box::new(InvalidSize::<LEN> { got: data_val.len() })))
         }
         let mut a = [0; LEN];
         a[..].copy_from_slice(data_val);
@@ -38,15 +38,11 @@ where

         let txn = txn.txn_mutex();
         unsafe {
-            mdbx_result(txn_execute(&*txn, |txn| {
+            mdbx_result(txn_execute(&txn, |txn| {
                 ffi::mdbx_cursor_open(txn, db.dbi(), &mut cursor)
             }))?;
         }
-        Ok(Self {
-            txn,
-            cursor,
-            _marker: PhantomData,
-        })
+        Ok(Self { txn, cursor, _marker: PhantomData })
     }

     fn new_at_position(other: &Self) -> Result<Self> {
@@ -55,11 +51,7 @@ where

         let res = ffi::mdbx_cursor_copy(other.cursor(), cursor);

-        let s = Self {
-            txn: other.txn.clone(),
-            cursor,
-            _marker: PhantomData,
-        };
+        let s = Self { txn: other.txn.clone(), cursor, _marker: PhantomData };

         mdbx_result(res)?;

@@ -92,7 +84,7 @@ where
         let mut data_val = slice_to_val(data);
         let key_ptr = key_val.iov_base;
         let data_ptr = data_val.iov_base;
-        txn_execute(&*self.txn, |txn| {
+        txn_execute(&self.txn, |txn| {
             let v = mdbx_result(ffi::mdbx_cursor_get(
                 self.cursor,
                 &mut key_val,
@@ -168,7 +160,8 @@ where
         self.get_value(Some(k), Some(v), MDBX_GET_BOTH)
     }

-    /// [DatabaseFlags::DUP_SORT]-only: Position at given key and at first data greater than or equal to specified data.
+    /// [DatabaseFlags::DUP_SORT]-only: Position at given key and at first data greater than or
+    /// equal to specified data.
     pub fn get_both_range<Value>(&mut self, k: &[u8], v: &[u8]) -> Result<Option<Value>>
     where
         Value: TableObject<'txn>,
@@ -230,7 +223,8 @@ where
         self.get_full(None, None, MDBX_NEXT_DUP)
     }

-    /// [DatabaseFlags::DUP_FIXED]-only: Return up to a page of duplicate data items from next cursor position. Move cursor to prepare for MDBX_NEXT_MULTIPLE.
+    /// [DatabaseFlags::DUP_FIXED]-only: Return up to a page of duplicate data items from next
+    /// cursor position. Move cursor to prepare for MDBX_NEXT_MULTIPLE.
     pub fn next_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
     where
         Key: TableObject<'txn>,
@@ -301,7 +295,8 @@ where
         self.get_full(Some(key), None, MDBX_SET_RANGE)
     }

-    /// [DatabaseFlags::DUP_FIXED]-only: Position at previous page and return up to a page of duplicate data items.
+    /// [DatabaseFlags::DUP_FIXED]-only: Position at previous page and return up to a page of
+    /// duplicate data items.
     pub fn prev_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
     where
         Key: TableObject<'txn>,
@@ -310,12 +305,15 @@ where
         self.get_full(None, None, MDBX_PREV_MULTIPLE)
     }

-    /// Position at first key-value pair greater than or equal to specified, return both key and data, and the return code depends on a exact match.
+    /// Position at first key-value pair greater than or equal to specified, return both key and
+    /// data, and the return code depends on a exact match.
     ///
-    /// For non DupSort-ed collections this works the same as [Self::set_range()], but returns [false] if key found exactly and [true] if greater key was found.
+    /// For non DupSort-ed collections this works the same as [Self::set_range()], but returns
+    /// [false] if key found exactly and [true] if greater key was found.
     ///
-    /// For DupSort-ed a data value is taken into account for duplicates, i.e. for a pairs/tuples of a key and an each data value of duplicates.
-    /// Returns [false] if key-value pair found exactly and [true] if the next pair was returned.
+    /// For DupSort-ed a data value is taken into account for duplicates, i.e. for a pairs/tuples of
+    /// a key and an each data value of duplicates. Returns [false] if key-value pair found
+    /// exactly and [true] if the next pair was returned.
     pub fn set_lowerbound<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(bool, Key, Value)>>
     where
         Key: TableObject<'txn>,
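The reflowed doc comments above describe the `set_range`/`set_lowerbound` semantics. A hedged usage sketch, assuming the crate's `Result` alias, the `Cursor<'txn, RO>` type, and the `Cow<[u8]>` `TableObject` impl that appear elsewhere in this diff (the helper name `seek_lowerbound` is made up):

use std::borrow::Cow;

// Sketch only: position at the first pair >= `key`; the bool is `false` on an
// exact match and `true` when the next greater pair was returned instead.
fn seek_lowerbound<'txn>(
    cursor: &mut Cursor<'txn, RO>,
    key: &[u8],
) -> Result<Option<(bool, Cow<'txn, [u8]>, Cow<'txn, [u8]>)>> {
    cursor.set_lowerbound(key)
}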
@@ -368,7 +366,7 @@ where
     {
         let res: Result<Option<((), ())>> = self.set_range(key);
         if let Err(error) = res {
-            return Iter::Err(Some(error));
+            return Iter::Err(Some(error))
         };
         Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT)
     }
@@ -381,7 +379,7 @@ where
         Key: TableObject<'txn>,
         Value: TableObject<'txn>,
     {
-        IterDup::new(self, ffi::MDBX_NEXT as u32)
+        IterDup::new(self, ffi::MDBX_NEXT)
     }

     /// Iterate over duplicate database items starting from the beginning of the
@@ -391,7 +389,7 @@ where
         Key: TableObject<'txn>,
         Value: TableObject<'txn>,
     {
-        IterDup::new(self, ffi::MDBX_FIRST as u32)
+        IterDup::new(self, ffi::MDBX_FIRST)
     }

     /// Iterate over duplicate items in the database starting from the given
@@ -403,9 +401,9 @@ where
     {
         let res: Result<Option<((), ())>> = self.set_range(key);
         if let Err(error) = res {
-            return IterDup::Err(Some(error));
+            return IterDup::Err(Some(error))
         };
-        IterDup::new(self, ffi::MDBX_GET_CURRENT as u32)
+        IterDup::new(self, ffi::MDBX_GET_CURRENT)
     }

     /// Iterate over the duplicates of the item in the database with the given key.
@@ -419,7 +417,7 @@ where
             Ok(Some(_)) => (),
             Ok(None) => {
                 let _: Result<Option<((), ())>> = self.last();
-                return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT);
+                return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT)
             }
             Err(error) => return Iter::Err(Some(error)),
         };
@@ -431,16 +429,12 @@ impl<'txn> Cursor<'txn, RW> {
     /// Puts a key/data pair into the database. The cursor will be positioned at
     /// the new data item, or on failure usually near it.
     pub fn put(&mut self, key: &[u8], data: &[u8], flags: WriteFlags) -> Result<()> {
-        let key_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: key.len(),
-            iov_base: key.as_ptr() as *mut c_void,
-        };
-        let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: data.len(),
-            iov_base: data.as_ptr() as *mut c_void,
-        };
+        let key_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
+        let mut data_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
         mdbx_result(unsafe {
-            txn_execute(&*self.txn, |_| {
+            txn_execute(&self.txn, |_| {
                 ffi::mdbx_cursor_put(self.cursor, &key_val, &mut data_val, flags.bits())
             })
         })?;
@@ -456,9 +450,7 @@ impl<'txn> Cursor<'txn, RW> {
     /// current key, if the database was opened with [DatabaseFlags::DUP_SORT].
     pub fn del(&mut self, flags: WriteFlags) -> Result<()> {
         mdbx_result(unsafe {
-            txn_execute(&*self.txn, |_| {
-                ffi::mdbx_cursor_del(self.cursor, flags.bits())
-            })
+            txn_execute(&self.txn, |_| ffi::mdbx_cursor_del(self.cursor, flags.bits()))
         })?;

         Ok(())
@@ -470,7 +462,7 @@ where
     K: TransactionKind,
 {
     fn clone(&self) -> Self {
-        txn_execute(&*self.txn, |_| Self::new_at_position(self).unwrap())
+        txn_execute(&self.txn, |_| Self::new_at_position(self).unwrap())
     }
 }

@@ -488,22 +480,16 @@ where
     K: TransactionKind,
 {
     fn drop(&mut self) {
-        txn_execute(&*self.txn, |_| unsafe {
-            ffi::mdbx_cursor_close(self.cursor)
-        })
+        txn_execute(&self.txn, |_| unsafe { ffi::mdbx_cursor_close(self.cursor) })
     }
 }

 unsafe fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDBX_val {
     match slice {
-        Some(slice) => ffi::MDBX_val {
-            iov_len: slice.len(),
-            iov_base: slice.as_ptr() as *mut c_void,
-        },
-        None => ffi::MDBX_val {
-            iov_len: 0,
-            iov_base: ptr::null_mut(),
-        },
+        Some(slice) => {
+            ffi::MDBX_val { iov_len: slice.len(), iov_base: slice.as_ptr() as *mut c_void }
+        }
+        None => ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() },
     }
 }

@@ -563,12 +549,7 @@ where
 {
     /// Creates a new iterator backed by the given cursor.
     fn new(cursor: Cursor<'txn, K>, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self {
-        IntoIter::Ok {
-            cursor,
-            op,
-            next_op,
-            _marker: PhantomData,
-        }
+        IntoIter::Ok { cursor, op, next_op, _marker: PhantomData }
     }
 }

@@ -582,23 +563,12 @@ where

     fn next(&mut self) -> Option<Self::Item> {
         match self {
-            Self::Ok {
-                cursor,
-                op,
-                next_op,
-                _marker,
-            } => {
-                let mut key = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
-                let mut data = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
+            Self::Ok { cursor, op, next_op, _marker } => {
+                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                 let op = mem::replace(op, *next_op);
                 unsafe {
-                    txn_execute(&*cursor.txn, |txn| {
+                    txn_execute(&cursor.txn, |txn| {
                         match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                             ffi::MDBX_SUCCESS => {
                                 let key = match Key::decode_val::<K>(txn, &key) {
@@ -611,8 +581,9 @@ where
                                 };
                                 Some(Ok((key, data)))
                             }
-                            // MDBX_ENODATA can occur when the cursor was previously seeked to a non-existent value,
-                            // e.g. iter_from with a key greater than all values in the database.
+                            // MDBX_ENODATA can occur when the cursor was previously seeked to a
+                            // non-existent value, e.g. iter_from with a
+                            // key greater than all values in the database.
                             ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
                             error => Some(Err(Error::from_err_code(error))),
                         }
@@ -669,12 +640,7 @@ where
         op: ffi::MDBX_cursor_op,
         next_op: ffi::MDBX_cursor_op,
     ) -> Self {
-        Iter::Ok {
-            cursor,
-            op,
-            next_op,
-            _marker: PhantomData,
-        }
+        Iter::Ok { cursor, op, next_op, _marker: PhantomData }
     }
 }

@@ -688,23 +654,12 @@ where

     fn next(&mut self) -> Option<Self::Item> {
         match self {
-            Iter::Ok {
-                cursor,
-                op,
-                next_op,
-                ..
-            } => {
-                let mut key = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
-                let mut data = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
+            Iter::Ok { cursor, op, next_op, .. } => {
+                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                 let op = mem::replace(op, *next_op);
                 unsafe {
-                    txn_execute(&*cursor.txn, |txn| {
+                    txn_execute(&cursor.txn, |txn| {
                         match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                             ffi::MDBX_SUCCESS => {
                                 let key = match Key::decode_val::<K>(txn, &key) {
@@ -717,8 +672,9 @@ where
                                 };
                                 Some(Ok((key, data)))
                             }
-                            // MDBX_NODATA can occur when the cursor was previously seeked to a non-existent value,
-                            // e.g. iter_from with a key greater than all values in the database.
+                            // MDBX_NODATA can occur when the cursor was previously seeked to a
+                            // non-existent value, e.g. iter_from with a
+                            // key greater than all values in the database.
                             ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
                             error => Some(Err(Error::from_err_code(error))),
                         }
@@ -770,11 +726,7 @@ where
 {
     /// Creates a new iterator backed by the given cursor.
     fn new(cursor: &'cur mut Cursor<'txn, K>, op: c_uint) -> Self {
-        IterDup::Ok {
-            cursor,
-            op,
-            _marker: PhantomData,
-        }
+        IterDup::Ok { cursor, op, _marker: PhantomData }
     }
 }

@@ -800,17 +752,11 @@ where
     fn next(&mut self) -> Option<Self::Item> {
         match self {
             IterDup::Ok { cursor, op, .. } => {
-                let mut key = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
-                let mut data = ffi::MDBX_val {
-                    iov_len: 0,
-                    iov_base: ptr::null_mut(),
-                };
-                let op = mem::replace(op, ffi::MDBX_NEXT_NODUP as u32);
+                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
+                let op = mem::replace(op, ffi::MDBX_NEXT_NODUP);

-                txn_execute(&*cursor.txn, |_| {
+                txn_execute(&cursor.txn, |_| {
                     let err_code =
                         unsafe { ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) };

@@ -27,30 +27,20 @@ impl<'txn> Database<'txn> {
         flags: c_uint,
     ) -> Result<Self> {
         let c_name = name.map(|n| CString::new(n).unwrap());
-        let name_ptr = if let Some(c_name) = &c_name {
-            c_name.as_ptr()
-        } else {
-            ptr::null()
-        };
+        let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() };
         let mut dbi: ffi::MDBX_dbi = 0;
-        mdbx_result(txn_execute(&*txn.txn_mutex(), |txn| unsafe {
+        mdbx_result(txn_execute(&txn.txn_mutex(), |txn| unsafe {
             ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi)
         }))?;
         Ok(Self::new_from_ptr(dbi))
     }

     pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi) -> Self {
-        Self {
-            dbi,
-            _marker: PhantomData,
-        }
+        Self { dbi, _marker: PhantomData }
     }

     pub(crate) fn freelist_db() -> Self {
-        Database {
-            dbi: 0,
-            _marker: PhantomData,
-        }
+        Database { dbi: 0, _marker: PhantomData }
     }

     /// Returns the underlying MDBX database handle.
@@ -28,8 +28,8 @@ mod private {

     pub trait Sealed {}

-    impl<'env> Sealed for NoWriteMap {}
-    impl<'env> Sealed for WriteMap {}
+    impl Sealed for NoWriteMap {}
+    impl Sealed for WriteMap {}
 }

 pub trait EnvironmentKind: private::Sealed + Debug + 'static {
@@ -59,19 +59,9 @@ unsafe impl Send for EnvPtr {}
 unsafe impl Sync for EnvPtr {}

 pub(crate) enum TxnManagerMessage {
-    Begin {
-        parent: TxnPtr,
-        flags: ffi::MDBX_txn_flags_t,
-        sender: SyncSender<Result<TxnPtr>>,
-    },
-    Abort {
-        tx: TxnPtr,
-        sender: SyncSender<Result<bool>>,
-    },
-    Commit {
-        tx: TxnPtr,
-        sender: SyncSender<Result<bool>>,
-    },
+    Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
+    Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
+    Commit { tx: TxnPtr, sender: SyncSender<Result<bool>> },
 }

 /// An environment supports multiple databases, all residing in the same shared-memory map.
@@ -135,10 +125,10 @@ where
             let res = rx.recv().unwrap();
             if let Err(Error::Busy) = &res {
                 sleep(Duration::from_millis(250));
-                continue;
+                continue
             }

-            break res;
+            break res
         }?;
         Ok(Transaction::new_from_ptr(self, txn.0))
     }
@@ -197,9 +187,9 @@ where
     ///
     /// Note:
     ///
-    /// * LMDB stores all the freelists in the designated database 0 in each environment,
-    /// and the freelist count is stored at the beginning of the value as `libc::size_t`
-    /// in the native byte order.
+    /// * LMDB stores all the freelists in the designated database 0 in each environment, and the
+    /// freelist count is stored at the beginning of the value as `libc::size_t` in the native
+    /// byte order.
     ///
     /// * It will create a read transaction to traverse the freelist database.
     pub fn freelist(&self) -> Result<usize> {
@@ -211,7 +201,7 @@ where
         for result in cursor {
             let (_key, value) = result?;
             if value.len() < mem::size_of::<usize>() {
-                return Err(Error::Corrupted);
+                return Err(Error::Corrupted)
             }

             let s = &value[..mem::size_of::<usize>()];
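The freelist note above says each value begins with the entry count as a native-endian `libc::size_t`; a small standard-library sketch of that decoding step (illustrative only, using `usize` in place of `size_t`):

fn read_freelist_count(value: &[u8]) -> Option<usize> {
    // The first size_of::<usize>() bytes hold the entry count in native byte order.
    let bytes = value.get(..std::mem::size_of::<usize>())?;
    Some(usize::from_ne_bytes(bytes.try_into().ok()?))
}

fn main() {
    let mut value = 7usize.to_ne_bytes().to_vec();
    value.extend_from_slice(b"rest of the freelist payload");
    assert_eq!(read_freelist_count(&value), Some(7));
}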
@@ -376,12 +366,7 @@ pub struct Geometry<R> {

 impl<R> Default for Geometry<R> {
     fn default() -> Self {
-        Self {
-            size: None,
-            growth_step: None,
-            shrink_threshold: None,
-            page_size: None,
-        }
+        Self { size: None, growth_step: None, shrink_threshold: None, page_size: None }
     }
 }

@@ -461,14 +446,8 @@ where
                 (ffi::MDBX_opt_loose_limit, self.loose_limit),
                 (ffi::MDBX_opt_dp_reserve_limit, self.dp_reserve_limit),
                 (ffi::MDBX_opt_txn_dp_limit, self.txn_dp_limit),
-                (
-                    ffi::MDBX_opt_spill_max_denominator,
-                    self.spill_max_denominator,
-                ),
-                (
-                    ffi::MDBX_opt_spill_min_denominator,
-                    self.spill_min_denominator,
-                ),
+                (ffi::MDBX_opt_spill_max_denominator, self.spill_max_denominator),
+                (ffi::MDBX_opt_spill_min_denominator, self.spill_min_denominator),
             ] {
                 if let Some(v) = v {
                     mdbx_result(ffi::mdbx_env_set_option(env, opt, v))?;
@@ -490,15 +469,11 @@ where
             })() {
                 ffi::mdbx_env_close_ex(env, false);

-                return Err(e);
+                return Err(e)
             }
         }

-        let mut env = Environment {
-            env,
-            txn_manager: None,
-            _marker: PhantomData,
-        };
+        let mut env = Environment { env, txn_manager: None, _marker: PhantomData };

         if let Mode::ReadWrite { .. } = self.flags.mode {
             let (tx, rx) = std::sync::mpsc::sync_channel(0);
@@ -506,11 +481,7 @@ where
             std::thread::spawn(move || loop {
                 match rx.recv() {
                     Ok(msg) => match msg {
-                        TxnManagerMessage::Begin {
-                            parent,
-                            flags,
-                            sender,
-                        } => {
+                        TxnManagerMessage::Begin { parent, flags, sender } => {
                             let e = e;
                             let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
                             sender
@@ -529,9 +500,7 @@ where
                                 .unwrap()
                         }
                         TxnManagerMessage::Abort { tx, sender } => {
-                            sender
-                                .send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) }))
-                                .unwrap();
+                            sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
                         }
                         TxnManagerMessage::Commit { tx, sender } => {
                             sender
@@ -611,7 +580,8 @@ where
         self
     }

-    /// Set all size-related parameters of environment, including page size and the min/max size of the memory map.
+    /// Set all size-related parameters of environment, including page size and the min/max size of
+    /// the memory map.
     pub fn set_geometry<R: RangeBounds<usize>>(&mut self, geometry: Geometry<R>) -> &mut Self {
         let convert_bound = |bound: Bound<&usize>| match bound {
             Bound::Included(v) | Bound::Excluded(v) => Some(*v),
@@ -619,10 +589,7 @@ where
         };
         self.geometry = Some(Geometry {
             size: geometry.size.map(|range| {
-                (
-                    convert_bound(range.start_bound()),
-                    convert_bound(range.end_bound()),
-                )
+                (convert_bound(range.start_bound()), convert_bound(range.end_bound()))
             }),
             growth_step: geometry.growth_step,
             shrink_threshold: geometry.shrink_threshold,
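For context, the `convert_bound` closure above collapses a `RangeBounds<usize>` into optional endpoints. A standalone sketch of the same idea (standard library only; the `Bound::Unbounded => None` arm is assumed, since it falls just outside the hunk shown):

use std::ops::{Bound, RangeBounds};

// Mirrors the convert_bound closure: turn a range into (start, end) options.
fn to_bounds<R: RangeBounds<usize>>(range: R) -> (Option<usize>, Option<usize>) {
    let convert = |bound: Bound<&usize>| match bound {
        Bound::Included(v) | Bound::Excluded(v) => Some(*v),
        Bound::Unbounded => None,
    };
    (convert(range.start_bound()), convert(range.end_bound()))
}

fn main() {
    assert_eq!(to_bounds(0..1024), (Some(0), Some(1024)));
    assert_eq!(to_bounds(..), (None, None));
}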
@@ -111,7 +111,7 @@ impl Error {
 impl fmt::Display for Error {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         match self {
-            Error::DecodeError(reason) => write!(fmt, "{}", reason),
+            Error::DecodeError(reason) => write!(fmt, "{reason}"),
             other => {
                 write!(fmt, "{}", unsafe {
                     let err = ffi::mdbx_strerror(other.to_err_code());
@@ -154,9 +154,6 @@ mod test {
     fn test_description() {
         assert_eq!("Permission denied", Error::from_err_code(13).to_string());

-        assert_eq!(
-            "MDBX_INVALID: File is not an MDBX file",
-            Error::Invalid.to_string()
-        );
+        assert_eq!("MDBX_INVALID: File is not an MDBX file", Error::Invalid.to_string());
     }
 }
@@ -6,65 +6,94 @@ use libc::c_uint;
 #[derive(Clone, Copy, Debug)]
 pub enum SyncMode {
     /// Default robust and durable sync mode.
-    /// Metadata is written and flushed to disk after a data is written and flushed, which guarantees the integrity of the database in the event of a crash at any time.
+    /// Metadata is written and flushed to disk after a data is written and flushed, which
+    /// guarantees the integrity of the database in the event of a crash at any time.
     Durable,

     /// Don't sync the meta-page after commit.
     ///
     /// Flush system buffers to disk only once per transaction commit, omit the metadata flush.
-    /// Defer that until the system flushes files to disk, or next non-read-only commit or [Environment::sync()](crate::Environment::sync).
-    /// Depending on the platform and hardware, with [SyncMode::NoMetaSync] you may get a doubling of write performance.
+    /// Defer that until the system flushes files to disk, or next non-read-only commit or
+    /// [Environment::sync()](crate::Environment::sync). Depending on the platform and
+    /// hardware, with [SyncMode::NoMetaSync] you may get a doubling of write performance.
     ///
-    /// This trade-off maintains database integrity, but a system crash may undo the last committed transaction.
-    /// I.e. it preserves the ACI (atomicity, consistency, isolation) but not D (durability) database property.
+    /// This trade-off maintains database integrity, but a system crash may undo the last committed
+    /// transaction. I.e. it preserves the ACI (atomicity, consistency, isolation) but not D
+    /// (durability) database property.
     NoMetaSync,

     /// Don't sync anything but keep previous steady commits.
     ///
-    /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system buffers to disk when committing a transaction.
-    /// But there is a huge difference in how are recycled the MVCC snapshots corresponding to previous "steady" transactions (see below).
+    /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system
+    /// buffers to disk when committing a transaction. But there is a huge difference in how
+    /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see
+    /// below).
     ///
-    /// With [crate::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use asynchronous mmap-flushes to disk.
-    /// Asynchronous mmap-flushes means that actually all writes will scheduled and performed by operation system on it own manner, i.e. unordered.
-    /// MDBX itself just notify operating system that it would be nice to write data to disk, but no more.
+    /// With [crate::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use asynchronous
+    /// mmap-flushes to disk. Asynchronous mmap-flushes means that actually all writes will
+    /// scheduled and performed by operation system on it own manner, i.e. unordered.
+    /// MDBX itself just notify operating system that it would be nice to write data to disk, but
+    /// no more.
     ///
-    /// Depending on the platform and hardware, with [SyncMode::SafeNoSync] you may get a multiple increase of write performance, even 10 times or more.
+    /// Depending on the platform and hardware, with [SyncMode::SafeNoSync] you may get a multiple
+    /// increase of write performance, even 10 times or more.
     ///
-    /// In contrast to [SyncMode::UtterlyNoSync] mode, with [SyncMode::SafeNoSync] flag MDBX will keeps untouched pages within B-tree of the last transaction "steady" which was synced to disk completely.
-    /// This has big implications for both data durability and (unfortunately) performance:
+    /// In contrast to [SyncMode::UtterlyNoSync] mode, with [SyncMode::SafeNoSync] flag MDBX will
+    /// keeps untouched pages within B-tree of the last transaction "steady" which was synced to
+    /// disk completely. This has big implications for both data durability and (unfortunately)
+    /// performance:
     ///
-    /// A system crash can't corrupt the database, but you will lose the last transactions; because MDBX will rollback to last steady commit since it kept explicitly.
-    /// The last steady transaction makes an effect similar to "long-lived" read transaction since prevents reuse of pages freed by newer write transactions, thus the any data changes will be placed in newly allocated pages.
-    /// To avoid rapid database growth, the system will sync data and issue a steady commit-point to resume reuse pages, each time there is insufficient space and before increasing the size of the file on disk.
-    /// In other words, with [SyncMode::SafeNoSync] flag MDBX protects you from the whole database corruption, at the cost increasing database size and/or number of disk IOPs.
-    /// So, [SyncMode::SafeNoSync] flag could be used with [Environment::sync()](crate::Environment::sync) as alternatively for batch committing or nested transaction (in some cases).
+    /// A system crash can't corrupt the database, but you will lose the last transactions; because
+    /// MDBX will rollback to last steady commit since it kept explicitly. The last steady
+    /// transaction makes an effect similar to "long-lived" read transaction since prevents reuse
+    /// of pages freed by newer write transactions, thus the any data changes will be placed in
+    /// newly allocated pages. To avoid rapid database growth, the system will sync data and
+    /// issue a steady commit-point to resume reuse pages, each time there is insufficient space
+    /// and before increasing the size of the file on disk. In other words, with
+    /// [SyncMode::SafeNoSync] flag MDBX protects you from the whole database corruption, at the
+    /// cost increasing database size and/or number of disk IOPs. So, [SyncMode::SafeNoSync]
+    /// flag could be used with [Environment::sync()](crate::Environment::sync) as alternatively
+    /// for batch committing or nested transaction (in some cases).
     ///
-    /// The number and volume of of disk IOPs with [SyncMode::SafeNoSync] flag will exactly the as without any no-sync flags.
-    /// However, you should expect a larger process's work set and significantly worse a locality of reference, due to the more intensive allocation of previously unused pages and increase the size of the database.
+    /// The number and volume of of disk IOPs with [SyncMode::SafeNoSync] flag will exactly the as
+    /// without any no-sync flags. However, you should expect a larger process's work set and
+    /// significantly worse a locality of reference, due to the more intensive allocation of
+    /// previously unused pages and increase the size of the database.
     SafeNoSync,

     /// Don't sync anything and wipe previous steady commits.
     ///
     /// Don't flush system buffers to disk when committing a transaction.
-    /// This optimization means a system crash can corrupt the database, if buffers are not yet flushed to disk.
-    /// Depending on the platform and hardware, with [SyncMode::UtterlyNoSync] you may get a multiple increase of write performance, even 100 times or more.
+    /// This optimization means a system crash can corrupt the database, if buffers are not yet
+    /// flushed to disk. Depending on the platform and hardware, with [SyncMode::UtterlyNoSync]
+    /// you may get a multiple increase of write performance, even 100 times or more.
     ///
-    /// If the filesystem preserves write order (which is rare and never provided unless explicitly noted) and the [WriteMap](crate::WriteMap) and [EnvironmentFlags::liforeclaim] flags are not used,
-    /// then a system crash can't corrupt the database, but you can lose the last transactions, if at least one buffer is not yet flushed to disk.
-    /// The risk is governed by how often the system flushes dirty buffers to disk and how often [Environment::sync()](crate::Environment::sync) is called.
-    /// So, transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D (durability).
+    /// If the filesystem preserves write order (which is rare and never provided unless explicitly
+    /// noted) and the [WriteMap](crate::WriteMap) and [EnvironmentFlags::liforeclaim] flags are
+    /// not used, then a system crash can't corrupt the database, but you can lose the last
+    /// transactions, if at least one buffer is not yet flushed to disk. The risk is governed
+    /// by how often the system flushes dirty buffers to disk and how often
+    /// [Environment::sync()](crate::Environment::sync) is called. So, transactions exhibit ACI
+    /// (atomicity, consistency, isolation) properties and only lose D (durability).
     /// I.e. database integrity is maintained, but a system crash may undo the final transactions.
     ///
-    /// Otherwise, if the filesystem not preserves write order (which is typically) or [WriteMap](crate::WriteMap) or [EnvironmentFlags::liforeclaim] flags are used, you should expect the corrupted database after a system crash.
+    /// Otherwise, if the filesystem not preserves write order (which is typically) or
+    /// [WriteMap](crate::WriteMap) or [EnvironmentFlags::liforeclaim] flags are used, you should
+    /// expect the corrupted database after a system crash.
     ///
     /// So, most important thing about [SyncMode::UtterlyNoSync]:
     ///
-    /// A system crash immediately after commit the write transaction high likely lead to database corruption.
-    /// Successful completion of [Environment::sync(force=true)](crate::Environment::sync) after one or more committed transactions guarantees consistency and durability.
-    /// BUT by committing two or more transactions you back database into a weak state, in which a system crash may lead to database corruption!
-    /// In case single transaction after [Environment::sync()](crate::Environment::sync), you may lose transaction itself, but not a whole database.
-    /// Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in case of an application crash (but no durability on system failure),
-    /// and therefore may be very useful in scenarios where data durability is not required over a system failure (e.g for short-lived data), or if you can take such risk.
+    /// A system crash immediately after commit the write transaction high likely lead to database
+    /// corruption. Successful completion of
+    /// [Environment::sync(force=true)](crate::Environment::sync) after one or more committed
+    /// transactions guarantees consistency and durability. BUT by committing two or more
+    /// transactions you back database into a weak state, in which a system crash may lead to
+    /// database corruption! In case single transaction after
+    /// [Environment::sync()](crate::Environment::sync), you may lose transaction itself, but not a
+    /// whole database. Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in
+    /// case of an application crash (but no durability on system failure), and therefore may
+    /// be very useful in scenarios where data durability is not required over a system failure
+    /// (e.g for short-lived data), or if you can take such risk.
     UtterlyNoSync,
 }

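The reflowed docs above weigh the durability trade-offs of each `SyncMode`. A hedged sketch of selecting one, using only the `Mode::ReadWrite { sync_mode }` variant shown here and the `From<Mode> for EnvironmentFlags` impl in the next hunk (the function name is made up):

fn relaxed_flags() -> EnvironmentFlags {
    // SafeNoSync trades durability of the most recent commits for write throughput,
    // per the doc comment above; the conversion relies on From<Mode> below.
    Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync }.into()
}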
@@ -82,18 +111,13 @@ pub enum Mode {

 impl Default for Mode {
     fn default() -> Self {
-        Self::ReadWrite {
-            sync_mode: SyncMode::default(),
-        }
+        Self::ReadWrite { sync_mode: SyncMode::default() }
     }
 }

 impl From<Mode> for EnvironmentFlags {
     fn from(mode: Mode) -> Self {
-        Self {
-            mode,
-            ..Default::default()
-        }
+        Self { mode, ..Default::default() }
     }
 }

@@ -165,14 +189,14 @@ bitflags! {
     #[doc="Database options."]
     #[derive(Default)]
     pub struct DatabaseFlags: c_uint {
-        const REVERSE_KEY = MDBX_REVERSEKEY as u32;
-        const DUP_SORT = MDBX_DUPSORT as u32;
-        const INTEGER_KEY = MDBX_INTEGERKEY as u32;
-        const DUP_FIXED = MDBX_DUPFIXED as u32;
-        const INTEGER_DUP = MDBX_INTEGERDUP as u32;
-        const REVERSE_DUP = MDBX_REVERSEDUP as u32;
-        const CREATE = MDBX_CREATE as u32;
-        const ACCEDE = MDBX_DB_ACCEDE as u32;
+        const REVERSE_KEY = MDBX_REVERSEKEY;
+        const DUP_SORT = MDBX_DUPSORT;
+        const INTEGER_KEY = MDBX_INTEGERKEY;
+        const DUP_FIXED = MDBX_DUPFIXED;
+        const INTEGER_DUP = MDBX_INTEGERDUP;
+        const REVERSE_DUP = MDBX_REVERSEDUP;
+        const CREATE = MDBX_CREATE;
+        const ACCEDE = MDBX_DB_ACCEDE;
     }
 }
 
@@ -180,14 +204,14 @@ bitflags! {
     #[doc="Write options."]
     #[derive(Default)]
     pub struct WriteFlags: c_uint {
-        const UPSERT = MDBX_UPSERT as u32;
-        const NO_OVERWRITE = MDBX_NOOVERWRITE as u32;
-        const NO_DUP_DATA = MDBX_NODUPDATA as u32;
-        const CURRENT = MDBX_CURRENT as u32;
-        const ALLDUPS = MDBX_ALLDUPS as u32;
-        const RESERVE = MDBX_RESERVE as u32;
-        const APPEND = MDBX_APPEND as u32;
-        const APPEND_DUP = MDBX_APPENDDUP as u32;
-        const MULTIPLE = MDBX_MULTIPLE as u32;
+        const UPSERT = MDBX_UPSERT;
+        const NO_OVERWRITE = MDBX_NOOVERWRITE;
+        const NO_DUP_DATA = MDBX_NODUPDATA;
+        const CURRENT = MDBX_CURRENT;
+        const ALLDUPS = MDBX_ALLDUPS;
+        const RESERVE = MDBX_RESERVE;
+        const APPEND = MDBX_APPEND;
+        const APPEND_DUP = MDBX_APPENDDUP;
+        const MULTIPLE = MDBX_MULTIPLE;
     }
 }
 
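The two hunks above drop the `as u32` casts on the `MDBX_*` constants (presumably the bumped `mdbx-sys` now exposes them with the matching integer type); the flag types themselves are unchanged and still compose like ordinary `bitflags`. A small sketch, using combinations the tests further down in this diff also use:

```rust
use reth_libmdbx::{DatabaseFlags, WriteFlags};

// Sketch: DatabaseFlags configure how a table is created, WriteFlags configure
// a single put; both are plain bitflags and can be OR-ed together.
fn example_flags() -> (DatabaseFlags, WriteFlags) {
    let table_flags = DatabaseFlags::CREATE | DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED;
    // NO_OVERWRITE refuses to replace a value that is already stored under the key.
    let write_flags = WriteFlags::NO_OVERWRITE;
    assert!(table_flags.contains(DatabaseFlags::DUP_SORT));
    (table_flags, write_flags)
}
```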
@@ -23,8 +23,8 @@ mod private {
 
     pub trait Sealed {}
 
-    impl<'env> Sealed for RO {}
-    impl<'env> Sealed for RW {}
+    impl Sealed for RO {}
+    impl Sealed for RW {}
 }
 
 pub trait TransactionKind: private::Sealed + Debug + 'static {
@@ -127,14 +127,9 @@ where
     where
         Key: TableObject<'txn>,
     {
-        let key_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: key.len(),
-            iov_base: key.as_ptr() as *mut c_void,
-        };
-        let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: 0,
-            iov_base: ptr::null_mut(),
-        };
+        let key_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
+        let mut data_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
 
         txn_execute(&self.txn, |txn| unsafe {
             match ffi::mdbx_get(txn, db.dbi(), &key_val, &mut data_val) {
@@ -156,7 +151,8 @@ where
         self.primed_dbis.lock().insert(db.dbi());
     }
 
-    /// Commits the transaction and returns table handles permanently open for the lifetime of `Environment`.
+    /// Commits the transaction and returns table handles permanently open for the lifetime of
+    /// `Environment`.
     pub fn commit_and_rebind_open_dbs(mut self) -> Result<(bool, Vec<Database<'env>>)> {
         let txnlck = self.txn.lock();
         let txn = *txnlck;
@@ -168,23 +164,13 @@ where
                 .txn_manager
                 .as_ref()
                 .unwrap()
-                .send(TxnManagerMessage::Commit {
-                    tx: TxnPtr(txn),
-                    sender,
-                })
+                .send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender })
                 .unwrap();
             rx.recv().unwrap()
         };
         self.committed = true;
         result.map(|v| {
-            (
-                v,
-                self.primed_dbis
-                    .lock()
-                    .iter()
-                    .map(|&dbi| Database::new_from_ptr(dbi))
-                    .collect(),
-            )
+            (v, self.primed_dbis.lock().iter().map(|&dbi| Database::new_from_ptr(dbi)).collect())
         })
     }
 
@@ -261,8 +247,8 @@ where
     /// case the environment must be configured to allow named databases through
    /// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs).
     ///
-    /// This function will fail with [Error::BadRslot](crate::error::Error::BadRslot) if called by a thread with an open
-    /// transaction.
+    /// This function will fail with [Error::BadRslot](crate::error::Error::BadRslot) if called by a
+    /// thread with an open transaction.
     pub fn create_db<'txn>(
         &'txn self,
         name: Option<&str>,
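The doc comment above spells out the two preconditions for named databases (`set_max_dbs` on the builder, and no open transaction on the calling thread). A hedged sketch of the create-then-open flow, mirroring `test_create_db` further down in this diff; the `set_max_dbs(1)` call and the root re-exports are assumptions:

```rust
use reth_libmdbx::{DatabaseFlags, Environment};
use tempfile::tempdir;

// Sketch: named databases only work if the environment reserves slots for them.
fn open_named_db() -> reth_libmdbx::Result<()> {
    let dir = tempdir().unwrap();
    let env = Environment::new().set_max_dbs(1).open(dir.path())?;

    let txn = env.begin_rw_txn()?;
    // open_db would fail here because "testdb" does not exist yet;
    // create_db creates it (see test_create_db below).
    let db = txn.create_db(Some("testdb"), DatabaseFlags::empty())?;
    drop(db);
    txn.commit()?;
    Ok(())
}
```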
@@ -286,14 +272,10 @@ where
     ) -> Result<()> {
         let key = key.as_ref();
         let data = data.as_ref();
-        let key_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: key.len(),
-            iov_base: key.as_ptr() as *mut c_void,
-        };
-        let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: data.len(),
-            iov_base: data.as_ptr() as *mut c_void,
-        };
+        let key_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
+        let mut data_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
         mdbx_result(txn_execute(&self.txn, |txn| unsafe {
             ffi::mdbx_put(txn, db.dbi(), &key_val, &mut data_val, flags.bits())
         }))?;
@@ -312,14 +294,10 @@ where
         flags: WriteFlags,
     ) -> Result<&'txn mut [u8]> {
         let key = key.as_ref();
-        let key_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: key.len(),
-            iov_base: key.as_ptr() as *mut c_void,
-        };
-        let mut data_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: len,
-            iov_base: ptr::null_mut::<c_void>(),
-        };
+        let key_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
+        let mut data_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: len, iov_base: ptr::null_mut::<c_void>() };
         unsafe {
             mdbx_result(txn_execute(&self.txn, |txn| {
                 ffi::mdbx_put(
@@ -330,19 +308,17 @@ where
                     flags.bits() | ffi::MDBX_RESERVE,
                 )
             }))?;
-            Ok(slice::from_raw_parts_mut(
-                data_val.iov_base as *mut u8,
-                data_val.iov_len,
-            ))
+            Ok(slice::from_raw_parts_mut(data_val.iov_base as *mut u8, data_val.iov_len))
         }
     }
 
     /// Delete items from a database.
     /// This function removes key/data pairs from the database.
     ///
-    /// The data parameter is NOT ignored regardless the database does support sorted duplicate data items or not.
-    /// If the data parameter is [Some] only the matching data item will be deleted.
-    /// Otherwise, if data parameter is [None], any/all value(s) for specified key will be deleted.
+    /// The data parameter is NOT ignored regardless the database does support sorted duplicate data
+    /// items or not. If the data parameter is [Some] only the matching data item will be
+    /// deleted. Otherwise, if data parameter is [None], any/all value(s) for specified key will
+    /// be deleted.
     ///
     /// Returns `true` if the key/value pair was present.
     pub fn del<'txn>(
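The two `mdbx_put` wrappers above differ in how the value gets into the map: the plain variant copies the caller's slice, while the reserve-style variant (its name is cut off by the hunk; it is `reserve` in upstream libmdbx-rs) passes `MDBX_RESERVE` so MDBX allocates `len` bytes and returns a mutable slice for the caller to fill in place. A hedged sketch; the parameter order `(db, key, len, flags)` is inferred from the body above:

```rust
use reth_libmdbx::{Environment, WriteFlags};
use tempfile::tempdir;

// Sketch: a copied put next to a reserve-and-fill write.
fn put_and_reserve() -> reth_libmdbx::Result<()> {
    let dir = tempdir().unwrap();
    let env = Environment::new().open(dir.path())?;
    let txn = env.begin_rw_txn()?;
    let db = txn.open_db(None)?;

    // Plain put: key and value are copied out of the provided slices.
    txn.put(&db, b"copied", b"value", WriteFlags::empty())?;

    // Reserve: ask MDBX for an 8-byte slot and write the value directly into it.
    let slot = txn.reserve(&db, b"reserved", 8, WriteFlags::empty())?;
    slot.copy_from_slice(&42u64.to_le_bytes());

    drop(db);
    txn.commit()?;
    Ok(())
}
```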
@@ -352,10 +328,8 @@ where
         data: Option<&[u8]>,
     ) -> Result<bool> {
         let key = key.as_ref();
-        let key_val: ffi::MDBX_val = ffi::MDBX_val {
-            iov_len: key.len(),
-            iov_base: key.as_ptr() as *mut c_void,
-        };
+        let key_val: ffi::MDBX_val =
+            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
         let data_val: Option<ffi::MDBX_val> = data.map(|data| ffi::MDBX_val {
             iov_len: data.len(),
             iov_base: data.as_ptr() as *mut c_void,
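A hedged usage sketch for `del` as documented above, sticking to the signature visible in this hunk (`key`, `data: Option<&[u8]>`, returning `Result<bool>`); the dup-sort setup mirrors `test_get_dupfixed` earlier in the diff:

```rust
use reth_libmdbx::{DatabaseFlags, Environment, WriteFlags};
use tempfile::tempdir;

// Sketch: Some(value) deletes one matching pair, None deletes every value
// stored under the key, and the bool reports whether anything was present.
fn delete_modes() -> reth_libmdbx::Result<()> {
    let dir = tempdir().unwrap();
    let env = Environment::new().open(dir.path())?;
    let txn = env.begin_rw_txn()?;
    let db = txn.create_db(None, DatabaseFlags::DUP_SORT)?;

    txn.put(&db, b"key1", b"val1", WriteFlags::empty())?;
    txn.put(&db, b"key1", b"val2", WriteFlags::empty())?;

    // Only the exact (key1, val1) pair goes away.
    assert!(txn.del(&db, b"key1", Some(b"val1" as &[u8]))?);
    // Every remaining value under key1 goes away.
    assert!(txn.del(&db, b"key1", None)?);
    // Nothing was stored under this key, so `false` comes back.
    assert!(!txn.del(&db, b"missing", None)?);

    drop(db);
    txn.commit()?;
    Ok(())
}
```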
@@ -379,9 +353,7 @@ where
 
     /// Empties the given database. All items will be removed.
     pub fn clear_db<'txn>(&'txn self, db: &Database<'txn>) -> Result<()> {
-        mdbx_result(txn_execute(&self.txn, |txn| unsafe {
-            ffi::mdbx_drop(txn, db.dbi(), false)
-        }))?;
+        mdbx_result(txn_execute(&self.txn, |txn| unsafe { ffi::mdbx_drop(txn, db.dbi(), false) }))?;
 
         Ok(())
     }
@@ -389,11 +361,10 @@ where
     /// Drops the database from the environment.
     ///
     /// # Safety
-    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi BEFORE calling this function.
+    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
+    /// BEFORE calling this function.
     pub unsafe fn drop_db<'txn>(&'txn self, db: Database<'txn>) -> Result<()> {
-        mdbx_result(txn_execute(&self.txn, |txn| {
-            ffi::mdbx_drop(txn, db.dbi(), true)
-        }))?;
+        mdbx_result(txn_execute(&self.txn, |txn| ffi::mdbx_drop(txn, db.dbi(), true)))?;
 
         Ok(())
     }
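`clear_db` and `drop_db` above do different things: the first empties the table but keeps the dbi usable, the second deletes the table itself and is `unsafe` because of the handle-closing requirement in the safety note. A hedged sketch following the same shape (including the "canary" workaround) as `test_clear_db`/`test_drop_db` near the end of this diff, assuming `Error`, `Result`, and `set_max_dbs` are available as in the earlier sketches:

```rust
use reth_libmdbx::{DatabaseFlags, Environment, Error};
use tempfile::tempdir;

// Sketch: create a named table, clear it, then drop it entirely.
fn clear_then_drop() -> reth_libmdbx::Result<()> {
    let dir = tempdir().unwrap();
    let env = Environment::new().set_max_dbs(2).open(dir.path())?;

    // Set up "scratch" plus the same canary table test_drop_db uses as a
    // workaround for the MDBX dbi-drop issue.
    let setup = env.begin_rw_txn()?;
    setup.create_db(Some("scratch"), DatabaseFlags::empty())?;
    setup.create_db(Some("canary"), DatabaseFlags::empty())?;
    setup.commit()?;

    let txn = env.begin_rw_txn()?;
    let db = txn.open_db(Some("scratch"))?;
    // clear_db: the table stays, its items are removed.
    txn.clear_db(&db)?;
    // drop_db: the table is gone; reopening it reports NotFound.
    unsafe { txn.drop_db(db)? };
    assert!(matches!(txn.open_db(Some("scratch")).unwrap_err(), Error::NotFound));
    txn.commit()?;
    Ok(())
}
```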
@@ -406,7 +377,8 @@ where
     /// Closes the database handle.
     ///
     /// # Safety
-    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi BEFORE calling this function.
+    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
+    /// BEFORE calling this function.
     pub unsafe fn close_db(&self, db: Database<'_>) -> Result<()> {
         mdbx_result(ffi::mdbx_dbi_close(self.env.env(), db.dbi()))?;
 
@@ -430,9 +402,7 @@ impl<'env> Transaction<'env, RW, NoWriteMap> {
             })
             .unwrap();
 
-            rx.recv()
-                .unwrap()
-                .map(|ptr| Transaction::new_from_ptr(self.env, ptr.0))
+            rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env, ptr.0))
         })
     }
 }
@@ -465,10 +435,7 @@ where
                 .txn_manager
                 .as_ref()
                 .unwrap()
-                .send(TxnManagerMessage::Abort {
-                    tx: TxnPtr(txn),
-                    sender,
-                })
+                .send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender })
                 .unwrap();
             rx.recv().unwrap().unwrap();
         }
@@ -26,10 +26,7 @@ fn test_get() {
     assert_eq!(cursor.last().unwrap(), Some((*b"key3", *b"val3")));
     assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
     assert_eq!(cursor.set_key(b"key3").unwrap(), Some((*b"key3", *b"val3")));
-    assert_eq!(
-        cursor.set_range(b"key2\0").unwrap(),
-        Some((*b"key3", *b"val3"))
-    );
+    assert_eq!(cursor.set_range(b"key2\0").unwrap(), Some((*b"key3", *b"val3")));
 }
 
 #[test]
@@ -62,16 +59,10 @@ fn test_get_dup() {
     assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None);
     assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
     assert_eq!(cursor.set(b"key2").unwrap(), Some(*b"val1"));
-    assert_eq!(
-        cursor.set_range(b"key1\0").unwrap(),
-        Some((*b"key2", *b"val1"))
-    );
+    assert_eq!(cursor.set_range(b"key1\0").unwrap(), Some((*b"key2", *b"val1")));
     assert_eq!(cursor.get_both(b"key1", b"val3").unwrap(), Some(*b"val3"));
     assert_eq!(cursor.get_both_range::<()>(b"key1", b"val4").unwrap(), None);
-    assert_eq!(
-        cursor.get_both_range(b"key2", b"val").unwrap(),
-        Some(*b"val1")
-    );
+    assert_eq!(cursor.get_both_range(b"key2", b"val").unwrap(), Some(*b"val1"));
 
     assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val3")));
     cursor.del(WriteFlags::empty()).unwrap();
@@ -88,9 +79,7 @@ fn test_get_dupfixed() {
     let env = Environment::new().open(dir.path()).unwrap();
 
     let txn = env.begin_rw_txn().unwrap();
-    let db = txn
-        .create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED)
-        .unwrap();
+    let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap();
     txn.put(&db, b"key1", b"val1", WriteFlags::empty()).unwrap();
     txn.put(&db, b"key1", b"val2", WriteFlags::empty()).unwrap();
     txn.put(&db, b"key1", b"val3", WriteFlags::empty()).unwrap();
@@ -144,33 +133,21 @@ fn test_iter() {
         cursor.iter().collect::<Result<Vec<_>>>().unwrap()
     );
 
-    assert_eq!(
-        items,
-        cursor.iter_start().collect::<Result<Vec<_>>>().unwrap()
-    );
+    assert_eq!(items, cursor.iter_start().collect::<Result<Vec<_>>>().unwrap());
 
     assert_eq!(
         items.clone().into_iter().skip(1).collect::<Vec<_>>(),
-        cursor
-            .iter_from(b"key2")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_from(b"key2").collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(
         items.into_iter().skip(3).collect::<Vec<_>>(),
-        cursor
-            .iter_from(b"key4")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_from(b"key4").collect::<Result<Vec<_>>>().unwrap()
    );
 
     assert_eq!(
         Vec::<((), ())>::new(),
-        cursor
-            .iter_from(b"key6")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_from(b"key6").collect::<Result<Vec<_>>>().unwrap()
     );
 }
 
@@ -206,11 +183,7 @@ fn test_iter_empty_dup_database() {
     assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
     assert!(cursor.iter_dup::<(), ()>().flatten().next().is_none());
     assert!(cursor.iter_dup_start::<(), ()>().flatten().next().is_none());
-    assert!(cursor
-        .iter_dup_from::<(), ()>(b"foo")
-        .flatten()
-        .next()
-        .is_none());
+    assert!(cursor.iter_dup_from::<(), ()>(b"foo").flatten().next().is_none());
     assert!(cursor.iter_dup_of::<(), ()>(b"foo").next().is_none());
 }
 
@@ -253,91 +226,39 @@ fn test_iter_dup() {
     let txn = env.begin_ro_txn().unwrap();
     let db = txn.open_db(None).unwrap();
     let mut cursor = txn.cursor(&db).unwrap();
-    assert_eq!(
-        items,
-        cursor
-            .iter_dup()
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
-    );
+    assert_eq!(items, cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap());
 
     cursor.set::<()>(b"b").unwrap();
     assert_eq!(
         items.iter().copied().skip(4).collect::<Vec<_>>(),
-        cursor
-            .iter_dup()
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap()
     );
 
-    assert_eq!(
-        items,
-        cursor
-            .iter_dup_start()
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
-    );
+    assert_eq!(items, cursor.iter_dup_start().flatten().collect::<Result<Vec<_>>>().unwrap());
 
     assert_eq!(
-        items
-            .iter()
-            .copied()
-            .into_iter()
-            .skip(3)
-            .collect::<Vec<_>>(),
-        cursor
-            .iter_dup_from(b"b")
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        items.iter().copied().into_iter().skip(3).collect::<Vec<_>>(),
+        cursor.iter_dup_from(b"b").flatten().collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(
-        items
-            .iter()
-            .copied()
-            .into_iter()
-            .skip(3)
-            .collect::<Vec<_>>(),
-        cursor
-            .iter_dup_from(b"ab")
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        items.iter().copied().into_iter().skip(3).collect::<Vec<_>>(),
+        cursor.iter_dup_from(b"ab").flatten().collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(
-        items
-            .iter()
-            .copied()
-            .into_iter()
-            .skip(9)
-            .collect::<Vec<_>>(),
-        cursor
-            .iter_dup_from(b"d")
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        items.iter().copied().into_iter().skip(9).collect::<Vec<_>>(),
+        cursor.iter_dup_from(b"d").flatten().collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(
         Vec::<([u8; 1], [u8; 1])>::new(),
-        cursor
-            .iter_dup_from(b"f")
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_dup_from(b"f").flatten().collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(
         items.iter().copied().skip(3).take(3).collect::<Vec<_>>(),
-        cursor
-            .iter_dup_of(b"b")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_dup_of(b"b").collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(0, cursor.iter_dup_of::<(), ()>(b"foo").count());
@@ -376,35 +297,18 @@ fn test_iter_del_get() {
     let txn = env.begin_rw_txn().unwrap();
     let db = txn.open_db(None).unwrap();
     let mut cursor = txn.cursor(&db).unwrap();
-    assert_eq!(
-        items,
-        cursor
-            .iter_dup()
-            .flatten()
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
-    );
+    assert_eq!(items, cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap());
 
     assert_eq!(
         items.iter().copied().take(1).collect::<Vec<(_, _)>>(),
-        cursor
-            .iter_dup_of(b"a")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
+        cursor.iter_dup_of(b"a").collect::<Result<Vec<_>>>().unwrap()
     );
 
     assert_eq!(cursor.set(b"a").unwrap(), Some(*b"1"));
 
     cursor.del(WriteFlags::empty()).unwrap();
 
-    assert_eq!(
-        cursor
-            .iter_dup_of::<(), ()>(b"a")
-            .collect::<Result<Vec<_>>>()
-            .unwrap()
-            .len(),
-        0
-    );
+    assert_eq!(cursor.iter_dup_of::<(), ()>(b"a").collect::<Result<Vec<_>>>().unwrap().len(), 0);
 }
 
 #[test]
@@ -422,19 +326,13 @@ fn test_put_del() {
 
     assert_eq!(
         cursor.get_current().unwrap().unwrap(),
-        (
-            Cow::Borrowed(b"key3" as &[u8]),
-            Cow::Borrowed(b"val3" as &[u8])
-        )
+        (Cow::Borrowed(b"key3" as &[u8]), Cow::Borrowed(b"val3" as &[u8]))
     );
 
     cursor.del(WriteFlags::empty()).unwrap();
     assert_eq!(cursor.get_current::<Vec<u8>, Vec<u8>>().unwrap(), None);
     assert_eq!(
         cursor.last().unwrap().unwrap(),
-        (
-            Cow::Borrowed(b"key2" as &[u8]),
-            Cow::Borrowed(b"val2" as &[u8])
-        )
+        (Cow::Borrowed(b"key2" as &[u8]), Cow::Borrowed(b"val2" as &[u8]))
     );
 }
 
@@ -9,19 +9,13 @@ fn test_open() {
     let dir = tempdir().unwrap();
 
     // opening non-existent env with read-only should fail
-    assert!(Environment::new()
-        .set_flags(Mode::ReadOnly.into())
-        .open(dir.path())
-        .is_err());
+    assert!(Environment::new().set_flags(Mode::ReadOnly.into()).open(dir.path()).is_err());
 
     // opening non-existent env should succeed
     assert!(Environment::new().open(dir.path()).is_ok());
 
     // opening env with read-only should succeed
-    assert!(Environment::new()
-        .set_flags(Mode::ReadOnly.into())
-        .open(dir.path())
-        .is_ok());
+    assert!(Environment::new().set_flags(Mode::ReadOnly.into()).open(dir.path()).is_ok());
 }
 
 #[test]
@@ -38,10 +32,7 @@ fn test_begin_txn() {
 
     {
         // read-only environment
-        let env = Environment::new()
-            .set_flags(Mode::ReadOnly.into())
-            .open(dir.path())
-            .unwrap();
+        let env = Environment::new().set_flags(Mode::ReadOnly.into()).open(dir.path()).unwrap();
 
         assert!(env.begin_rw_txn().is_err());
         assert!(env.begin_ro_txn().is_ok());
@@ -65,9 +56,7 @@ fn test_create_db() {
 
     let txn = env.begin_rw_txn().unwrap();
     assert!(txn.open_db(Some("testdb")).is_err());
-    assert!(txn
-        .create_db(Some("testdb"), DatabaseFlags::empty())
-        .is_ok());
+    assert!(txn.create_db(Some("testdb"), DatabaseFlags::empty()).is_ok());
     assert!(txn.open_db(Some("testdb")).is_ok())
 }
 
@@ -89,10 +78,7 @@ fn test_sync() {
         env.sync(true).unwrap();
     }
     {
-        let env = Environment::new()
-            .set_flags(Mode::ReadOnly.into())
-            .open(dir.path())
-            .unwrap();
+        let env = Environment::new().set_flags(Mode::ReadOnly.into()).open(dir.path()).unwrap();
         env.sync(true).unwrap_err();
     }
 }
@@ -115,13 +101,7 @@ fn test_stat() {
         let mut value = [0u8; 8];
         LittleEndian::write_u64(&mut value, i);
         let tx = env.begin_rw_txn().expect("begin_rw_txn");
-        tx.put(
-            &tx.open_db(None).unwrap(),
-            &value,
-            &value,
-            WriteFlags::default(),
-        )
-        .expect("tx.put");
+        tx.put(&tx.open_db(None).unwrap(), &value, &value, WriteFlags::default()).expect("tx.put");
         tx.commit().expect("tx.commit");
     }
 
@@ -139,10 +119,7 @@ fn test_info() {
     let map_size = 1024 * 1024;
     let dir = tempdir().unwrap();
     let env = Environment::new()
-        .set_geometry(Geometry {
-            size: Some(map_size..),
-            ..Default::default()
-        })
+        .set_geometry(Geometry { size: Some(map_size..), ..Default::default() })
         .open(dir.path())
         .unwrap();
 
@@ -166,13 +143,7 @@ fn test_freelist() {
         let mut value = [0u8; 8];
         LittleEndian::write_u64(&mut value, i);
         let tx = env.begin_rw_txn().expect("begin_rw_txn");
-        tx.put(
-            &tx.open_db(None).unwrap(),
-            &value,
-            &value,
-            WriteFlags::default(),
-        )
-        .expect("tx.put");
+        tx.put(&tx.open_db(None).unwrap(), &value, &value, WriteFlags::default()).expect("tx.put");
         tx.commit().expect("tx.commit");
     }
     let tx = env.begin_rw_txn().expect("begin_rw_txn");
@@ -126,20 +126,12 @@ fn test_nested_txn() {
     let env = Environment::new().open(dir.path()).unwrap();
 
     let mut txn = env.begin_rw_txn().unwrap();
-    txn.put(
-        &txn.open_db(None).unwrap(),
-        b"key1",
-        b"val1",
-        WriteFlags::empty(),
-    )
-    .unwrap();
+    txn.put(&txn.open_db(None).unwrap(), b"key1", b"val1", WriteFlags::empty()).unwrap();
 
     {
         let nested = txn.begin_nested_txn().unwrap();
         let db = nested.open_db(None).unwrap();
-        nested
-            .put(&db, b"key2", b"val2", WriteFlags::empty())
-            .unwrap();
+        nested.put(&db, b"key2", b"val2", WriteFlags::empty()).unwrap();
         assert_eq!(nested.get(&db, b"key1").unwrap(), Some(*b"val1"));
         assert_eq!(nested.get(&db, b"key2").unwrap(), Some(*b"val2"));
     }
@@ -156,13 +148,7 @@ fn test_clear_db() {
 
     {
         let txn = env.begin_rw_txn().unwrap();
-        txn.put(
-            &txn.open_db(None).unwrap(),
-            b"key",
-            b"val",
-            WriteFlags::empty(),
-        )
-        .unwrap();
+        txn.put(&txn.open_db(None).unwrap(), b"key", b"val", WriteFlags::empty()).unwrap();
         assert!(!txn.commit().unwrap());
     }
 
@@ -173,10 +159,7 @@ fn test_clear_db() {
     }
 
     let txn = env.begin_ro_txn().unwrap();
-    assert_eq!(
-        txn.get::<()>(&txn.open_db(None).unwrap(), b"key").unwrap(),
-        None
-    );
+    assert_eq!(txn.get::<()>(&txn.open_db(None).unwrap(), b"key").unwrap(), None);
 }
 
 #[test]
@@ -195,8 +178,7 @@ fn test_drop_db() {
         )
         .unwrap();
         // Workaround for MDBX dbi drop issue
-        txn.create_db(Some("canary"), DatabaseFlags::empty())
-            .unwrap();
+        txn.create_db(Some("canary"), DatabaseFlags::empty()).unwrap();
         assert!(!txn.commit().unwrap());
     }
     {
@@ -205,10 +187,7 @@ fn test_drop_db() {
         unsafe {
             txn.drop_db(db).unwrap();
         }
-        assert!(matches!(
-            txn.open_db(Some("test")).unwrap_err(),
-            Error::NotFound
-        ));
+        assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound));
         assert!(!txn.commit().unwrap());
     }
 }
@@ -217,10 +196,7 @@ fn test_drop_db() {
 
     let txn = env.begin_ro_txn().unwrap();
     txn.open_db(Some("canary")).unwrap();
-    assert!(matches!(
-        txn.open_db(Some("test")).unwrap_err(),
-        Error::NotFound
-    ));
+    assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound));
 }
 
 #[test]
@@ -285,13 +261,8 @@ fn test_concurrent_writers() {
         threads.push(thread::spawn(move || {
             let txn = writer_env.begin_rw_txn().unwrap();
             let db = txn.open_db(None).unwrap();
-            txn.put(
-                &db,
-                &format!("{}{}", key, i),
-                &format!("{}{}", val, i),
-                WriteFlags::empty(),
-            )
-            .unwrap();
+            txn.put(&db, &format!("{}{}", key, i), &format!("{}{}", val, i), WriteFlags::empty())
+                .unwrap();
             txn.commit().is_ok()
         }));
     }
@@ -303,9 +274,7 @@ fn test_concurrent_writers() {
     for i in 0..n {
         assert_eq!(
             Cow::<Vec<u8>>::Owned(format!("{}{}", val, i).into_bytes()),
-            txn.get(&db, format!("{}{}", key, i).as_bytes())
-                .unwrap()
-                .unwrap()
+            txn.get(&db, format!("{}{}", key, i).as_bytes()).unwrap().unwrap()
         );
     }
 }
@@ -57,10 +57,6 @@ exceptions = [
     { allow = ["CC0-1.0"], name = "tiny-keccak" },
     { allow = ["CC0-1.0"], name = "more-asserts" },
 
-    # TODO: temporarily allow libmdx
-    { allow = ["GPL-3.0"], name = "libmdbx" },
-    { allow = ["GPL-3.0"], name = "mdbx-sys" },
-
     # TODO: ethers transitive deps
     { allow = ["GPL-3.0"], name = "fastrlp" },
    { allow = ["GPL-3.0"], name = "fastrlp-derive" },