sqlite caching refactor

General refactoring to make blocking operations use dedicated blocking
thread workers, make SQL operations use transactions, and set up WAL
journal mode to minimize locking.

Signed-off-by: Manos Pitsidianakis <manos@pitsidianak.is>
pull/355/head
Manos Pitsidianakis 3 months ago
parent fd64fe0bf8
commit 4e7b665672
GPG Key ID: 7729C7707F7E09D0
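The pattern applied throughout the diff below is roughly: run the synchronous rusqlite calls on a blocking worker thread, put the database in WAL journal mode, and group writes in an IMMEDIATE transaction so readers are not starved. A minimal standalone sketch of that pattern, assuming the `rusqlite` and `smol` crates; the `cache.db` path, the `envelopes` schema and the `cache_insert` name are placeholders for illustration, not the actual meli code:

    use rusqlite::{params, Connection, TransactionBehavior};

    // Insert one record without ever blocking the async executor.
    async fn cache_insert(hash: u64, body: String) -> rusqlite::Result<()> {
        // smol::unblock moves the closure onto the blocking thread pool.
        smol::unblock(move || {
            let mut conn = Connection::open("cache.db")?; // placeholder path
            // WAL lets readers keep reading while a single writer holds the lock;
            // this pragma returns the resulting mode ("wal") as one row.
            let _mode: String =
                conn.query_row("PRAGMA journal_mode=WAL;", [], |row| row.get(0))?;
            // IMMEDIATE takes the write lock up front; the batch commits or rolls
            // back as a unit.
            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
            tx.execute_batch(
                "CREATE TABLE IF NOT EXISTS envelopes (hash BLOB PRIMARY KEY, body_text TEXT);",
            )?;
            tx.execute(
                "INSERT OR REPLACE INTO envelopes (hash, body_text) VALUES (?1, ?2)",
                params![hash.to_be_bytes().to_vec(), body],
            )?;
            tx.commit()
        })
        .await
    }

In meli the resulting futures are handed off to the job executor (`spawn_specialized`), which is what the call-site hunks below change.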

@@ -552,7 +552,7 @@ impl Account {
#[cfg(feature = "sqlite3")] #[cfg(feature = "sqlite3")]
if settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 { if settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 {
let db_path = match crate::sqlite3::db_path() { let db_path = match crate::sqlite3::AccountCache::db_path(&name) {
Err(err) => { Err(err) => {
main_loop_handler.send(ThreadEvent::UIEvent(UIEvent::StatusEvent( main_loop_handler.send(ThreadEvent::UIEvent(UIEvent::StatusEvent(
StatusEvent::DisplayMessage(format!( StatusEvent::DisplayMessage(format!(
@@ -872,9 +872,9 @@ impl Account {
}; };
#[cfg(feature = "sqlite3")] #[cfg(feature = "sqlite3")]
if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 { if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 {
let handle = self.main_loop_handler.job_executor.spawn_blocking( let handle = self.main_loop_handler.job_executor.spawn_specialized(
"sqlite3::insert".into(), "sqlite3::insert".into(),
crate::sqlite3::insert( crate::sqlite3::AccountCache::insert(
(*envelope).clone(), (*envelope).clone(),
self.backend.clone(), self.backend.clone(),
self.name.clone(), self.name.clone(),
@@ -951,15 +951,18 @@ impl Account {
} }
#[cfg(feature = "sqlite3")] #[cfg(feature = "sqlite3")]
if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 { if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 {
if let Err(err) = crate::sqlite3::remove(env_hash) { let fut = crate::sqlite3::AccountCache::remove(self.name.clone(), env_hash);
let envelopes = self.collection.envelopes.read().unwrap(); let handle = self
log::error!( .main_loop_handler
"Failed to remove envelope {} [{}] in cache: {}", .job_executor
&envelopes[&env_hash].message_id_display(), .spawn_specialized("remove envelope from cache".into(), fut);
env_hash, self.insert_job(
err handle.job_id,
); JobRequest::Refresh {
} mailbox_hash,
handle,
},
);
} }
let thread_hash = self.collection.get_env(env_hash).thread(); let thread_hash = self.collection.get_env(env_hash).thread();
if !self if !self
@@ -1643,7 +1646,9 @@ impl Account {
let query = melib::search::Query::try_from(search_term)?; let query = melib::search::Query::try_from(search_term)?;
match self.settings.conf.search_backend { match self.settings.conf.search_backend {
#[cfg(feature = "sqlite3")] #[cfg(feature = "sqlite3")]
crate::conf::SearchBackend::Sqlite3 => crate::sqlite3::search(&query, _sort), crate::conf::SearchBackend::Sqlite3 => Ok(Box::pin(
crate::sqlite3::AccountCache::search(self.name.clone(), query, _sort),
)),
crate::conf::SearchBackend::Auto | crate::conf::SearchBackend::None => { crate::conf::SearchBackend::Auto | crate::conf::SearchBackend::None => {
if self.backend_capabilities.supports_search { if self.backend_capabilities.supports_search {
self.backend self.backend

@@ -60,28 +60,31 @@ impl Account {
pub(super) fn update_cached_env(&mut self, env: Envelope, old_hash: Option<EnvelopeHash>) { pub(super) fn update_cached_env(&mut self, env: Envelope, old_hash: Option<EnvelopeHash>) {
if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 { if self.settings.conf.search_backend == crate::conf::SearchBackend::Sqlite3 {
let msg_id = env.message_id_display().to_string(); let msg_id = env.message_id_display().to_string();
match crate::sqlite3::remove(old_hash.unwrap_or_else(|| env.hash())) let name = self.name.clone();
.map(|_| crate::sqlite3::insert(env, self.backend.clone(), self.name.clone())) let backend = self.backend.clone();
{ let fut = async move {
Ok(job) => { crate::sqlite3::AccountCache::remove(
let handle = self name.clone(),
.main_loop_handler old_hash.unwrap_or_else(|| env.hash()),
.job_executor )
.spawn_blocking("sqlite3::remove".into(), job); .await?;
self.insert_job(
handle.job_id, crate::sqlite3::AccountCache::insert(env, backend, name).await?;
JobRequest::Generic { Ok(())
name: format!("Update envelope {} in sqlite3 cache", msg_id).into(), };
handle, let handle = self
log_level: LogLevel::TRACE, .main_loop_handler
on_finish: None, .job_executor
}, .spawn_specialized("sqlite3::remove".into(), fut);
); self.insert_job(
} handle.job_id,
Err(err) => { JobRequest::Generic {
log::error!("Failed to update envelope {} in cache: {}", msg_id, err); name: format!("Update envelope {} in sqlite3 cache", msg_id).into(),
} handle,
} log_level: LogLevel::TRACE,
on_finish: None,
},
);
} }
} }
} }

@@ -234,6 +234,7 @@ impl JobExecutor {
} }
/// Spawns a future with a generic return value `R` /// Spawns a future with a generic return value `R`
#[inline(always)]
pub fn spawn_specialized<F, R>(&self, desc: Cow<'static, str>, future: F) -> JoinHandle<R> pub fn spawn_specialized<F, R>(&self, desc: Cow<'static, str>, future: F) -> JoinHandle<R>
where where
F: Future<Output = R> + Send + 'static, F: Future<Output = R> + Send + 'static,
@@ -295,6 +296,7 @@ impl JobExecutor {
/// Spawns a future with a generic return value `R` that might block on a /// Spawns a future with a generic return value `R` that might block on a
/// new thread /// new thread
#[inline(always)]
pub fn spawn_blocking<F, R>(&self, desc: Cow<'static, str>, future: F) -> JoinHandle<R> pub fn spawn_blocking<F, R>(&self, desc: Cow<'static, str>, future: F) -> JoinHandle<R>
where where
F: Future<Output = R> + Send + 'static, F: Future<Output = R> + Send + 'static,

@@ -178,10 +178,9 @@ impl AccountStatus {
} }
#[cfg(feature = "sqlite3")] #[cfg(feature = "sqlite3")]
(SearchBackend::Sqlite3, _) => { (SearchBackend::Sqlite3, _) => {
if let Ok(path) = crate::sqlite3::db_path() { match crate::sqlite3::AccountCache::db_path(&a.name) {
format!("sqlite3 database {}", path.display()) Ok(path) => format!("sqlite3 database: {}", path.display()),
} else { Err(err) => format!("sqlite3 error: {err}"),
"sqlite3 database".to_string()
} }
} }
}, },

@@ -26,22 +26,23 @@ use std::{
}; };
use melib::{ use melib::{
backends::{MailBackend, ResultFuture}, backends::MailBackend,
email::{Envelope, EnvelopeHash}, email::{Envelope, EnvelopeHash},
log, log,
search::{ search::{
escape_double_quote, escape_double_quote,
Query::{self, *}, Query::{self, *},
}, },
utils::sqlite3::{self as melib_sqlite3, rusqlite::params, DatabaseDescription}, smol,
Error, Result, SortField, SortOrder, utils::sqlite3::{rusqlite::params, DatabaseDescription},
Error, Result, ResultIntoError, SortField, SortOrder,
}; };
use smallvec::SmallVec; use smallvec::SmallVec;
use crate::melib::ResultIntoError;
const DB: DatabaseDescription = DatabaseDescription { const DB: DatabaseDescription = DatabaseDescription {
name: "index.db", name: "index.db",
identifier: None,
application_prefix: "meli",
init_script: Some( init_script: Some(
"CREATE TABLE IF NOT EXISTS envelopes ( "CREATE TABLE IF NOT EXISTS envelopes (
id INTEGER PRIMARY KEY, id INTEGER PRIMARY KEY,
@@ -113,10 +114,6 @@ END; ",
version: 1, version: 1,
}; };
pub fn db_path() -> Result<PathBuf> {
melib_sqlite3::db_path(DB.name)
}
//#[inline(always)] //#[inline(always)]
//fn fts5_bareword(w: &str) -> Cow<str> { //fn fts5_bareword(w: &str) -> Cow<str> {
// if w == "AND" || w == "OR" || w == "NOT" { // if w == "AND" || w == "OR" || w == "NOT" {
@@ -140,152 +137,192 @@ pub fn db_path() -> Result<PathBuf> {
// } // }
//} //}
// //
//
pub async fn insert(
envelope: Envelope,
backend: Arc<RwLock<Box<dyn MailBackend>>>,
acc_name: String,
) -> Result<()> {
let db_path = db_path()?;
if !db_path.exists() {
return Err(Error::new(
"Database hasn't been initialised. Run `reindex` command",
));
}
let conn = melib_sqlite3::open_db(db_path)?; pub struct AccountCache;
let op = backend impl AccountCache {
.read() pub async fn insert(
.unwrap() envelope: Envelope,
.operation(envelope.hash())? backend: Arc<RwLock<Box<dyn MailBackend>>>,
.as_bytes()?; acc_name: String,
) -> Result<()> {
let db_desc = DatabaseDescription {
identifier: Some(acc_name.clone().into()),
..DB.clone()
};
let body = match op.await.map(|bytes| envelope.body_bytes(&bytes)) { if !db_desc.exists().unwrap_or(false) {
Ok(body) => body.text(), return Err(Error::new(format!(
Err(err) => { "Database hasn't been initialised. Run `reindex {acc_name}` command"
log::error!( )));
"Failed to open envelope {}: {err}",
envelope.message_id_display(),
);
return Err(err);
} }
};
if let Err(err) = conn.execute( let op = backend
"INSERT OR IGNORE INTO accounts (name) VALUES (?1)", .read()
params![acc_name,],
) {
log::error!(
"Failed to insert envelope {}: {err}",
envelope.message_id_display(),
);
return Err(Error::new(err.to_string()));
}
let account_id: i32 = {
let mut stmt = conn
.prepare("SELECT id FROM accounts WHERE name = ?")
.unwrap();
let x = stmt
.query_map(params![acc_name], |row| row.get(0))
.unwrap()
.next()
.unwrap() .unwrap()
.unwrap(); .operation(envelope.hash())?
x .as_bytes()?;
};
if let Err(err) = conn let body = match op.await.map(|bytes| envelope.body_bytes(&bytes)) {
.execute( Ok(body) => body.text(),
"INSERT OR REPLACE INTO envelopes (account_id, hash, date, _from, _to, cc, bcc, \ Err(err) => {
subject, message_id, in_reply_to, _references, flags, has_attachments, body_text, \ log::error!(
timestamp) "Failed to open envelope {}: {err}",
envelope.message_id_display(),
);
return Err(err);
}
};
smol::unblock(move || {
let mut conn = db_desc.open_or_create_db()?;
let tx =
conn.transaction_with_behavior(melib::rusqlite::TransactionBehavior::Immediate)?;
if let Err(err) = tx.execute(
"INSERT OR IGNORE INTO accounts (name) VALUES (?1)",
params![acc_name,],
) {
log::error!(
"Failed to insert envelope {}: {err}",
envelope.message_id_display(),
);
return Err(Error::new(err.to_string()));
}
let account_id: i32 = {
let mut stmt = tx
.prepare("SELECT id FROM accounts WHERE name = ?")
.unwrap();
let x = stmt
.query_map(params![acc_name], |row| row.get(0))
.unwrap()
.next()
.unwrap()
.unwrap();
x
};
if let Err(err) = tx
.execute(
"INSERT OR REPLACE INTO envelopes (account_id, hash, date, _from, _to, cc, \
bcc, subject, message_id, in_reply_to, _references, flags, has_attachments, \
body_text, timestamp)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)",
params![ params![
account_id, account_id,
envelope.hash().to_be_bytes().to_vec(), envelope.hash().to_be_bytes().to_vec(),
envelope.date_as_str(), envelope.date_as_str(),
envelope.field_from_to_string(), envelope.field_from_to_string(),
envelope.field_to_to_string(), envelope.field_to_to_string(),
envelope.field_cc_to_string(), envelope.field_cc_to_string(),
envelope.field_bcc_to_string(), envelope.field_bcc_to_string(),
envelope.subject().into_owned().trim_end_matches('\u{0}'), envelope.subject().into_owned().trim_end_matches('\u{0}'),
envelope.message_id_display().to_string(), envelope.message_id_display().to_string(),
envelope envelope
.in_reply_to_display() .in_reply_to_display()
.map(|f| f.to_string()) .map(|f| f.to_string())
.unwrap_or_default(), .unwrap_or_default(),
envelope.field_references_to_string(), envelope.field_references_to_string(),
i64::from(envelope.flags().bits()), i64::from(envelope.flags().bits()),
i32::from(envelope.has_attachments()), i32::from(envelope.has_attachments()),
body, body,
envelope.date().to_be_bytes().to_vec() envelope.date().to_be_bytes().to_vec()
], ],
) )
.map_err(|e| Error::new(e.to_string())) .map_err(|e| Error::new(e.to_string()))
{ {
log::error!( drop(tx);
"Failed to insert envelope {}: {err}", log::error!(
envelope.message_id_display(), "Failed to insert envelope {}: {err}",
); envelope.message_id_display(),
);
} else {
tx.commit()?;
}
Ok(())
})
.await?;
Ok(())
} }
Ok(())
}
pub fn remove(env_hash: EnvelopeHash) -> Result<()> { pub async fn remove(acc_name: String, env_hash: EnvelopeHash) -> Result<()> {
let db_path = db_path()?; let db_desc = DatabaseDescription {
if !db_path.exists() { identifier: Some(acc_name.into()),
return Err(Error::new( ..DB.clone()
"Database hasn't been initialised. Run `reindex` command", };
)); let db_path = db_desc.db_path()?;
} if !db_path.exists() {
return Err(Error::new(
"Database hasn't been initialised. Run `reindex {acc_name}` command",
));
}
let conn = melib_sqlite3::open_db(db_path)?; smol::unblock(move || {
if let Err(err) = conn let mut conn = db_desc.open_or_create_db()?;
.execute( let tx =
"DELETE FROM envelopes WHERE hash = ?", conn.transaction_with_behavior(melib::rusqlite::TransactionBehavior::Immediate)?;
params![env_hash.to_be_bytes().to_vec(),], if let Err(err) = tx
) .execute(
.map_err(|e| Error::new(e.to_string())) "DELETE FROM envelopes WHERE hash = ?",
{ params![env_hash.to_be_bytes().to_vec(),],
log::error!("Failed to remove envelope {env_hash}: {err}"); )
return Err(err); .map_err(|e| Error::new(e.to_string()))
{
drop(tx);
log::error!("Failed to remove envelope {env_hash}: {err}");
return Err(err);
}
tx.commit()?;
Ok(())
})
.await?;
Ok(())
} }
Ok(())
}
pub fn index(context: &crate::state::Context, account_index: usize) -> ResultFuture<()> { pub async fn index(
let account = &context.accounts[account_index]; acc_name: Arc<String>,
let (acc_name, acc_mutex, backend_mutex): (String, Arc<RwLock<_>>, Arc<_>) = ( collection: melib::Collection,
account.name().to_string(), backend_mutex: Arc<RwLock<Box<dyn MailBackend>>>,
account.collection.envelopes.clone(), ) -> Result<()> {
account.backend.clone(), let acc_mutex = collection.envelopes.clone();
); let db_desc = Arc::new(DatabaseDescription {
let conn = melib_sqlite3::open_or_create_db(&DB, Some(acc_name.as_str()))?; identifier: Some(acc_name.to_string().into()),
let env_hashes = acc_mutex ..DB.clone()
.read() });
.unwrap() let env_hashes = acc_mutex
.keys() .read()
.cloned() .unwrap()
.collect::<Vec<_>>(); .keys()
.cloned()
.collect::<Vec<_>>();
/* Sleep, index and repeat in order not to block the main process */ /* Sleep, index and repeat in order not to block the main process */
Ok(Box::pin(async move {
conn.execute(
"INSERT OR REPLACE INTO accounts (name) VALUES (?1)",
params![acc_name.as_str(),],
)
.chain_err_summary(|| "Failed to update index:")?;
let account_id: i32 = { let account_id: i32 = {
let mut stmt = conn let acc_name = Arc::clone(&acc_name);
.prepare("SELECT id FROM accounts WHERE name = ?") let db_desc = Arc::clone(&db_desc);
.unwrap(); smol::unblock(move || {
let x = stmt let mut conn = db_desc.open_or_create_db()?;
.query_map(params![acc_name.as_str()], |row| row.get(0)) let tx = conn
.unwrap() .transaction_with_behavior(melib::rusqlite::TransactionBehavior::Immediate)?;
.next() tx.execute(
.unwrap() "INSERT OR REPLACE INTO accounts (name) VALUES (?1)",
.unwrap(); params![acc_name.as_str(),],
x )
.chain_err_summary(|| "Failed to update index:")?;
let account_id = {
let mut stmt = tx
.prepare("SELECT id FROM accounts WHERE name = ?")
.unwrap();
let x = stmt
.query_map(params![acc_name.as_str()], |row| row.get(0))
.unwrap()
.next()
.unwrap()
.unwrap();
x
};
tx.commit()?;
Ok::<i32, Error>(account_id)
})
.await?
}; };
let mut ctr = 0; let mut ctr = 0;
log::trace!( log::trace!(
@@ -296,90 +333,133 @@ pub fn index(context: &crate::state::Context, account_index: usize) -> ResultFut
); );
for chunk in env_hashes.chunks(200) { for chunk in env_hashes.chunks(200) {
ctr += chunk.len(); ctr += chunk.len();
for env_hash in chunk { let mut chunk_bytes = Vec::with_capacity(chunk.len());
let mut op = backend_mutex.read().unwrap().operation(*env_hash)?; for &env_hash in chunk {
let mut op = backend_mutex.read().unwrap().operation(env_hash)?;
let bytes = op let bytes = op
.as_bytes()? .as_bytes()?
.await .await
.chain_err_summary(|| format!("Failed to open envelope {}", env_hash))?; .chain_err_summary(|| format!("Failed to open envelope {}", env_hash))?;
let envelopes_lck = acc_mutex.read().unwrap(); chunk_bytes.push((env_hash, bytes));
if let Some(e) = envelopes_lck.get(env_hash) { }
let body = e.body_bytes(&bytes).text().replace('\0', ""); {
conn.execute( let acc_mutex = acc_mutex.clone();
"INSERT OR REPLACE INTO envelopes (account_id, hash, date, _from, _to, \ let db_desc = Arc::clone(&db_desc);
cc, bcc, subject, message_id, in_reply_to, _references, flags, \ smol::unblock(move || {
has_attachments, body_text, timestamp) let mut conn = db_desc.open_or_create_db()?;
let tx = conn.transaction_with_behavior(
melib::rusqlite::TransactionBehavior::Immediate,
)?;
let envelopes_lck = acc_mutex.read().unwrap();
for (env_hash, bytes) in chunk_bytes {
if let Some(e) = envelopes_lck.get(&env_hash) {
let body = e.body_bytes(&bytes).text().replace('\0', "");
tx.execute(
"INSERT OR REPLACE INTO envelopes (account_id, hash, date, _from, \
_to, cc, bcc, subject, message_id, in_reply_to, _references, \
flags, has_attachments, body_text, timestamp)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)",
params![ params![
account_id, account_id,
e.hash().to_be_bytes().to_vec(), e.hash().to_be_bytes().to_vec(),
e.date_as_str(), e.date_as_str(),
e.field_from_to_string(), e.field_from_to_string(),
e.field_to_to_string(), e.field_to_to_string(),
e.field_cc_to_string(), e.field_cc_to_string(),
e.field_bcc_to_string(), e.field_bcc_to_string(),
e.subject().into_owned().trim_end_matches('\u{0}'), e.subject().into_owned().trim_end_matches('\u{0}'),
e.message_id_display().to_string(), e.message_id_display().to_string(),
e.in_reply_to_display() e.in_reply_to_display()
.map(|f| f.to_string()) .map(|f| f.to_string())
.unwrap_or_default(), .unwrap_or_default(),
e.field_references_to_string(), e.field_references_to_string(),
i64::from(e.flags().bits()), i64::from(e.flags().bits()),
i32::from(e.has_attachments()), i32::from(e.has_attachments()),
body, body,
e.date().to_be_bytes().to_vec() e.date().to_be_bytes().to_vec()
], ],
) )
.chain_err_summary(|| { .chain_err_summary(|| {
format!("Failed to insert envelope {}", e.message_id_display()) format!("Failed to insert envelope {}", e.message_id_display())
})?; })?;
} }
}
tx.commit()?;
Ok::<(), Error>(())
})
.await?;
} }
let sleep_dur = std::time::Duration::from_millis(20); let sleep_dur = std::time::Duration::from_millis(50);
std::thread::sleep(sleep_dur); smol::Timer::after(sleep_dur).await;
} }
Ok(()) Ok(())
}))
}
pub fn search(
query: &Query,
(sort_field, sort_order): (SortField, SortOrder),
) -> ResultFuture<SmallVec<[EnvelopeHash; 512]>> {
let db_path = db_path()?;
if !db_path.exists() {
return Err(Error::new(
"Database hasn't been initialised. Run `reindex` command",
));
} }
let conn = melib_sqlite3::open_db(db_path)?; pub async fn search(
acc_name: String,
query: Query,
(sort_field, sort_order): (SortField, SortOrder),
) -> Result<SmallVec<[EnvelopeHash; 512]>> {
let db_desc = DatabaseDescription {
identifier: Some(acc_name.clone().into()),
..DB.clone()
};
if !db_desc.exists().unwrap_or(false) {
return Err(Error::new(format!(
"Database hasn't been initialised for account `{}`. Run `reindex` command to \
build an index.",
acc_name
)));
}
let query = query_to_sql(&query);
let sort_field = match sort_field { smol::unblock(move || {
SortField::Subject => "subject", let mut conn = db_desc.open_or_create_db()?;
SortField::Date => "timestamp",
};
let sort_order = match sort_order { let sort_field = match sort_field {
SortOrder::Asc => "ASC", SortField::Subject => "subject",
SortOrder::Desc => "DESC", SortField::Date => "timestamp",
}; };
let mut stmt = conn let sort_order = match sort_order {
.prepare(&format!( SortOrder::Asc => "ASC",
"SELECT hash FROM envelopes WHERE {} ORDER BY {} {};", SortOrder::Desc => "DESC",
query_to_sql(query), };
sort_field,
sort_order
))
.map_err(|e| Error::new(e.to_string()))?;
let results = stmt let tx = conn.transaction()?;
.query_map([], |row| row.get::<_, EnvelopeHash>(0)) let mut stmt = tx
.map_err(Error::from)? .prepare(&format!(
.map(|item| item.map_err(Error::from)) "SELECT hash FROM envelopes WHERE {} ORDER BY {} {};",
.collect::<Result<SmallVec<[EnvelopeHash; 512]>>>(); query, sort_field, sort_order
Ok(Box::pin(async { results })) ))
.map_err(|e| Error::new(e.to_string()))?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map([], |row| row.get::<_, EnvelopeHash>(0))
.map_err(Error::from)?
.map(|item| item.map_err(Error::from))
.collect::<Result<SmallVec<[EnvelopeHash; 512]>>>();
x
})
.await
}
pub fn db_path(acc_name: &str) -> Result<PathBuf> {
let db_desc = DatabaseDescription {
identifier: Some(acc_name.to_string().into()),
..DB.clone()
};
let db_path = db_desc.db_path()?;
if !db_path.exists() {
return Err(Error::new(
"Database hasn't been initialised. Run `reindex {acc_name}` command",
));
}
Ok(db_path)
}
} }
/// Translates a `Query` to an Sqlite3 expression in a `String`. /// Translates a `Query` to an Sqlite3 expression in a `String`.

@@ -799,38 +799,36 @@ impl State {
}); });
return; return;
} }
match crate::sqlite3::index(&self.context, account_index) { let account = &self.context.accounts[account_index];
Ok(job) => { let (acc_name, backend_mutex): (Arc<String>, Arc<_>) = (
let handle = self Arc::new(account.name().to_string()),
.context account.backend.clone(),
.main_loop_handler );
.job_executor let job = crate::sqlite3::AccountCache::index(
.spawn_blocking("sqlite3::index".into(), job); acc_name,
self.context.accounts[account_index].active_jobs.insert( account.collection.clone(),
handle.job_id, backend_mutex,
crate::accounts::JobRequest::Generic { );
name: "Message index rebuild".into(), let handle = self
handle, .context
on_finish: None, .main_loop_handler
log_level: LogLevel::INFO, .job_executor
}, .spawn_specialized("sqlite3::index".into(), job);
); self.context.accounts[account_index].active_jobs.insert(
self.context.replies.push_back(UIEvent::Notification { handle.job_id,
title: None, crate::accounts::JobRequest::Generic {
source: None, name: "Message index rebuild".into(),
body: "Message index rebuild started.".into(), handle,
kind: Some(NotificationType::Info), on_finish: None,
}); log_level: LogLevel::INFO,
} },
Err(err) => { );
self.context.replies.push_back(UIEvent::Notification { self.context.replies.push_back(UIEvent::Notification {
title: Some("Message index rebuild failed".into()), title: None,
source: None, source: None,
body: err.to_string().into(), body: "Message index rebuild started.".into(),
kind: Some(NotificationType::Error(err.kind)), kind: Some(NotificationType::Info),
}); });
}
}
} }
#[cfg(not(feature = "sqlite3"))] #[cfg(not(feature = "sqlite3"))]
AccountAction(_, ReIndex) => { AccountAction(_, ReIndex) => {

@@ -19,16 +19,20 @@
* along with meli. If not, see <http://www.gnu.org/licenses/>. * along with meli. If not, see <http://www.gnu.org/licenses/>.
*/ */
use super::*;
pub mod sync;
use std::convert::TryFrom; use std::convert::TryFrom;
use super::*;
use crate::{ use crate::{
backends::MailboxHash, backends::MailboxHash,
email::{Envelope, EnvelopeHash}, email::{Envelope, EnvelopeHash},
error::*, error::*,
}; };
pub mod ram_cache;
#[cfg(feature = "sqlite3")]
pub mod sqlite3_cache;
pub mod sync;
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ModSequence(pub std::num::NonZeroU64); pub struct ModSequence(pub std::num::NonZeroU64);
@@ -107,690 +111,6 @@ pub trait ImapCacheReset: Send + std::fmt::Debug {
Self: Sized; Self: Sized;
} }
#[cfg(feature = "sqlite3")]
pub use sqlite3_m::*;
#[cfg(feature = "sqlite3")]
pub mod sqlite3_m {
use super::*;
use crate::utils::sqlite3::{
self,
rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, Value},
Connection, DatabaseDescription,
};
type Sqlite3UID = i32;
#[derive(Debug)]
pub struct Sqlite3Cache {
connection: Connection,
loaded_mailboxes: BTreeSet<MailboxHash>,
uid_store: Arc<UIDStore>,
}
const DB_DESCRIPTION: DatabaseDescription = DatabaseDescription {
name: "header_cache.db",
init_script: Some(
"PRAGMA foreign_keys = true;
PRAGMA encoding = 'UTF-8';
CREATE TABLE IF NOT EXISTS envelopes (
hash INTEGER NOT NULL,
mailbox_hash INTEGER NOT NULL,
uid INTEGER NOT NULL,
modsequence INTEGER,
rfc822 BLOB,
envelope BLOB NOT NULL,
PRIMARY KEY (mailbox_hash, uid),
FOREIGN KEY (mailbox_hash) REFERENCES mailbox(mailbox_hash) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS mailbox (
mailbox_hash INTEGER UNIQUE,
uidvalidity INTEGER,
flags BLOB NOT NULL,
highestmodseq INTEGER,
PRIMARY KEY (mailbox_hash)
);
CREATE INDEX IF NOT EXISTS envelope_uid_idx ON envelopes(mailbox_hash, uid);
CREATE INDEX IF NOT EXISTS envelope_idx ON envelopes(hash);
CREATE INDEX IF NOT EXISTS mailbox_idx ON mailbox(mailbox_hash);",
),
version: 3,
};
impl From<EnvelopeHash> for Value {
fn from(env_hash: EnvelopeHash) -> Self {
(env_hash.0 as i64).into()
}
}
impl ToSql for ModSequence {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
Ok(ToSqlOutput::from(self.0.get() as i64))
}
}
impl FromSql for ModSequence {
fn column_result(value: rusqlite::types::ValueRef) -> FromSqlResult<Self> {
let i: i64 = FromSql::column_result(value)?;
if i == 0 {
return Err(FromSqlError::OutOfRange(0));
}
Ok(Self::try_from(i).unwrap())
}
}
impl Sqlite3Cache {
pub fn get(uid_store: Arc<UIDStore>) -> Result<Box<dyn ImapCache>> {
let connection =
match sqlite3::open_or_create_db(&DB_DESCRIPTION, Some(&uid_store.account_name)) {
Ok(c) => Ok(c),
Err(err) => {
// try resetting database on error, but only one time.
if Self::reset_db(&uid_store).is_ok() {
sqlite3::open_or_create_db(
&DB_DESCRIPTION,
Some(&uid_store.account_name),
)
} else {
Err(err)
}
}
}?;
Ok(Box::new(Self {
connection,
loaded_mailboxes: BTreeSet::default(),
uid_store,
}))
}
fn max_uid(&self, mailbox_hash: MailboxHash) -> Result<UID> {
let mut stmt = self
.connection
.prepare("SELECT MAX(uid) FROM envelopes WHERE mailbox_hash = ?1;")?;
let mut ret: Vec<UID> = stmt
.query_map(sqlite3::params![mailbox_hash], |row| {
row.get(0).map(|i: Sqlite3UID| i as UID)
})?
.collect::<std::result::Result<_, _>>()?;
Ok(ret.pop().unwrap_or(0))
}
}
impl ImapCacheReset for Sqlite3Cache {
fn reset_db(uid_store: &UIDStore) -> Result<()> {
sqlite3::reset_db(&DB_DESCRIPTION, Some(&uid_store.account_name))
}
}
impl ImapCache for Sqlite3Cache {
fn reset(&mut self) -> Result<()> {
Self::reset_db(&self.uid_store)
}
fn mailbox_state(&mut self, mailbox_hash: MailboxHash) -> Result<Option<()>> {
if self.loaded_mailboxes.contains(&mailbox_hash) {
return Ok(Some(()));
}
debug!("loading mailbox state {} from cache", mailbox_hash);
let mut stmt = self.connection.prepare(
"SELECT uidvalidity, flags, highestmodseq FROM mailbox WHERE mailbox_hash = ?1;",
)?;
let mut ret = stmt.query_map(sqlite3::params![mailbox_hash], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?;
if let Some(v) = ret.next() {
let (uidvalidity, flags, highestmodseq): (
UIDVALIDITY,
Vec<u8>,
Option<ModSequence>,
) = v?;
debug!(
"mailbox state {} in cache uidvalidity {}",
mailbox_hash, uidvalidity
);
debug!(
"mailbox state {} in cache highestmodseq {:?}",
mailbox_hash, &highestmodseq
);
debug!(
"mailbox state {} inserting flags: {:?}",
mailbox_hash,
to_str!(&flags)
);
self.uid_store
.highestmodseqs
.lock()
.unwrap()
.entry(mailbox_hash)
.and_modify(|entry| *entry = highestmodseq.ok_or(()))
.or_insert_with(|| highestmodseq.ok_or(()));
self.uid_store
.uidvalidity
.lock()
.unwrap()
.entry(mailbox_hash)
.and_modify(|entry| *entry = uidvalidity)
.or_insert(uidvalidity);
let mut tag_lck = self.uid_store.collection.tag_index.write().unwrap();
for f in to_str!(&flags).split('\0') {
let hash = TagHash::from_bytes(f.as_bytes());
tag_lck.entry(hash).or_insert_with(|| f.to_string());
}
self.loaded_mailboxes.insert(mailbox_hash);
Ok(Some(()))
} else {
debug!("mailbox state {} not in cache", mailbox_hash);
Ok(None)
}
}
fn clear(
&mut self,
mailbox_hash: MailboxHash,
select_response: &SelectResponse,
) -> Result<()> {
debug!("clear mailbox_hash {} {:?}", mailbox_hash, select_response);
self.loaded_mailboxes.remove(&mailbox_hash);
self.connection
.execute(
"DELETE FROM mailbox WHERE mailbox_hash = ?1",
sqlite3::params![mailbox_hash],
)
.chain_err_summary(|| {
format!(
"Could not clear cache of mailbox {} account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
if let Some(Ok(highestmodseq)) = select_response.highestmodseq {
self.connection
.execute(
"INSERT OR IGNORE INTO mailbox (uidvalidity, flags, highestmodseq, \
mailbox_hash) VALUES (?1, ?2, ?3, ?4)",
sqlite3::params![
select_response.uidvalidity as Sqlite3UID,
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
highestmodseq,
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not insert uidvalidity {} in header_cache of account {}",
select_response.uidvalidity, self.uid_store.account_name
)
})?;
} else {
self.connection
.execute(
"INSERT OR IGNORE INTO mailbox (uidvalidity, flags, mailbox_hash) VALUES \
(?1, ?2, ?3)",
sqlite3::params![
select_response.uidvalidity as Sqlite3UID,
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not insert mailbox {} in header_cache of account {}",
select_response.uidvalidity, self.uid_store.account_name
)
})?;
}
Ok(())
}
fn update_mailbox(
&mut self,
mailbox_hash: MailboxHash,
select_response: &SelectResponse,
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return self.clear(mailbox_hash, select_response);
}
if let Some(Ok(highestmodseq)) = select_response.highestmodseq {
self.connection
.execute(
"UPDATE mailbox SET flags=?1, highestmodseq =?2 where mailbox_hash = ?3;",
sqlite3::params![
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
highestmodseq,
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not update mailbox {} in header_cache of account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
} else {
self.connection
.execute(
"UPDATE mailbox SET flags=?1 where mailbox_hash = ?2;",
sqlite3::params![
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not update mailbox {} in header_cache of account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
}
Ok(())
}
fn envelopes(&mut self, mailbox_hash: MailboxHash) -> Result<Option<Vec<EnvelopeHash>>> {
debug!("envelopes mailbox_hash {}", mailbox_hash);
if self.mailbox_state(mailbox_hash)?.is_none() {
return Ok(None);
}
let res = {
let mut stmt = self.connection.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash], |row| {
Ok((
row.get(0).map(|i: Sqlite3UID| i as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>();
x
};
let ret: Vec<(UID, Envelope, Option<ModSequence>)> = match res {
Err(err) if matches!(&err, rusqlite::Error::FromSqlConversionFailure(_, _, _)) => {
drop(err);
self.reset()?;
return Ok(None);
}
Err(err) => return Err(err.into()),
Ok(v) => v,
};
let mut max_uid = 0;
let mut env_lck = self.uid_store.envelopes.lock().unwrap();
let mut hash_index_lck = self.uid_store.hash_index.lock().unwrap();
let mut uid_index_lck = self.uid_store.uid_index.lock().unwrap();
let mut env_hashes = Vec::with_capacity(ret.len());
for (uid, env, modseq) in ret {
env_hashes.push(env.hash());
max_uid = std::cmp::max(max_uid, uid);
hash_index_lck.insert(env.hash(), (uid, mailbox_hash));
uid_index_lck.insert((mailbox_hash, uid), env.hash());
env_lck.insert(
env.hash(),
CachedEnvelope {
inner: env,
uid,
mailbox_hash,
modsequence: modseq,
},
);
}
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, max_uid);
Ok(Some(env_hashes))
}
fn insert_envelopes(
&mut self,
mailbox_hash: MailboxHash,
fetches: &[FetchResponse<'_>],
) -> Result<()> {
debug!(
"insert_envelopes mailbox_hash {} len {}",
mailbox_hash,
fetches.len()
);
let mut max_uid = self
.uid_store
.max_uids
.lock()
.unwrap()
.get(&mailbox_hash)
.cloned()
.unwrap_or_default();
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
for item in fetches {
if let FetchResponse {
uid: Some(uid),
message_sequence_number: _,
modseq,
flags: _,
body: _,
references: _,
envelope: Some(envelope),
raw_fetch_value: _,
} = item
{
max_uid = std::cmp::max(max_uid, *uid);
tx.execute(
"INSERT OR REPLACE INTO envelopes (hash, uid, mailbox_hash, modsequence, \
envelope) VALUES (?1, ?2, ?3, ?4, ?5)",
sqlite3::params![
envelope.hash(),
*uid as Sqlite3UID,
mailbox_hash,
modseq,
&envelope
],
)
.chain_err_summary(|| {
format!(
"Could not insert envelope {} {} in header_cache of account {}",
envelope.message_id(),
envelope.hash(),
uid_store.account_name
)
})?;
}
}
tx.commit()?;
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, max_uid);
Ok(())
}
fn update_flags(
&mut self,
env_hashes: EnvelopeHashBatch,
mailbox_hash: MailboxHash,
flags: SmallVec<[FlagOp; 8]>,
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
let values =
std::rc::Rc::new(env_hashes.iter().map(Value::from).collect::<Vec<Value>>());
let mut stmt =
tx.prepare("SELECT uid, envelope FROM envelopes WHERE hash IN rarray(?1);")?;
let rows = stmt
.query_map([values], |row| Ok((row.get(0)?, row.get(1)?)))?
.filter_map(|r| r.ok())
.collect::<Vec<(UID, Envelope)>>();
drop(stmt);
let mut stmt = tx.prepare(
"UPDATE envelopes SET envelope = ?1 WHERE mailbox_hash = ?2 AND uid = ?3;",
)?;
for (uid, mut env) in rows {
for op in flags.iter() {
match op {
FlagOp::UnSet(flag) | FlagOp::Set(flag) => {
let mut f = env.flags();
f.set(*flag, op.as_bool());
env.set_flags(f);
}
FlagOp::UnSetTag(tag) | FlagOp::SetTag(tag) => {
let hash = TagHash::from_bytes(tag.as_bytes());
if op.as_bool() {
env.tags_mut().insert(hash);
} else {
env.tags_mut().remove(&hash);
}
}
}
}
stmt.execute(sqlite3::params![&env, mailbox_hash, uid as Sqlite3UID])?;
uid_store
.envelopes
.lock()
.unwrap()
.entry(env.hash())
.and_modify(|entry| {
entry.inner = env;
});
}
drop(stmt);
tx.commit()?;
Ok(())
}
fn update(
&mut self,
mailbox_hash: MailboxHash,
refresh_events: &[(UID, RefreshEvent)],
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
let mut hash_index_lck = uid_store.hash_index.lock().unwrap();
for (uid, event) in refresh_events {
match &event.kind {
RefreshEventKind::Remove(env_hash) => {
hash_index_lck.remove(env_hash);
tx.execute(
"DELETE FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
sqlite3::params![mailbox_hash, *uid as Sqlite3UID],
)
.chain_err_summary(|| {
format!(
"Could not remove envelope {} uid {} from mailbox {} account {}",
env_hash, *uid, mailbox_hash, uid_store.account_name
)
})?;
}
RefreshEventKind::NewFlags(env_hash, (flags, tags)) => {
let mut stmt = tx.prepare(
"SELECT envelope FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
)?;
let mut ret: Vec<Envelope> = stmt
.query_map(sqlite3::params![mailbox_hash, *uid as Sqlite3UID], |row| {
row.get(0)
})?
.collect::<std::result::Result<_, _>>()?;
if let Some(mut env) = ret.pop() {
env.set_flags(*flags);
env.tags_mut().clear();
env.tags_mut()
.extend(tags.iter().map(|t| TagHash::from_bytes(t.as_bytes())));
tx.execute(
"UPDATE envelopes SET envelope = ?1 WHERE mailbox_hash = ?2 AND \
uid = ?3;",
sqlite3::params![&env, mailbox_hash, *uid as Sqlite3UID],
)
.chain_err_summary(|| {
format!(
"Could not update envelope {} uid {} from mailbox {} account \
{}",
env_hash, *uid, mailbox_hash, uid_store.account_name
)
})?;
uid_store
.envelopes
.lock()
.unwrap()
.entry(*env_hash)
.and_modify(|entry| {
entry.inner = env;
});
}
}
_ => {}
}
}
tx.commit()?;
let new_max_uid = self.max_uid(mailbox_hash).unwrap_or(0);
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, new_max_uid);
Ok(())
}
fn find_envelope(
&mut self,
identifier: std::result::Result<UID, EnvelopeHash>,
mailbox_hash: MailboxHash,
) -> Result<Option<CachedEnvelope>> {
let mut ret: Vec<(UID, Envelope, Option<ModSequence>)> = match identifier {
Ok(uid) => {
let mut stmt = self.connection.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1 \
AND uid = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, uid as Sqlite3UID], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>()?;
x
}
Err(env_hash) => {
let mut stmt = self.connection.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1 \
AND hash = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, env_hash], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>()?;
x
}
};
if ret.len() != 1 {
return Ok(None);
}
let (uid, inner, modsequence) = ret.pop().unwrap();
Ok(Some(CachedEnvelope {
inner,
uid,
mailbox_hash,
modsequence,
}))
}
fn rfc822(
&mut self,
identifier: std::result::Result<UID, EnvelopeHash>,
mailbox_hash: MailboxHash,
) -> Result<Option<Vec<u8>>> {
let mut ret: Vec<Option<Vec<u8>>> = match identifier {
Ok(uid) => {
let mut stmt = self.connection.prepare(
"SELECT rfc822 FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, uid as Sqlite3UID], |row| {
row.get(0)
})?
.collect::<std::result::Result<_, _>>()?;
x
}
Err(env_hash) => {
let mut stmt = self.connection.prepare(
"SELECT rfc822 FROM envelopes WHERE mailbox_hash = ?1 AND hash = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, env_hash], |row| row.get(0))?
.collect::<std::result::Result<_, _>>()?;
x
}
};
if ret.len() != 1 {
return Ok(None);
}
Ok(ret.pop().unwrap())
}
}
}
pub(super) async fn fetch_cached_envs(state: &mut FetchState) -> Result<Option<Vec<Envelope>>> { pub(super) async fn fetch_cached_envs(state: &mut FetchState) -> Result<Option<Vec<Envelope>>> {
let FetchState { let FetchState {
stage: _, stage: _,
@@ -822,96 +142,3 @@ pub(super) async fn fetch_cached_envs(state: &mut FetchState) -> Result<Option<V
} }
} }
} }
#[cfg(not(feature = "sqlite3"))]
pub use default_m::*;
#[cfg(not(feature = "sqlite3"))]
pub mod default_m {
use super::*;
#[derive(Debug)]
pub struct DefaultCache;
impl DefaultCache {
pub fn get(_uid_store: Arc<UIDStore>) -> Result<Box<dyn ImapCache>> {
Ok(Box::new(Self))
}
}
impl ImapCacheReset for DefaultCache {
fn reset_db(_: &UIDStore) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
}
impl ImapCache for DefaultCache {
fn reset(&mut self) -> Result<()> {
Ok(())
}
fn mailbox_state(&mut self, _mailbox_hash: MailboxHash) -> Result<Option<()>> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn clear(
&mut self,
_mailbox_hash: MailboxHash,
_select_response: &SelectResponse,
) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn envelopes(&mut self, _mailbox_hash: MailboxHash) -> Result<Option<Vec<EnvelopeHash>>> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn insert_envelopes(
&mut self,
_mailbox_hash: MailboxHash,
_fetches: &[FetchResponse<'_>],
) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn update_mailbox(
&mut self,
_mailbox_hash: MailboxHash,
_select_response: &SelectResponse,
) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn update(
&mut self,
_mailbox_hash: MailboxHash,
_refresh_events: &[(UID, RefreshEvent)],
) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn find_envelope(
&mut self,
_identifier: std::result::Result<UID, EnvelopeHash>,
_mailbox_hash: MailboxHash,
) -> Result<Option<CachedEnvelope>> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn rfc822(
&mut self,
_identifier: std::result::Result<UID, EnvelopeHash>,
_mailbox_hash: MailboxHash,
) -> Result<Option<Vec<u8>>> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
fn update_flags(
&mut self,
_env_hashes: EnvelopeHashBatch,
_mailbox_hash: MailboxHash,
_flags: SmallVec<[FlagOp; 8]>,
) -> Result<()> {
Err(Error::new("melib is not built with any imap cache").set_kind(ErrorKind::Bug))
}
}
}

@@ -0,0 +1,21 @@
//
// ____
//
// Copyright 2024 Emmanouil Pitsidianakis <manos@pitsidianak.is>
//
// This file is part of ____.
//
// ____ is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ____ is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ____. If not, see <http://www.gnu.org/licenses/>.
//
// SPDX-License-Identifier: EUPL-1.2 OR GPL-3.0-or-later

@@ -0,0 +1,714 @@
//
// melib - IMAP
//
// Copyright 2024 Emmanouil Pitsidianakis <manos@pitsidianak.is>
//
// This file is part of melib.
//
// melib is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// melib is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with melib. If not, see <http://www.gnu.org/licenses/>.
//
// SPDX-License-Identifier: EUPL-1.2 OR GPL-3.0-or-later
use std::{collections::BTreeSet, sync::Arc};
use smallvec::SmallVec;
use crate::{
backends::{EnvelopeHashBatch, FlagOp, MailboxHash, RefreshEvent, RefreshEventKind, TagHash},
email::{Envelope, EnvelopeHash},
error::{Error, ErrorKind, Result, ResultIntoError},
imap::{
cache::{CachedEnvelope, ImapCache, ImapCacheReset},
FetchResponse, ModSequence, SelectResponse, UIDStore, UID, UIDVALIDITY,
},
utils::sqlite3::{
self,
rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, Value},
Connection, DatabaseDescription,
},
};
type Sqlite3UID = i32;
#[derive(Debug)]
pub struct Sqlite3Cache {
connection: Connection,
loaded_mailboxes: BTreeSet<MailboxHash>,
uid_store: Arc<UIDStore>,
}
const DB_DESCRIPTION: DatabaseDescription = DatabaseDescription {
name: "header_cache.db",
identifier: None,
application_prefix: "meli",
init_script: Some(
"PRAGMA foreign_keys = true;
PRAGMA encoding = 'UTF-8';
CREATE TABLE IF NOT EXISTS envelopes (
hash INTEGER NOT NULL,
mailbox_hash INTEGER NOT NULL,
uid INTEGER NOT NULL,
modsequence INTEGER,
rfc822 BLOB,
envelope BLOB NOT NULL,
PRIMARY KEY (mailbox_hash, uid),
FOREIGN KEY (mailbox_hash) REFERENCES mailbox(mailbox_hash) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS mailbox (
mailbox_hash INTEGER UNIQUE,
uidvalidity INTEGER,
flags BLOB NOT NULL,
highestmodseq INTEGER,
PRIMARY KEY (mailbox_hash)
);
CREATE INDEX IF NOT EXISTS envelope_uid_idx ON envelopes(mailbox_hash, uid);
CREATE INDEX IF NOT EXISTS envelope_idx ON envelopes(hash);
CREATE INDEX IF NOT EXISTS mailbox_idx ON mailbox(mailbox_hash);",
),
version: 3,
};
impl From<EnvelopeHash> for Value {
fn from(env_hash: EnvelopeHash) -> Self {
(env_hash.0 as i64).into()
}
}
impl ToSql for ModSequence {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
Ok(ToSqlOutput::from(self.0.get() as i64))
}
}
impl FromSql for ModSequence {
fn column_result(value: rusqlite::types::ValueRef) -> FromSqlResult<Self> {
let i: i64 = FromSql::column_result(value)?;
if i == 0 {
return Err(FromSqlError::OutOfRange(0));
}
Ok(Self::try_from(i).unwrap())
}
}
impl Sqlite3Cache {
pub fn get(uid_store: Arc<UIDStore>) -> Result<Box<dyn ImapCache>> {
let db_desc = DatabaseDescription {
identifier: Some(uid_store.account_name.to_string().into()),
..DB_DESCRIPTION.clone()
};
let connection = match db_desc.open_or_create_db() {
Ok(c) => Ok(c),
Err(err) => {
// try resetting database on error, but only one time.
if db_desc.reset_db().is_ok() {
db_desc.open_or_create_db()
} else {
Err(err)
}
}
}?;
Ok(Box::new(Self {
connection,
loaded_mailboxes: BTreeSet::default(),
uid_store,
}))
}
fn max_uid(&mut self, mailbox_hash: MailboxHash) -> Result<UID> {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare("SELECT MAX(uid) FROM envelopes WHERE mailbox_hash = ?1;")?;
let mut ret: Vec<UID> = stmt
.query_map(sqlite3::params![mailbox_hash], |row| {
row.get(0).map(|i: Sqlite3UID| i as UID)
})?
.collect::<std::result::Result<_, _>>()?;
Ok(ret.pop().unwrap_or(0))
}
}
impl ImapCacheReset for Sqlite3Cache {
fn reset_db(uid_store: &UIDStore) -> Result<()> {
let db_desc = DatabaseDescription {
identifier: Some(uid_store.account_name.to_string().into()),
..DB_DESCRIPTION.clone()
};
db_desc.reset_db()
}
}
impl ImapCache for Sqlite3Cache {
fn reset(&mut self) -> Result<()> {
Self::reset_db(&self.uid_store)
}
fn mailbox_state(&mut self, mailbox_hash: MailboxHash) -> Result<Option<()>> {
if self.loaded_mailboxes.contains(&mailbox_hash) {
return Ok(Some(()));
}
debug!("loading mailbox state {} from cache", mailbox_hash);
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT uidvalidity, flags, highestmodseq FROM mailbox WHERE mailbox_hash = ?1;",
)?;
let mut ret = stmt.query_map(sqlite3::params![mailbox_hash], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?;
if let Some(v) = ret.next() {
let (uidvalidity, flags, highestmodseq): (UIDVALIDITY, Vec<u8>, Option<ModSequence>) =
v?;
debug!(
"mailbox state {} in cache uidvalidity {}",
mailbox_hash, uidvalidity
);
debug!(
"mailbox state {} in cache highestmodseq {:?}",
mailbox_hash, &highestmodseq
);
debug!(
"mailbox state {} inserting flags: {:?}",
mailbox_hash,
to_str!(&flags)
);
self.uid_store
.highestmodseqs
.lock()
.unwrap()
.entry(mailbox_hash)
.and_modify(|entry| *entry = highestmodseq.ok_or(()))
.or_insert_with(|| highestmodseq.ok_or(()));
self.uid_store
.uidvalidity
.lock()
.unwrap()
.entry(mailbox_hash)
.and_modify(|entry| *entry = uidvalidity)
.or_insert(uidvalidity);
let mut tag_lck = self.uid_store.collection.tag_index.write().unwrap();
for f in to_str!(&flags).split('\0') {
let hash = TagHash::from_bytes(f.as_bytes());
tag_lck.entry(hash).or_insert_with(|| f.to_string());
}
self.loaded_mailboxes.insert(mailbox_hash);
Ok(Some(()))
} else {
debug!("mailbox state {} not in cache", mailbox_hash);
Ok(None)
}
}
fn clear(&mut self, mailbox_hash: MailboxHash, select_response: &SelectResponse) -> Result<()> {
debug!("clear mailbox_hash {} {:?}", mailbox_hash, select_response);
self.loaded_mailboxes.remove(&mailbox_hash);
let tx = self.connection.transaction()?;
tx.execute(
"DELETE FROM mailbox WHERE mailbox_hash = ?1",
sqlite3::params![mailbox_hash],
)
.chain_err_summary(|| {
format!(
"Could not clear cache of mailbox {} account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
if let Some(Ok(highestmodseq)) = select_response.highestmodseq {
tx.execute(
"INSERT OR IGNORE INTO mailbox (uidvalidity, flags, highestmodseq, mailbox_hash) \
VALUES (?1, ?2, ?3, ?4)",
sqlite3::params![
select_response.uidvalidity as Sqlite3UID,
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
highestmodseq,
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not insert uidvalidity {} in header_cache of account {}",
select_response.uidvalidity, self.uid_store.account_name
)
})?;
} else {
tx.execute(
"INSERT OR IGNORE INTO mailbox (uidvalidity, flags, mailbox_hash) VALUES (?1, ?2, \
?3)",
sqlite3::params![
select_response.uidvalidity as Sqlite3UID,
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not insert mailbox {} in header_cache of account {}",
select_response.uidvalidity, self.uid_store.account_name
)
})?;
}
tx.commit()?;
Ok(())
}
fn update_mailbox(
&mut self,
mailbox_hash: MailboxHash,
select_response: &SelectResponse,
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return self.clear(mailbox_hash, select_response);
}
let tx = self.connection.transaction()?;
if let Some(Ok(highestmodseq)) = select_response.highestmodseq {
tx.execute(
"UPDATE mailbox SET flags=?1, highestmodseq =?2 where mailbox_hash = ?3;",
sqlite3::params![
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
highestmodseq,
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not update mailbox {} in header_cache of account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
} else {
tx.execute(
"UPDATE mailbox SET flags=?1 where mailbox_hash = ?2;",
sqlite3::params![
select_response
.flags
.1
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>()
.join("\0")
.as_bytes(),
mailbox_hash
],
)
.chain_err_summary(|| {
format!(
"Could not update mailbox {} in header_cache of account {}",
mailbox_hash, self.uid_store.account_name
)
})?;
}
tx.commit()?;
Ok(())
}
fn envelopes(&mut self, mailbox_hash: MailboxHash) -> Result<Option<Vec<EnvelopeHash>>> {
debug!("envelopes mailbox_hash {}", mailbox_hash);
if self.mailbox_state(mailbox_hash)?.is_none() {
return Ok(None);
}
let res = {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash], |row| {
Ok((
row.get(0).map(|i: Sqlite3UID| i as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>();
x
};
let ret: Vec<(UID, Envelope, Option<ModSequence>)> = match res {
Err(err) if matches!(&err, rusqlite::Error::FromSqlConversionFailure(_, _, _)) => {
drop(err);
self.reset()?;
return Ok(None);
}
Err(err) => return Err(err.into()),
Ok(v) => v,
};
let mut max_uid = 0;
let mut env_lck = self.uid_store.envelopes.lock().unwrap();
let mut hash_index_lck = self.uid_store.hash_index.lock().unwrap();
let mut uid_index_lck = self.uid_store.uid_index.lock().unwrap();
let mut env_hashes = Vec::with_capacity(ret.len());
for (uid, env, modseq) in ret {
env_hashes.push(env.hash());
max_uid = std::cmp::max(max_uid, uid);
hash_index_lck.insert(env.hash(), (uid, mailbox_hash));
uid_index_lck.insert((mailbox_hash, uid), env.hash());
env_lck.insert(
env.hash(),
CachedEnvelope {
inner: env,
uid,
mailbox_hash,
modsequence: modseq,
},
);
}
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, max_uid);
Ok(Some(env_hashes))
}
fn insert_envelopes(
&mut self,
mailbox_hash: MailboxHash,
fetches: &[FetchResponse<'_>],
) -> Result<()> {
debug!(
"insert_envelopes mailbox_hash {} len {}",
mailbox_hash,
fetches.len()
);
let mut max_uid = self
.uid_store
.max_uids
.lock()
.unwrap()
.get(&mailbox_hash)
.cloned()
.unwrap_or_default();
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
for item in fetches {
if let FetchResponse {
uid: Some(uid),
message_sequence_number: _,
modseq,
flags: _,
body: _,
references: _,
envelope: Some(envelope),
raw_fetch_value: _,
} = item
{
max_uid = std::cmp::max(max_uid, *uid);
tx.execute(
"INSERT OR REPLACE INTO envelopes (hash, uid, mailbox_hash, modsequence, \
envelope) VALUES (?1, ?2, ?3, ?4, ?5)",
sqlite3::params![
envelope.hash(),
*uid as Sqlite3UID,
mailbox_hash,
modseq,
&envelope
],
)
.chain_err_summary(|| {
format!(
"Could not insert envelope {} {} in header_cache of account {}",
envelope.message_id(),
envelope.hash(),
uid_store.account_name
)
})?;
}
}
tx.commit()?;
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, max_uid);
Ok(())
}
fn update_flags(
&mut self,
env_hashes: EnvelopeHashBatch,
mailbox_hash: MailboxHash,
flags: SmallVec<[FlagOp; 8]>,
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
let values = std::rc::Rc::new(env_hashes.iter().map(Value::from).collect::<Vec<Value>>());
let mut stmt =
tx.prepare("SELECT uid, envelope FROM envelopes WHERE hash IN rarray(?1);")?;
let rows = stmt
.query_map([values], |row| Ok((row.get(0)?, row.get(1)?)))?
.filter_map(|r| r.ok())
.collect::<Vec<(UID, Envelope)>>();
drop(stmt);
let mut stmt =
tx.prepare("UPDATE envelopes SET envelope = ?1 WHERE mailbox_hash = ?2 AND uid = ?3;")?;
for (uid, mut env) in rows {
for op in flags.iter() {
match op {
FlagOp::UnSet(flag) | FlagOp::Set(flag) => {
let mut f = env.flags();
f.set(*flag, op.as_bool());
env.set_flags(f);
}
FlagOp::UnSetTag(tag) | FlagOp::SetTag(tag) => {
let hash = TagHash::from_bytes(tag.as_bytes());
if op.as_bool() {
env.tags_mut().insert(hash);
} else {
env.tags_mut().remove(&hash);
}
}
}
}
stmt.execute(sqlite3::params![&env, mailbox_hash, uid as Sqlite3UID])?;
uid_store
.envelopes
.lock()
.unwrap()
.entry(env.hash())
.and_modify(|entry| {
entry.inner = env;
});
}
drop(stmt);
tx.commit()?;
Ok(())
}
fn update(
&mut self,
mailbox_hash: MailboxHash,
refresh_events: &[(UID, RefreshEvent)],
) -> Result<()> {
if self.mailbox_state(mailbox_hash)?.is_none() {
return Err(Error::new("Mailbox is not in cache").set_kind(ErrorKind::Bug));
}
{
let Self {
ref mut connection,
ref uid_store,
loaded_mailboxes: _,
} = self;
let tx = connection.transaction()?;
let mut hash_index_lck = uid_store.hash_index.lock().unwrap();
for (uid, event) in refresh_events {
match &event.kind {
RefreshEventKind::Remove(env_hash) => {
hash_index_lck.remove(env_hash);
tx.execute(
"DELETE FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
sqlite3::params![mailbox_hash, *uid as Sqlite3UID],
)
.chain_err_summary(|| {
format!(
"Could not remove envelope {} uid {} from mailbox {} account {}",
env_hash, *uid, mailbox_hash, uid_store.account_name
)
})?;
}
RefreshEventKind::NewFlags(env_hash, (flags, tags)) => {
let mut stmt = tx.prepare(
"SELECT envelope FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
)?;
let mut ret: Vec<Envelope> = stmt
.query_map(sqlite3::params![mailbox_hash, *uid as Sqlite3UID], |row| {
row.get(0)
})?
.collect::<std::result::Result<_, _>>()?;
if let Some(mut env) = ret.pop() {
env.set_flags(*flags);
env.tags_mut().clear();
env.tags_mut()
.extend(tags.iter().map(|t| TagHash::from_bytes(t.as_bytes())));
tx.execute(
"UPDATE envelopes SET envelope = ?1 WHERE mailbox_hash = ?2 AND \
uid = ?3;",
sqlite3::params![&env, mailbox_hash, *uid as Sqlite3UID],
)
.chain_err_summary(|| {
format!(
"Could not update envelope {} uid {} from mailbox {} account \
{}",
env_hash, *uid, mailbox_hash, uid_store.account_name
)
})?;
uid_store
.envelopes
.lock()
.unwrap()
.entry(*env_hash)
.and_modify(|entry| {
entry.inner = env;
});
}
}
_ => {}
}
}
tx.commit()?;
}
let new_max_uid = self.max_uid(mailbox_hash).unwrap_or(0);
self.uid_store
.max_uids
.lock()
.unwrap()
.insert(mailbox_hash, new_max_uid);
Ok(())
}
fn find_envelope(
&mut self,
identifier: std::result::Result<UID, EnvelopeHash>,
mailbox_hash: MailboxHash,
) -> Result<Option<CachedEnvelope>> {
let mut ret: Vec<(UID, Envelope, Option<ModSequence>)> = match identifier {
Ok(uid) => {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1 AND \
uid = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, uid as Sqlite3UID], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>()?;
x
}
Err(env_hash) => {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT uid, envelope, modsequence FROM envelopes WHERE mailbox_hash = ?1 AND \
hash = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, env_hash], |row| {
Ok((
row.get(0).map(|u: Sqlite3UID| u as UID)?,
row.get(1)?,
row.get(2)?,
))
})?
.collect::<std::result::Result<_, _>>()?;
x
}
};
if ret.len() != 1 {
return Ok(None);
}
let (uid, inner, modsequence) = ret.pop().unwrap();
Ok(Some(CachedEnvelope {
inner,
uid,
mailbox_hash,
modsequence,
}))
}
fn rfc822(
&mut self,
identifier: std::result::Result<UID, EnvelopeHash>,
mailbox_hash: MailboxHash,
) -> Result<Option<Vec<u8>>> {
let mut ret: Vec<Option<Vec<u8>>> = match identifier {
Ok(uid) => {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT rfc822 FROM envelopes WHERE mailbox_hash = ?1 AND uid = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, uid as Sqlite3UID], |row| {
row.get(0)
})?
.collect::<std::result::Result<_, _>>()?;
x
}
Err(env_hash) => {
let tx = self.connection.transaction()?;
let mut stmt = tx.prepare(
"SELECT rfc822 FROM envelopes WHERE mailbox_hash = ?1 AND hash = ?2;",
)?;
#[allow(clippy::let_and_return)] // false positive, the let binding is needed
// for the temporary to live long enough
let x = stmt
.query_map(sqlite3::params![mailbox_hash, env_hash], |row| row.get(0))?
.collect::<std::result::Result<_, _>>()?;
x
}
};
if ret.len() != 1 {
return Ok(None);
}
Ok(ret.pop().unwrap())
}
}
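The cache methods above share one shape: open a single transaction, batch-read the affected rows (using the `rarray()` table-valued function when looking up a list of hashes), rewrite them, and commit once. A minimal sketch of that lookup pattern, assuming rusqlite's `array` feature and an illustrative `envelopes(hash, uid)` table; loading the array module is normally done once when the connection is opened, as `open_or_create_db` does later in this commit:

```rust
use std::rc::Rc;

use rusqlite::{types::Value, Connection, Result};

/// Batch-select the `uid` column for a set of hashes in one statement, inside
/// a single transaction, via the `rarray()` table-valued function.
fn uids_for_hashes(conn: &mut Connection, hashes: &[i64]) -> Result<Vec<i64>> {
    // `rarray()` needs the array virtual-table module; shown here only to keep
    // the sketch self-contained.
    rusqlite::vtab::array::load_module(conn)?;
    let tx = conn.transaction()?;
    let values = Rc::new(hashes.iter().copied().map(Value::from).collect::<Vec<Value>>());
    let uids = {
        let mut stmt = tx.prepare("SELECT uid FROM envelopes WHERE hash IN rarray(?1);")?;
        stmt.query_map([values], |row| row.get::<_, i64>(0))?
            .collect::<Result<Vec<i64>>>()?
    };
    // The statement is dropped before committing, releasing its borrow of `tx`.
    tx.commit()?;
    Ok(uids)
}
```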

@ -37,22 +37,22 @@ impl ImapConnection {
return Ok(None); return Ok(None);
} }
#[cfg(not(feature = "sqlite3"))] if let Some(mut cache_handle) = self.uid_store.cache_handle()? {
let mut cache_handle = DefaultCache::get(self.uid_store.clone())?; if cache_handle.mailbox_state(mailbox_hash)?.is_none() {
#[cfg(feature = "sqlite3")] return Ok(None);
let mut cache_handle = Sqlite3Cache::get(self.uid_store.clone())?; }
if cache_handle.mailbox_state(mailbox_hash)?.is_none() {
return Ok(None);
}
match self.sync_policy { match self.sync_policy {
SyncPolicy::None => Ok(None), SyncPolicy::None => Ok(None),
SyncPolicy::Basic => self.resync_basic(cache_handle, mailbox_hash).await, SyncPolicy::Basic => self.resync_basic(cache_handle, mailbox_hash).await,
SyncPolicy::Condstore => self.resync_condstore(cache_handle, mailbox_hash).await, SyncPolicy::Condstore => self.resync_condstore(cache_handle, mailbox_hash).await,
SyncPolicy::CondstoreQresync => { SyncPolicy::CondstoreQresync => {
self.resync_condstoreqresync(cache_handle, mailbox_hash) self.resync_condstoreqresync(cache_handle, mailbox_hash)
.await .await
}
} }
} else {
Ok(None)
} }
} }
@ -61,14 +61,8 @@ impl ImapConnection {
mailbox_hash: MailboxHash, mailbox_hash: MailboxHash,
) -> Option<Result<Vec<EnvelopeHash>>> { ) -> Option<Result<Vec<EnvelopeHash>>> {
debug!("load_cache {}", mailbox_hash); debug!("load_cache {}", mailbox_hash);
#[cfg(not(feature = "sqlite3"))] let mut cache_handle = match self.uid_store.cache_handle() {
let mut cache_handle = match DefaultCache::get(self.uid_store.clone()) { Ok(v) => v?,
Ok(v) => v,
Err(err) => return Some(Err(err)),
};
#[cfg(feature = "sqlite3")]
let mut cache_handle = match Sqlite3Cache::get(self.uid_store.clone()) {
Ok(v) => v,
Err(err) => return Some(Err(err)), Err(err) => return Some(Err(err)),
}; };
match cache_handle.mailbox_state(mailbox_hash) { match cache_handle.mailbox_state(mailbox_hash) {
@ -85,7 +79,7 @@ impl ImapConnection {
} }
} }
//rfc4549_Synchronization_Operations_for_Disconnected_IMAP4_Clients /// > rfc4549_Synchronization_Operations_for_Disconnected_IMAP4_Clients
pub async fn resync_basic( pub async fn resync_basic(
&mut self, &mut self,
mut cache_handle: Box<dyn ImapCache>, mut cache_handle: Box<dyn ImapCache>,
@ -317,8 +311,8 @@ impl ImapConnection {
Ok(Some(payload.into_iter().map(|(_, env)| env).collect())) Ok(Some(payload.into_iter().map(|(_, env)| env).collect()))
} }
//rfc4549_Synchronization_Operations_for_Disconnected_IMAP4_Clients /// > rfc4549_Synchronization_Operations_for_Disconnected_IMAP4_Clients
//Section 6.1 /// > Section 6.1
pub async fn resync_condstore( pub async fn resync_condstore(
&mut self, &mut self,
mut cache_handle: Box<dyn ImapCache>, mut cache_handle: Box<dyn ImapCache>,
@ -627,8 +621,9 @@ impl ImapConnection {
Ok(Some(payload.into_iter().map(|(_, env)| env).collect())) Ok(Some(payload.into_iter().map(|(_, env)| env).collect()))
} }
//rfc7162_Quick Flag Changes Resynchronization (CONDSTORE)_and Quick Mailbox /// > rfc7162_Quick Flag Changes Resynchronization (CONDSTORE)_and Quick
// Resynchronization (QRESYNC) /// > Mailbox
/// > Resynchronization (QRESYNC)
pub async fn resync_condstoreqresync( pub async fn resync_condstoreqresync(
&mut self, &mut self,
_cache_handle: Box<dyn ImapCache>, _cache_handle: Box<dyn ImapCache>,

@ -993,11 +993,7 @@ impl ImapConnection {
format!("Could not parse select response for mailbox {}", imap_path) format!("Could not parse select response for mailbox {}", imap_path)
})?; })?;
{ {
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Some(mut cache_handle) = self.uid_store.cache_handle()? {
#[cfg(not(feature = "sqlite3"))]
let mut cache_handle = super::cache::DefaultCache::get(self.uid_store.clone())?;
#[cfg(feature = "sqlite3")]
let mut cache_handle = super::cache::Sqlite3Cache::get(self.uid_store.clone())?;
if let Err(err) = cache_handle.mailbox_state(mailbox_hash).and_then(|r| { if let Err(err) = cache_handle.mailbox_state(mailbox_hash).and_then(|r| {
if r.is_none() { if r.is_none() {
cache_handle.clear(mailbox_hash, &select_response) cache_handle.clear(mailbox_hash, &select_response)

@ -52,8 +52,6 @@ use std::{
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
#[cfg(feature = "sqlite3")]
pub use cache::ImapCacheReset;
pub use cache::ModSequence; pub use cache::ModSequence;
use futures::{lock::Mutex as FutureMutex, stream::Stream}; use futures::{lock::Mutex as FutureMutex, stream::Stream};
use imap_codec::imap_types::{ use imap_codec::imap_types::{
@ -203,6 +201,30 @@ impl UIDStore {
timeout, timeout,
} }
} }
pub fn cache_handle(self: &Arc<Self>) -> Result<Option<Box<dyn cache::ImapCache>>> {
if !*self.keep_offline_cache.lock().unwrap() {
return Ok(None);
}
#[cfg(not(feature = "sqlite3"))]
return Ok(None);
#[cfg(feature = "sqlite3")]
return Ok(Some(cache::sqlite3_cache::Sqlite3Cache::get(Arc::clone(
self,
))?));
}
pub fn reset_db(self: &Arc<Self>) -> Result<()> {
if !*self.keep_offline_cache.lock().unwrap() {
return Ok(());
}
#[cfg(not(feature = "sqlite3"))]
return Ok(());
#[cfg(feature = "sqlite3")]
use crate::imap::cache::ImapCacheReset;
#[cfg(feature = "sqlite3")]
return cache::sqlite3_cache::Sqlite3Cache::reset_db(self);
}
} }
#[derive(Debug)] #[derive(Debug)]
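`cache_handle()` and `reset_db()` fold the `keep_offline_cache` setting and the `sqlite3` feature gate into one place, so the call sites changed below reduce to a single `if let`. A self-contained analogue of that design, with hypothetical `Store`/`CacheHandle` types standing in for `UIDStore` and `Box<dyn ImapCache>`:

```rust
// Hypothetical stand-ins; the point is that the getter owns both the user
// setting and the feature gate, not the call sites.
struct CacheHandle;

impl CacheHandle {
    fn clear(&mut self) {
        // Illustrative no-op.
    }
}

struct Store {
    keep_offline_cache: bool,
}

impl Store {
    fn cache_handle(&self) -> Option<CacheHandle> {
        if !self.keep_offline_cache {
            return None;
        }
        #[cfg(not(feature = "sqlite3"))]
        return None;
        #[cfg(feature = "sqlite3")]
        return Some(CacheHandle);
    }
}

fn main() {
    let store = Store {
        keep_offline_cache: true,
    };
    // No cfg attribute at the call site: a disabled or unavailable cache is `None`.
    if let Some(mut cache) = store.cache_handle() {
        cache.clear();
    }
}
```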
@ -304,39 +326,32 @@ impl MailBackend for ImapType {
mailbox_hash: MailboxHash, mailbox_hash: MailboxHash,
) -> Result<Pin<Box<dyn Stream<Item = Result<Vec<Envelope>>> + Send + 'static>>> { ) -> Result<Pin<Box<dyn Stream<Item = Result<Vec<Envelope>>> + Send + 'static>>> {
let cache_handle = { let cache_handle = {
#[cfg(feature = "sqlite3")] match self.uid_store.cache_handle().chain_err_summary(|| {
if *self.uid_store.keep_offline_cache.lock().unwrap() { format!(
match cache::Sqlite3Cache::get(self.uid_store.clone()).chain_err_summary(|| { "Could not initialize cache for IMAP account {}. Resetting database.",
format!( self.uid_store.account_name
"Could not initialize cache for IMAP account {}. Resetting database.", )
self.uid_store.account_name }) {
) Ok(Some(v)) => Some(v),
}) { Ok(None) => None,
Ok(v) => Some(v), Err(err) => {
Err(err) => { (self.uid_store.event_consumer)(self.uid_store.account_hash, err.into());
(self.uid_store.event_consumer)(self.uid_store.account_hash, err.into()); match self
match cache::Sqlite3Cache::reset_db(&self.uid_store) .uid_store
.and_then(|()| cache::Sqlite3Cache::get(self.uid_store.clone())) .reset_db()
.chain_err_summary(|| "Could not reset IMAP cache database.") .and_then(|()| self.uid_store.cache_handle())
{ .chain_err_summary(|| "Could not reset IMAP cache database.")
Ok(v) => Some(v), {
Err(err) => { Ok(Some(v)) => Some(v),
*self.uid_store.keep_offline_cache.lock().unwrap() = false; Ok(None) => None,
log::trace!( Err(err) => {
"{}: sqlite3 cache error: {}", *self.uid_store.keep_offline_cache.lock().unwrap() = false;
self.uid_store.account_name, log::trace!("{}: cache error: {}", self.uid_store.account_name, err);
err None
);
None
}
} }
} }
} }
} else {
None
} }
#[cfg(not(feature = "sqlite3"))]
None
}; };
let mut state = FetchState { let mut state = FetchState {
stage: if *self.uid_store.keep_offline_cache.lock().unwrap() && cache_handle.is_some() { stage: if *self.uid_store.keep_offline_cache.lock().unwrap() && cache_handle.is_some() {
@ -860,9 +875,7 @@ impl MailBackend for ImapType {
} }
} }
} }
#[cfg(feature = "sqlite3")] if let Some(mut cache_handle) = uid_store.cache_handle()? {
if *uid_store.keep_offline_cache.lock().unwrap() {
let mut cache_handle = cache::Sqlite3Cache::get(uid_store.clone())?;
let res = cache_handle.update_flags(env_hashes, mailbox_hash, flags); let res = cache_handle.update_flags(env_hashes, mailbox_hash, flags);
log::trace!("update_flags in cache: {:?}", res); log::trace!("update_flags in cache: {:?}", res);
} }

@ -23,7 +23,6 @@ use std::convert::{TryFrom, TryInto};
use imap_codec::imap_types::{command::CommandBody, search::SearchKey, sequence::SequenceSet}; use imap_codec::imap_types::{command::CommandBody, search::SearchKey, sequence::SequenceSet};
use super::{ImapConnection, MailboxSelection, UID};
use crate::{ use crate::{
backends::{ backends::{
BackendMailbox, RefreshEvent, BackendMailbox, RefreshEvent,
@ -32,8 +31,12 @@ use crate::{
}, },
email::common_attributes, email::common_attributes,
error::*, error::*,
imap::protocol_parser::{ imap::{
generate_envelope_hash, FetchResponse, ImapLineSplit, RequiredResponses, UntaggedResponse, protocol_parser::{
generate_envelope_hash, FetchResponse, ImapLineSplit, RequiredResponses,
UntaggedResponse,
},
ImapConnection, MailboxSelection, UID,
}, },
}; };
@ -60,10 +63,7 @@ impl ImapConnection {
let mailbox = let mailbox =
std::clone::Clone::clone(&self.uid_store.mailboxes.lock().await[&mailbox_hash]); std::clone::Clone::clone(&self.uid_store.mailboxes.lock().await[&mailbox_hash]);
#[cfg(not(feature = "sqlite3"))] let mut cache_handle = self.uid_store.cache_handle();
let mut cache_handle = super::cache::DefaultCache::get(self.uid_store.clone())?;
#[cfg(feature = "sqlite3")]
let mut cache_handle = super::cache::Sqlite3Cache::get(self.uid_store.clone())?;
let mut response = Vec::with_capacity(8 * 1024); let mut response = Vec::with_capacity(8 * 1024);
let untagged_response = let untagged_response =
match super::protocol_parser::untagged_responses(line).map(|(_, v, _)| v) { match super::protocol_parser::untagged_responses(line).map(|(_, v, _)| v) {
@ -156,7 +156,7 @@ impl ImapConnection {
} }
} }
} }
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(ref mut cache_handle)) = cache_handle {
for mailbox_hash in mboxes_to_update { for mailbox_hash in mboxes_to_update {
cache_handle.update(mailbox_hash, &events)?; cache_handle.update(mailbox_hash, &events)?;
} }
@ -215,7 +215,7 @@ impl ImapConnection {
}) })
.collect::<Vec<(_, [(UID, RefreshEvent); 1])>>(); .collect::<Vec<(_, [(UID, RefreshEvent); 1])>>();
for (mailbox_hash, pair) in events { for (mailbox_hash, pair) in events {
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(ref mut cache_handle)) = cache_handle {
cache_handle.update(mailbox_hash, &pair)?; cache_handle.update(mailbox_hash, &pair)?;
} }
let [(_, event)] = pair; let [(_, event)] = pair;
@ -302,7 +302,7 @@ impl ImapConnection {
mailbox.path(), mailbox.path(),
); );
} }
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(ref mut cache_handle)) = cache_handle {
if let Err(err) = cache_handle if let Err(err) = cache_handle
.insert_envelopes(mailbox_hash, &v) .insert_envelopes(mailbox_hash, &v)
.chain_err_summary(|| { .chain_err_summary(|| {
@ -404,7 +404,7 @@ impl ImapConnection {
} }
mailbox.exists.lock().unwrap().insert_new(env.hash()); mailbox.exists.lock().unwrap().insert_new(env.hash());
} }
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(ref mut cache_handle)) = cache_handle {
if let Err(err) = cache_handle if let Err(err) = cache_handle
.insert_envelopes(mailbox_hash, &v) .insert_envelopes(mailbox_hash, &v)
.chain_err_summary(|| { .chain_err_summary(|| {
@ -551,7 +551,7 @@ impl ImapConnection {
kind: NewFlags(env_hash, flags), kind: NewFlags(env_hash, flags),
}, },
)]; )];
if *self.uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(ref mut cache_handle)) = cache_handle {
cache_handle.update(mailbox_hash, &event)?; cache_handle.update(mailbox_hash, &event)?;
} }
self.add_refresh_event(std::mem::replace( self.add_refresh_event(std::mem::replace(

@ -90,11 +90,7 @@ pub async fn idle(kit: ImapWatchKit) -> Result<()> {
if let Some(v) = uidvalidities.get(&mailbox_hash) { if let Some(v) = uidvalidities.get(&mailbox_hash) {
if *v != select_response.uidvalidity { if *v != select_response.uidvalidity {
if *uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(mut cache_handle)) = uid_store.cache_handle() {
#[cfg(not(feature = "sqlite3"))]
let mut cache_handle = super::cache::DefaultCache::get(uid_store.clone())?;
#[cfg(feature = "sqlite3")]
let mut cache_handle = super::cache::Sqlite3Cache::get(uid_store.clone())?;
cache_handle.clear(mailbox_hash, &select_response)?; cache_handle.clear(mailbox_hash, &select_response)?;
} }
conn.add_refresh_event(RefreshEvent { conn.add_refresh_event(RefreshEvent {
@ -213,10 +209,7 @@ pub async fn examine_updates(
}); });
} }
} else { } else {
#[cfg(not(feature = "sqlite3"))] let cache_handle = uid_store.cache_handle();
let mut cache_handle = super::cache::DefaultCache::get(uid_store.clone())?;
#[cfg(feature = "sqlite3")]
let mut cache_handle = super::cache::Sqlite3Cache::get(uid_store.clone())?;
let mut response = Vec::with_capacity(8 * 1024); let mut response = Vec::with_capacity(8 * 1024);
let select_response = conn let select_response = conn
.examine_mailbox(mailbox_hash, &mut response, true) .examine_mailbox(mailbox_hash, &mut response, true)
@ -227,7 +220,7 @@ pub async fn examine_updates(
if let Some(v) = uidvalidities.get(&mailbox_hash) { if let Some(v) = uidvalidities.get(&mailbox_hash) {
if *v != select_response.uidvalidity { if *v != select_response.uidvalidity {
if *uid_store.keep_offline_cache.lock().unwrap() { if let Ok(Some(mut cache_handle)) = cache_handle {
cache_handle.clear(mailbox_hash, &select_response)?; cache_handle.clear(mailbox_hash, &select_response)?;
} }
conn.add_refresh_event(RefreshEvent { conn.add_refresh_event(RefreshEvent {
@ -378,17 +371,17 @@ pub async fn examine_updates(
} }
} }
} }
if *uid_store.keep_offline_cache.lock().unwrap() if let Ok(Some(mut cache_handle)) = cache_handle {
&& cache_handle.mailbox_state(mailbox_hash)?.is_some() if cache_handle.mailbox_state(mailbox_hash)?.is_some() {
{ cache_handle
cache_handle .insert_envelopes(mailbox_hash, &v)
.insert_envelopes(mailbox_hash, &v) .chain_err_summary(|| {
.chain_err_summary(|| { format!(
format!( "Could not save envelopes in cache for mailbox {}",
"Could not save envelopes in cache for mailbox {}", mailbox.imap_path()
mailbox.imap_path() )
) })?;
})?; }
} }
for FetchResponse { uid, envelope, .. } in v { for FetchResponse { uid, envelope, .. } in v {

@ -191,6 +191,8 @@ pub extern crate futures;
#[allow(unused_imports)] #[allow(unused_imports)]
#[macro_use] #[macro_use]
pub extern crate indexmap; pub extern crate indexmap;
#[cfg(feature = "sqlite3")]
pub extern crate rusqlite;
pub extern crate serde_path_to_error; pub extern crate serde_path_to_error;
pub extern crate smallvec; pub extern crate smallvec;
pub extern crate smol; pub extern crate smol;
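Re-exporting `rusqlite` (only with the `sqlite3` feature) lets crates built on melib name rusqlite types without declaring their own dependency, keeping the two versions in lockstep. Hypothetical downstream usage; the table and column names are illustrative:

```rust
// Requires melib to be built with the "sqlite3" feature.
use melib::rusqlite::{params, Connection};

fn count_cached(conn: &Connection, mailbox_hash: i64) -> melib::rusqlite::Result<i64> {
    conn.query_row(
        "SELECT count(*) FROM envelopes WHERE mailbox_hash = ?1;",
        params![mailbox_hash],
        |row| row.get(0),
    )
}
```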

@ -35,6 +35,8 @@ mod inner {
pub const DB_DESCRIPTION: DatabaseDescription = DatabaseDescription { pub const DB_DESCRIPTION: DatabaseDescription = DatabaseDescription {
name: "nntp_store.db", name: "nntp_store.db",
application_prefix: "meli",
identifier: None,
init_script: Some( init_script: Some(
"PRAGMA foreign_keys = true; "PRAGMA foreign_keys = true;
PRAGMA encoding = 'UTF-8'; PRAGMA encoding = 'UTF-8';
@ -60,8 +62,12 @@ CREATE TABLE IF NOT EXISTS article (
impl Store { impl Store {
pub fn new(id: &str) -> Result<Self> { pub fn new(id: &str) -> Result<Self> {
let db_desc = DatabaseDescription {
identifier: Some(id.to_string().into()),
..DB_DESCRIPTION
};
Ok(Self { Ok(Self {
connection: sqlite3::open_or_create_db(&DB_DESCRIPTION, Some(id))?, connection: db_desc.open_or_create_db()?,
}) })
} }
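With the identifier now part of `DatabaseDescription`, a store declares one constant description and specializes it per id with struct-update syntax, instead of threading an `Option<&str>` through free functions. A sketch of a consumer under that assumption; the module path, field values and schema are illustrative, not taken from the patch:

```rust
// Minimal sketch, assuming melib is built with the "sqlite3" feature and that
// DatabaseDescription is exported from melib::sqlite3 as in this commit.
use melib::error::Result;
use melib::sqlite3::{Connection, DatabaseDescription};

const STORE_DESCRIPTION: DatabaseDescription = DatabaseDescription {
    name: "example_store.db",
    identifier: None,
    application_prefix: "meli",
    init_script: Some("CREATE TABLE IF NOT EXISTS item (id INTEGER PRIMARY KEY, payload BLOB);"),
    version: 1,
};

fn open_for(id: &str) -> Result<Connection> {
    // Specialize the constant description for one account/store id, then open
    // it; `open_or_create_db` also applies the WAL, busy-timeout and file
    // permission setup shown below.
    let desc = DatabaseDescription {
        identifier: Some(id.to_string().into()),
        ..STORE_DESCRIPTION
    };
    desc.open_or_create_db()
}
```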

@ -19,115 +19,151 @@
* along with meli. If not, see <http://www.gnu.org/licenses/>. * along with meli. If not, see <http://www.gnu.org/licenses/>.
*/ */
use std::path::PathBuf; use std::{borrow::Cow, os::unix::fs::PermissionsExt, path::PathBuf, sync::Arc};
use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput}; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput};
pub use rusqlite::{self, params, Connection}; pub use rusqlite::{self, config::DbConfig, params, Connection};
use crate::{error::*, log, Envelope}; use crate::{error::*, log, Envelope};
#[derive(Clone, Copy, Debug)] /// A description for creating, opening and handling application databases.
#[derive(Clone, Debug)]
pub struct DatabaseDescription { pub struct DatabaseDescription {
/// A name that represents the function of this database, e.g.
/// `headers_cache`, `contacts`, `settings`, etc.
pub name: &'static str, pub name: &'static str,
/// An optional identifier string that, along with
/// [`DatabaseDescription::name`], forms a specialized identifier for the
/// database, e.g. an account name, a date, etc.
pub identifier: Option<Cow<'static, str>>,
/// The name of the application to use when storing the database in `XDG`
/// directories, for when the consuming application is not `meli`
/// itself.
pub application_prefix: &'static str,
/// A script that initializes the schema of the database.
pub init_script: Option<&'static str>, pub init_script: Option<&'static str>,
/// The current value of the `user_version` `PRAGMA` of the `sqlite3`
/// database, used for schema versioning.
pub version: u32, pub version: u32,
} }
pub fn db_path(name: &str) -> Result<PathBuf> { impl DatabaseDescription {
let data_dir = /// Returns whether the computed database path for this description exists.
xdg::BaseDirectories::with_prefix("meli").map_err(|e| Error::new(e.to_string()))?; pub fn exists(&self) -> Result<bool> {
data_dir let path = self.db_path()?;
.place_data_file(name) Ok(path.exists())
.map_err(|err| Error::new(err.to_string())) }
}
pub fn open_db(db_path: PathBuf) -> Result<Connection> { /// Returns the computed database path for this description.
if !db_path.exists() { pub fn db_path(&self) -> Result<PathBuf> {
return Err(Error::new("Database doesn't exist")); let name: Cow<'static, str> = self.identifier.as_ref().map_or_else(
|| self.name.into(),
|id| format!("{}_{}", id, self.name).into(),
);
let data_dir =
xdg::BaseDirectories::with_prefix(self.application_prefix).map_err(|err| {
Error::new(format!(
"Could not open XDG data directory with prefix {}",
self.application_prefix
))
.set_source(Some(Arc::new(err)))
})?;
data_dir.place_data_file(name.as_ref()).map_err(|err| {
Error::new(format!("Could not create `{}`", name)).set_source(Some(Arc::new(err)))
})
} }
Ok(Connection::open(&db_path).and_then(|db| {
rusqlite::vtab::array::load_module(&db)?;
Ok(db)
})?)
}
pub fn open_or_create_db( /// Returns an [`rusqlite::Connection`] for this description.
description: &DatabaseDescription, pub fn open_or_create_db(&self) -> Result<Connection> {
identifier: Option<&str>, let mut second_try: bool = false;
) -> Result<Connection> { let db_path = self.db_path()?;
let mut second_try: bool = false;
loop {
let db_path = identifier.map_or_else(
|| db_path(description.name),
|id| db_path(&format!("{}_{}", id, description.name)),
)?;
let set_mode = !db_path.exists(); let set_mode = !db_path.exists();
if set_mode { if set_mode {
log::info!( log::info!("Creating {} database in {}", self.name, db_path.display());
"Creating {} database in {}",
description.name,
db_path.display()
);
} }
let conn = Connection::open(&db_path)?; loop {
rusqlite::vtab::array::load_module(&conn)?; let mut inner_fn = || {
if set_mode { let conn = Connection::open(&db_path)?;
use std::os::unix::fs::PermissionsExt; conn.busy_timeout(std::time::Duration::new(10, 0))?;
let file = std::fs::File::open(&db_path)?; for conf_flag in [
let metadata = file.metadata()?; DbConfig::SQLITE_DBCONFIG_ENABLE_FKEY,
let mut permissions = metadata.permissions(); DbConfig::SQLITE_DBCONFIG_ENABLE_TRIGGER,
]
.into_iter()
{
conn.set_db_config(conf_flag, true)?;
}
rusqlite::vtab::array::load_module(&conn)?;
if set_mode {
let file = std::fs::File::open(&db_path)?;
let metadata = file.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o600); // Read/write for owner only. permissions.set_mode(0o600); // Read/write for owner only.
file.set_permissions(permissions)?; file.set_permissions(permissions)?;
} }
let version: i32 = conn.pragma_query_value(None, "user_version", |row| row.get(0))?; let _: String =
if version != 0_i32 && version as u32 != description.version { conn.pragma_update_and_check(None, "journal_mode", "WAL", |row| row.get(0))?;
log::info!( let version: i32 =
"Database version mismatch, is {} but expected {}. Attempting to recreate \ conn.pragma_query_value(None, "user_version", |row| row.get(0))?;
database.", if version != 0_i32 && version as u32 != self.version {
version, log::info!(
description.version "Database version mismatch, is {} but expected {}. Attempting to recreate \
); database.",
if second_try { version,
return Err(Error::new(format!( self.version
"Database version mismatch, is {} but expected {}. Could not recreate \ );
database.", if second_try {
version, description.version return Err(Error::new(format!(
))); "Database version mismatch, is {} but expected {}. Could not recreate \
} database.",
reset_db(description, identifier)?; version, self.version
second_try = true; )));
continue; }
} self.reset_db()?;
second_try = true;
return Ok(conn);
}
if version == 0 { if version == 0 {
conn.pragma_update(None, "user_version", description.version)?; conn.pragma_update(None, "user_version", self.version)?;
} }
if let Some(s) = description.init_script { if let Some(s) = self.init_script {
conn.execute_batch(s) conn.execute_batch(s)
.map_err(|e| Error::new(e.to_string()))?; .map_err(|err| Error::new(err.to_string()))?;
} }
return Ok(conn); Ok(conn)
};
match inner_fn() {
Ok(_) if second_try => continue,
Ok(conn) => return Ok(conn),
Err(err) => {
return Err(Error::new(format!(
"{}: Could not open or create database",
db_path.display()
))
.set_source(Some(Arc::new(err))))
}
}
}
} }
}
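The `journal_mode = WAL` and `busy_timeout` setup in `open_or_create_db` is what the commit message means by minimizing locking: in WAL mode a writer does not block readers on other connections, and the busy timeout makes a competing writer wait instead of failing immediately with `SQLITE_BUSY`. A standalone sketch (not part of the patch) of the same two calls on a throwaway database:

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let path = std::env::temp_dir().join("wal_demo.db");
    let writer = Connection::open(&path)?;
    // Same setup as open_or_create_db above: switch to WAL and tolerate brief
    // contention instead of erroring out.
    let _: String = writer.pragma_update_and_check(None, "journal_mode", "WAL", |row| row.get(0))?;
    writer.busy_timeout(std::time::Duration::from_secs(10))?;
    writer.execute_batch("CREATE TABLE IF NOT EXISTS t (id INTEGER PRIMARY KEY, v TEXT);")?;

    let reader = Connection::open(&path)?;
    writer.execute("INSERT INTO t (v) VALUES (?1)", ["hello"])?;
    // With WAL, this read on a second connection does not block on the writer
    // and sees the committed row.
    let n: i64 = reader.query_row("SELECT count(*) FROM t", [], |row| row.get(0))?;
    println!("rows visible to reader: {n}");
    Ok(())
}
```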
/// Return database to a clean slate. /// Reset database to a clean slate.
pub fn reset_db(description: &DatabaseDescription, identifier: Option<&str>) -> Result<()> { pub fn reset_db(&self) -> Result<()> {
let db_path = identifier.map_or_else( let db_path = self.db_path()?;
|| db_path(description.name), if !db_path.exists() {
|id| db_path(&format!("{}_{}", id, description.name)), return Ok(());
)?; }
if !db_path.exists() { log::info!("Resetting {} database in {}", self.name, db_path.display());
return Ok(()); std::fs::remove_file(&db_path).map_err(|err| {
Error::new(format!("{}: could not remove file", db_path.display()))
.set_kind(ErrorKind::from(err.kind()))
.set_source(Some(Arc::new(err)))
})?;
Ok(())
} }
log::info!(
"Resetting {} database in {}",
description.name,
db_path.display()
);
std::fs::remove_file(&db_path)?;
Ok(())
} }
impl ToSql for Envelope { impl ToSql for Envelope {
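The hunk stops at the `impl ToSql for Envelope` header; it is that impl, together with the corresponding `FromSql` one, that lets the cache code above pass whole envelopes to `params![...]` and read them back with `row.get(...)`. As a rough illustration only, here is a `ToSql`/`FromSql` pair for a hypothetical serde type stored as a BLOB, assuming serde (with derive) and bincode 1.x as dependencies; the actual encoding used for `Envelope` is not shown in this diff:

```rust
use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
use serde::{Deserialize, Serialize};

// Hypothetical type standing in for Envelope.
#[derive(Serialize, Deserialize)]
struct CachedItem {
    uid: u32,
    subject: String,
}

impl ToSql for CachedItem {
    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
        // Serialize to bytes and hand them to sqlite as a BLOB.
        let bytes = bincode::serialize(self)
            .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?;
        Ok(ToSqlOutput::from(bytes))
    }
}

impl FromSql for CachedItem {
    fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
        // Read the BLOB back and deserialize it.
        let bytes = value.as_blob()?;
        bincode::deserialize(bytes).map_err(|err| FromSqlError::Other(Box::new(err)))
    }
}

fn main() -> rusqlite::Result<()> {
    let conn = rusqlite::Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE item (id INTEGER PRIMARY KEY, payload BLOB);")?;
    let item = CachedItem { uid: 1, subject: "hello".into() };
    conn.execute("INSERT INTO item (payload) VALUES (?1)", rusqlite::params![&item])?;
    let back: CachedItem =
        conn.query_row("SELECT payload FROM item LIMIT 1", [], |row| row.get(0))?;
    assert_eq!(back.uid, 1);
    Ok(())
}
```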
