diff --git a/src/database/stores/fs/mod.rs b/src/database/stores/fs/mod.rs index 93dd2d7..05e8873 100644 --- a/src/database/stores/fs/mod.rs +++ b/src/database/stores/fs/mod.rs @@ -15,7 +15,7 @@ use crate::util::hash::{b58_encode, Hash, Hashable}; use crate::util::jobs::{JobContainer, JobHandle}; use anyhow::{anyhow, Error, Result}; use chrono::prelude::*; -use diesel::r2d2::{ConnectionManager, ManageConnection}; +use diesel::r2d2::{self, ConnectionManager}; use diesel::ExpressionMethods; use diesel::{Connection, QueryDsl, RunQueryDsl, SqliteConnection}; use log::{debug, error, info, warn}; @@ -46,7 +46,7 @@ lazy_static! { static ref BLOB_TYPE_ADDR: Address = BLOB_TYPE_INVARIANT.entity().unwrap(); } -struct PragmaSynchronousGuard<'a>(&'a UpEndConnection); +struct PragmaSynchronousGuard<'a>(&'a SqliteConnection); impl Drop for PragmaSynchronousGuard<'_> { fn drop(&mut self) { @@ -63,7 +63,7 @@ impl Drop for PragmaSynchronousGuard<'_> { pub struct FsStore { path: PathBuf, - manager: ConnectionManager<SqliteConnection>, + pool: r2d2::Pool<ConnectionManager<SqliteConnection>>, lock: Arc<RwLock<()>>, } @@ -76,7 +76,8 @@ impl FsStore { .to_str() .unwrap(), ); - let connection = manager.connect()?; + let pool = r2d2::Pool::builder().build(manager)?; + let connection = pool.get()?; // while diesel doesn't support multiple embedded migrations... connection.execute( @@ -95,16 +96,13 @@ impl FsStore { CREATE INDEX IF NOT EXISTS files_hash ON files (hash); CREATE INDEX IF NOT EXISTS files_valid ON files (valid); + PRAGMA journal_mode = WAL; PRAGMA wal_autocheckpoint = 1000; PRAGMA wal_checkpoint(TRUNCATE); "#, )?; let lock = Arc::new(RwLock::new(())); - Ok(FsStore { - path, - manager, - lock, - }) + Ok(FsStore { path, pool, lock }) } fn rescan_vault<D: Borrow<UpEndDatabase>>( @@ -118,16 +116,17 @@ impl FsStore { info!("Vault rescan started."); let db = db.borrow(); - let connection = db.connection()?; + let upconnection = db.connection()?; + let connection = self.pool.get()?; // Initialize types, etc... 
debug!("Initializing DB types."); - connection.insert_entry(Entry::try_from(&*BLOB_TYPE_INVARIANT)?)?; - upend_insert_addr!(connection, BLOB_TYPE_ADDR, IS_OF_TYPE_ATTR, TYPE_ADDR)?; - upend_insert_val!(connection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_MTIME_KEY)?; - upend_insert_val!(connection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_SIZE_KEY)?; - upend_insert_val!(connection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_MIME_KEY)?; - upend_insert_val!(connection, BLOB_TYPE_ADDR, LABEL_ATTR, "Data Blob")?; + upconnection.insert_entry(Entry::try_from(&*BLOB_TYPE_INVARIANT)?)?; + upend_insert_addr!(upconnection, BLOB_TYPE_ADDR, IS_OF_TYPE_ATTR, TYPE_ADDR)?; + upend_insert_val!(upconnection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_MTIME_KEY)?; + upend_insert_val!(upconnection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_SIZE_KEY)?; + upend_insert_val!(upconnection, BLOB_TYPE_ADDR, TYPE_HAS_ATTR, FILE_MIME_KEY)?; + upend_insert_val!(upconnection, BLOB_TYPE_ADDR, LABEL_ATTR, "Data Blob")?; // Disable syncing in SQLite for the duration of the import let mut _guard: Option<PragmaSynchronousGuard> = None; @@ -161,7 +160,7 @@ impl FsStore { .into_par_iter() .map(|path| { let result = self.process_directory_entry( - db.connection().unwrap(), + db, &resolve_cache, path.clone(), &existing_files, @@ -191,7 +190,7 @@ impl FsStore { let existing_files = existing_files.read().unwrap(); let cleanup_results = existing_files.iter().filter(|f| f.valid).map(|file| { - let trans_result = connection.transaction::<_, Error, _>(|| { + let trans_result = upconnection.transaction::<_, Error, _>(|| { self.file_set_valid(file.id, false)?; // remove_object(&connection, )? 
Ok(()) @@ -256,9 +255,9 @@ impl FsStore { Ok(all_outcomes) } - fn process_directory_entry( + fn process_directory_entry<D: Borrow<UpEndDatabase>>( &self, - connection: UpEndConnection, + db: D, resolve_cache: &Arc<Mutex<ResolveCache>>, path: PathBuf, existing_files: &Arc<RwLock<Vec<db::File>>>, @@ -345,7 +344,7 @@ impl FsStore { let mime_type = tree_magic_mini::from_filepath(&path).map(|s| s.to_string()); self.insert_file_with_metadata( - &connection, + &db.borrow().connection()?, &normalized_path, file_hash.unwrap(), size, @@ -498,7 +497,7 @@ impl FsStore { ); let _lock = self.lock.write().unwrap(); - let conn = self.manager.connect()?; + let conn = self.pool.get()?; diesel::insert_into(files::table) .values(&file) @@ -517,7 +516,7 @@ impl FsStore { use self::db::files::dsl::*; let _lock = self.lock.read().unwrap(); - let conn = self.manager.connect()?; + let conn = self.pool.get()?; let matches = files .filter(valid.eq(true)) @@ -544,7 +543,7 @@ impl FsStore { use self::db::files::dsl::*; let _lock = self.lock.read().unwrap(); - let conn = self.manager.connect()?; + let conn = self.pool.get()?; let matches = files.load::<db::File>(&conn)?; Ok(matches) @@ -555,7 +554,7 @@ impl FsStore { debug!("Setting file ID {}'s mtime = {:?}", file_id, m_time); let _lock = self.lock.write().unwrap(); - let conn = self.manager.connect()?; + let conn = self.pool.get()?; Ok(diesel::update(files.filter(id.eq(file_id))) .set(mtime.eq(m_time)) @@ -567,7 +566,7 @@ impl FsStore { debug!("Setting file ID {} to valid = {}", file_id, is_valid); let _lock = self.lock.write().unwrap(); - let conn = self.manager.connect()?; + let conn = self.pool.get()?; Ok(diesel::update(files.filter(id.eq(file_id))) .set(valid.eq(is_valid))