@@ -14,10 +14,8 @@ use std::{
 	collections::HashMap,
 	error, io,
 	path::{Path, PathBuf},
-	time::{Duration, Instant},
 };
 
-use parking_lot::Mutex;
 use rocksdb::{
 	BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, CompactOptions, Options, ReadOptions, WriteBatch,
 	WriteOptions, DB,
@@ -271,7 +269,6 @@ pub struct Database {
 	read_opts: ReadOptions,
 	block_opts: BlockBasedOptions,
 	stats: stats::RunningDbStats,
-	last_compaction: Mutex<Instant>,
 }
 
 /// Generate the options for RocksDB, based on the given `DatabaseConfig`.
@@ -354,23 +351,15 @@ impl Database {
 			Self::open_primary(&opts, path.as_ref(), config, column_names.as_slice(), &block_opts)?
 		};
 
-		let db = Database {
+		Ok(Database {
 			inner: DBAndColumns { db, column_names },
 			config: config.clone(),
 			opts,
 			read_opts,
 			write_opts,
 			block_opts,
 			stats: stats::RunningDbStats::new(),
-			last_compaction: Mutex::new(Instant::now()),
-		};
-
-		// After opening the DB, we want to compact it.
-		//
-		// This just in case the node crashed before to ensure the db stays fast.
-		db.force_compaction()?;
-
-		Ok(db)
+		})
 	}
 
 	/// Internal api to open a database in primary mode.
@@ -472,21 +461,7 @@ impl Database {
 		}
 		self.stats.tally_bytes_written(stats_total_bytes as u64);
 
-		let res = cfs.db.write_opt(batch, &self.write_opts).map_err(other_io_err)?;
-
-		// If we have written more data than what we want to have stored in a `sst` file, we force compaction.
-		// We also ensure that we only compact once per minute.
-		//
-		// Otherwise, rocksdb read performance is going down, after e.g. a warp sync.
-		if stats_total_bytes > self.config.compaction.initial_file_size as usize &&
-			self.last_compaction.lock().elapsed() > Duration::from_secs(60)
-		{
-			self.force_compaction()?;
-
-			*self.last_compaction.lock() = Instant::now();
-		}
-
-		Ok(res)
+		cfs.db.write_opt(batch, &self.write_opts).map_err(other_io_err)
 	}
 
 	/// Get value by key.
@@ -606,25 +581,23 @@ impl Database {
 		self.inner.db.try_catch_up_with_primary().map_err(other_io_err)
 	}
 
-	/// Force compacting the entire db.
-	fn force_compaction(&self) -> io::Result<()> {
+	/// Force compact a single column.
+	///
+	/// After compaction of the column, this may lead to better read performance.
+	pub fn force_compact(&self, col: u32) -> io::Result<()> {
 		let mut compact_options = CompactOptions::default();
 		compact_options.set_bottommost_level_compaction(rocksdb::BottommostLevelCompaction::Force);
-
-		// Don't ask me why we can not just use `compact_range_opt`...
-		// But we are forced to trigger compaction on every column. Actually we only need this for the `STATE` column,
-		// but we don't know which one this is here. So, we just iterate all of them.
-		for col in 0..self.inner.column_names.len() {
-			self.inner
-				.db
-				.compact_range_cf_opt(self.inner.cf(col)?, None::<Vec<u8>>, None::<Vec<u8>>, &compact_options);
-		}
-
+		self.inner.db.compact_range_cf_opt(
+			self.inner.cf(col as usize)?,
+			None::<Vec<u8>>,
+			None::<Vec<u8>>,
+			&compact_options,
+		);
 		Ok(())
 	}
 }
 
-// duplicate declaration of methods here to avoid trait import in certain existing cases
+// Duplicate declaration of methods here to avoid trait import in certain existing cases
 // at time of addition.
 impl KeyValueDB for Database {
 	fn get(&self, col: u32, key: &[u8]) -> io::Result<Option<DBValue>> {
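
Since this change removes the automatic compaction on open and after large writes, callers that do bulk imports can trigger it themselves through the newly public `force_compact`. A minimal sketch, assuming the crate is consumed as `kvdb-rocksdb` and that the caller knows which column is write-heavy (`STATE_COL` below is a hypothetical index, not part of this diff):

```rust
use std::io;

use kvdb_rocksdb::Database;

// Hypothetical: the column that received the bulk writes.
const STATE_COL: u32 = 1;

/// Compact the write-heavy column once a bulk import has finished, taking over
/// the role of the automatic, rate-limited compaction removed from `write`.
fn compact_after_bulk_import(db: &Database) -> io::Result<()> {
	db.force_compact(STATE_COL)
}
```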