22752: Improve comments to explain why we're doing this fix.

Based on questions and comments from dgoulet, I've tried to fill in
the reasoning behind these functions, so that future programmers can
more easily understand why this code exists and why it works the way
it does.
commit 948be49ce0
parent a58a41c15a
Author: Nick Mathewson
Date:   2017-09-04 11:54:49 -04:00


@@ -10,7 +10,10 @@
 #define CCE_MAGIC 0x17162253
 #ifdef _WIN32
-/* On Windows, unlink won't work if there's an active mmap. */
+/* On Windows, unlink won't work on a file if the file is actively mmap()ed.
+ * That forces us to be less aggressive about unlinking files, and causes other
+ * changes throughout our logic.
+ */
 #define MUST_UNMAP_TO_UNLINK
 #endif
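
The Windows behavior that motivates MUST_UNMAP_TO_UNLINK can be seen in
isolation. Below is a minimal standalone sketch, not part of the Tor tree
(the scratch filename is invented), showing that deleting a file fails while
a view of it is mapped and succeeds once the view is unmapped:

    #include <windows.h>
    #include <stdio.h>

    int
    main(void)
    {
      const char *path = "mapped_demo.tmp";   /* hypothetical scratch file */
      HANDLE f = CreateFileA(path, GENERIC_READ | GENERIC_WRITE,
                             FILE_SHARE_READ | FILE_SHARE_DELETE, NULL,
                             CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
      DWORD n;
      WriteFile(f, "x", 1, &n, NULL);  /* an empty file can't be mapped */

      HANDLE map = CreateFileMappingA(f, NULL, PAGE_READONLY, 0, 0, NULL);
      const void *view = MapViewOfFile(map, FILE_MAP_READ, 0, 0, 0);
      CloseHandle(f);  /* the view alone keeps the file alive */

      /* While the view exists, deletion fails (typically
       * ERROR_ACCESS_DENIED). */
      printf("delete while mapped: %s\n", DeleteFileA(path) ? "ok" : "failed");

      UnmapViewOfFile(view);
      CloseHandle(map);
      printf("delete after unmap:  %s\n", DeleteFileA(path) ? "ok" : "failed");
      return 0;
    }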
@@ -74,13 +77,26 @@ static void consensus_cache_entry_unmap(consensus_cache_entry_t *ent);
 consensus_cache_t *
 consensus_cache_open(const char *subdir, int max_entries)
 {
+  int storagedir_max_entries;
   consensus_cache_t *cache = tor_malloc_zero(sizeof(consensus_cache_t));
   char *directory = get_datadir_fname(subdir);
   cache->max_entries = max_entries;
 #ifdef MUST_UNMAP_TO_UNLINK
-  max_entries = 1000000;
+  /* If we can't unlink the files that we're still using, then we need to
+   * tell the storagedir backend to allow far more files than this consensus
+   * cache actually wants, so that it can hold files which, from this cache's
+   * perspective, have become useless.
+   */
+#define VERY_LARGE_STORAGEDIR_LIMIT (1000*1000)
+  storagedir_max_entries = VERY_LARGE_STORAGEDIR_LIMIT;
+#else
+  /* Otherwise, we can just tell the storagedir to use the same limits
+   * as this cache. */
+  storagedir_max_entries = max_entries;
 #endif
-  cache->dir = storage_dir_new(directory, max_entries);
+  cache->dir = storage_dir_new(directory, storagedir_max_entries);
   tor_free(directory);
   if (!cache->dir) {
     tor_free(cache);
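
The key design choice in this hunk is to stop mutating the caller's
max_entries and instead track the backend's limit in its own variable; the
(1000*1000) spelling is the same value as the old literal 1000000, just
easier to read. Reduced to a standalone function, the limit-selection logic
looks roughly like this (effective_backend_limit is an invented name for
illustration, not Tor's):

    #ifdef _WIN32
    #define MUST_UNMAP_TO_UNLINK
    #endif

    #define VERY_LARGE_STORAGEDIR_LIMIT (1000*1000)

    static int
    effective_backend_limit(int cache_max_entries)
    {
    #ifdef MUST_UNMAP_TO_UNLINK
      /* Deletion of in-use files is deferred until they are unmapped, so
       * the backend must tolerate far more files than the cache logically
       * wants. */
      (void)cache_max_entries;
      return VERY_LARGE_STORAGEDIR_LIMIT;
    #else
      /* Deletion works immediately, so backend and cache limits can match. */
      return cache_max_entries;
    #endif
    }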
@@ -92,7 +108,12 @@ consensus_cache_open(const char *subdir, int max_entries)
 }
 /** Return true if it's okay to put more entries in this cache than
- * its official file limit. */
+ * its official file limit.
+ *
+ * (We need this method on Windows, where we can't unlink files that are still
+ * in use, and therefore might need to temporarily exceed the file limit until
+ * the no-longer-wanted files are deletable.)
+ */
 int
 consensus_cache_may_overallocate(consensus_cache_t *cache)
 {
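
A caller might consult consensus_cache_may_overallocate() along these
lines. This is a hypothetical sketch, not the actual Tor call site, and all
the cache_* helpers are invented stand-ins for real bookkeeping:

    typedef struct consensus_cache_t consensus_cache_t;
    int consensus_cache_may_overallocate(consensus_cache_t *cache);

    /* Invented helpers, standing in for the cache's real bookkeeping: */
    extern int cache_entry_count(consensus_cache_t *cache);
    extern int cache_max_entries_of(consensus_cache_t *cache);
    extern void cache_evict_oldest(consensus_cache_t *cache);

    static void
    make_room_if_needed(consensus_cache_t *cache)
    {
      if (cache_entry_count(cache) >= cache_max_entries_of(cache) &&
          !consensus_cache_may_overallocate(cache)) {
        /* This platform can unlink files that are still mapped, so we can
         * reclaim space before adding another entry. */
        cache_evict_oldest(cache);
      }
      /* On Windows the limit may be exceeded temporarily; stale entries
       * are removed later, once nothing has them mapped. */
    }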
@@ -113,7 +134,15 @@ consensus_cache_register_with_sandbox(consensus_cache_t *cache,
                                       struct sandbox_cfg_elem **cfg)
 {
 #ifdef MUST_UNMAP_TO_UNLINK
-  /* Our sandbox doesn't support huge limits like we use here.
-   */
+  /* Our Linux sandbox doesn't support huge file lists like the one that would
+   * be generated by using VERY_LARGE_STORAGEDIR_LIMIT above in
+   * consensus_cache_open(). Since the Linux sandbox is the only one we have
+   * right now, we just assert that we never reach this point when we've had
+   * to use VERY_LARGE_STORAGEDIR_LIMIT.
+   *
+   * If at some point in the future we have a different sandbox mechanism that
+   * can handle huge file lists, we can remove this assertion or make it
+   * conditional.
+   */
   tor_assert_nonfatal_unreached();
 #endif
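
The assertion encodes a cross-platform invariant: MUST_UNMAP_TO_UNLINK is
only defined on Windows, while the sandbox is Linux-only, so the two can
never be active at once. Here is a reduced sketch of that reasoning, with a
plain warning standing in for tor_assert_nonfatal_unreached() (which logs a
warning rather than aborting) and an invented function name:

    #include <stdio.h>

    #ifdef _WIN32
    #define MUST_UNMAP_TO_UNLINK
    #endif

    static void
    register_cache_with_sandbox(void)  /* invented name for illustration */
    {
    #ifdef MUST_UNMAP_TO_UNLINK
      /* MUST_UNMAP_TO_UNLINK implies Windows, and the sandbox is
       * Linux-only, so this branch should be unreachable: getting here
       * would mean handing a VERY_LARGE_STORAGEDIR_LIMIT-sized file list
       * to a sandbox that can't represent it. */
      fprintf(stderr, "BUG: sandbox registration on a MUST_UNMAP platform\n");
    #else
      /* ... register each cache file with the Linux sandbox config ... */
    #endif
    }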