prop224: Use fetch and store HSDir indexes.
Based on our #23387 findings, it seems that to maintain 24/7 reachability we need to employ different logic when computing hsdir indices for fetching versus storing. This guarantees that a client will always fetch the current descriptor, while a service always publishes two descriptors, aiming to cover all possible edge cases. For more details, see the next commit and the spec branch.

Signed-off-by: David Goulet <dgoulet@torproject.org>
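In short, a node_t now carries three HSDir indexes instead of a current/next pair. The sketch below is a minimal illustration of the rule this commit implements, not code from the commit: the struct is a simplified copy of the hsdir_index_t introduced here, and pick_hsdir_index (with its is_client/is_second_desc parameters) is a hypothetical helper. Clients always look up HSDirs by the fetch index; services pick the first or second store index depending on which descriptor they are uploading.

#include <stdint.h>

#define DIGEST256_LEN 32

/* Simplified copy of the hsdir_index_t this commit introduces. */
typedef struct hsdir_index_t {
  uint8_t fetch[DIGEST256_LEN];        /* used by clients when fetching */
  uint8_t store_first[DIGEST256_LEN];  /* used by services, 1st descriptor */
  uint8_t store_second[DIGEST256_LEN]; /* used by services, 2nd descriptor */
} hsdir_index_t;

/* Hypothetical helper showing the fetch-vs-store split: a client always
 * searches the hash ring by the fetch index; a service picks the store
 * index matching the descriptor it is uploading. */
const uint8_t *
pick_hsdir_index(const hsdir_index_t *idx, int is_client, int is_second_desc)
{
  if (is_client)
    return idx->fetch;
  return is_second_desc ? idx->store_second : idx->store_first;
}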
parent 72c7f81459
commit b586de78e3
@@ -99,42 +99,65 @@ add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
 /* Helper function: The key is a digest that we compare to a node_t object
  * current hsdir_index. */
 static int
-compare_digest_to_current_hsdir_index(const void *_key, const void **_member)
+compare_digest_to_fetch_hsdir_index(const void *_key, const void **_member)
 {
   const char *key = _key;
   const node_t *node = *_member;
-  return tor_memcmp(key, node->hsdir_index->current, DIGEST256_LEN);
+  return tor_memcmp(key, node->hsdir_index->fetch, DIGEST256_LEN);
 }
 
 /* Helper function: The key is a digest that we compare to a node_t object
  * next hsdir_index. */
 static int
-compare_digest_to_next_hsdir_index(const void *_key, const void **_member)
+compare_digest_to_store_first_hsdir_index(const void *_key,
+                                          const void **_member)
 {
   const char *key = _key;
   const node_t *node = *_member;
-  return tor_memcmp(key, node->hsdir_index->next, DIGEST256_LEN);
+  return tor_memcmp(key, node->hsdir_index->store_first, DIGEST256_LEN);
 }
 
+/* Helper function: The key is a digest that we compare to a node_t object
+ * next hsdir_index. */
+static int
+compare_digest_to_store_second_hsdir_index(const void *_key,
+                                           const void **_member)
+{
+  const char *key = _key;
+  const node_t *node = *_member;
+  return tor_memcmp(key, node->hsdir_index->store_second, DIGEST256_LEN);
+}
+
 /* Helper function: Compare two node_t objects current hsdir_index. */
 static int
-compare_node_current_hsdir_index(const void **a, const void **b)
+compare_node_fetch_hsdir_index(const void **a, const void **b)
 {
   const node_t *node1= *a;
   const node_t *node2 = *b;
-  return tor_memcmp(node1->hsdir_index->current,
-                    node2->hsdir_index->current,
+  return tor_memcmp(node1->hsdir_index->fetch,
+                    node2->hsdir_index->fetch,
                     DIGEST256_LEN);
 }
 
 /* Helper function: Compare two node_t objects next hsdir_index. */
 static int
-compare_node_next_hsdir_index(const void **a, const void **b)
+compare_node_store_first_hsdir_index(const void **a, const void **b)
 {
   const node_t *node1= *a;
   const node_t *node2 = *b;
-  return tor_memcmp(node1->hsdir_index->next,
-                    node2->hsdir_index->next,
+  return tor_memcmp(node1->hsdir_index->store_first,
+                    node2->hsdir_index->store_first,
                     DIGEST256_LEN);
 }
 
+/* Helper function: Compare two node_t objects next hsdir_index. */
+static int
+compare_node_store_second_hsdir_index(const void **a, const void **b)
+{
+  const node_t *node1= *a;
+  const node_t *node2 = *b;
+  return tor_memcmp(node1->hsdir_index->store_second,
+                    node2->hsdir_index->store_second,
+                    DIGEST256_LEN);
+}
+
@@ -1200,10 +1223,9 @@ hs_get_hsdir_spread_store(void)
 }
 
 /** <b>node</b> is an HSDir so make sure that we have assigned an hsdir index.
- * If <b>is_for_next_period</b> is set, also check the next HSDir index field.
  * Return 0 if everything is as expected, else return -1. */
 static int
-node_has_hsdir_index(const node_t *node, int is_for_next_period)
+node_has_hsdir_index(const node_t *node)
 {
   tor_assert(node_supports_v3_hsdir(node));
 
@@ -1215,19 +1237,19 @@ node_has_hsdir_index(const node_t *node, int is_for_next_period)
 
   /* At this point, since the node has a desc, this node must also have an
    * hsdir index. If not, something went wrong, so BUG out. */
-  if (BUG(node->hsdir_index == NULL) ||
-      BUG(tor_mem_is_zero((const char*)node->hsdir_index->current,
-                          DIGEST256_LEN))) {
-    log_warn(LD_BUG, "Zero current index (ri: %p, rs: %p, md: %p)",
-             node->ri, node->rs, node->md);
+  if (BUG(node->hsdir_index == NULL)) {
     return 0;
   }
 
-  if (is_for_next_period &&
-      BUG(tor_mem_is_zero((const char*)node->hsdir_index->next,
+  if (BUG(tor_mem_is_zero((const char*)node->hsdir_index->fetch,
                           DIGEST256_LEN))) {
-    log_warn(LD_BUG, "Zero next index (ri: %p, rs: %p, md: %p)",
-             node->ri, node->rs, node->md);
+    return 0;
+  }
+  if (BUG(tor_mem_is_zero((const char*)node->hsdir_index->store_first,
+                          DIGEST256_LEN))) {
+    return 0;
+  }
+  if (BUG(tor_mem_is_zero((const char*)node->hsdir_index->store_second,
+                          DIGEST256_LEN))) {
     return 0;
   }
 
@@ -1275,7 +1297,7 @@ hs_get_responsible_hsdirs(const ed25519_public_key_t *blinded_pk,
     node_t *n = node_get_mutable_by_id(rs->identity_digest);
     tor_assert(n);
     if (node_supports_v3_hsdir(n) && rs->is_hs_dir) {
-      if (!node_has_hsdir_index(n, is_next_period)) {
+      if (!node_has_hsdir_index(n)) {
        log_info(LD_GENERAL, "Node %s was found without hsdir index.",
                 node_describe(n));
        continue;
@@ -1292,12 +1314,15 @@ hs_get_responsible_hsdirs(const ed25519_public_key_t *blinded_pk,
   /* First thing we have to do is sort all node_t by hsdir_index. The
    * is_next_period tells us if we want the current or the next one. Set the
    * bsearch compare function also while we are at it. */
-  if (is_next_period) {
-    smartlist_sort(sorted_nodes, compare_node_next_hsdir_index);
-    cmp_fct = compare_digest_to_next_hsdir_index;
+  if (is_client) {
+    smartlist_sort(sorted_nodes, compare_node_fetch_hsdir_index);
+    cmp_fct = compare_digest_to_fetch_hsdir_index;
+  } else if (is_next_period) {
+    smartlist_sort(sorted_nodes, compare_node_store_second_hsdir_index);
+    cmp_fct = compare_digest_to_store_second_hsdir_index;
   } else {
-    smartlist_sort(sorted_nodes, compare_node_current_hsdir_index);
-    cmp_fct = compare_digest_to_current_hsdir_index;
+    smartlist_sort(sorted_nodes, compare_node_store_first_hsdir_index);
+    cmp_fct = compare_digest_to_store_first_hsdir_index;
   }
 
   /* For all replicas, we'll select a set of HSDirs using the consensus
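One detail worth flagging in the hunk above: the comparator handed to smartlist_sort() and the cmp_fct used for the later binary search must order by the same index, or the search over the sorted ring silently returns wrong nodes. A generic, self-contained illustration of that invariant follows (plain libc qsort/bsearch, not Tor code; entry_t, cmp_fetch, and find_by_fetch are hypothetical names):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define IDX_LEN 32

typedef struct entry_t {
  uint8_t fetch[IDX_LEN];
  uint8_t store_first[IDX_LEN];
} entry_t;

/* Orders entries by their fetch index only. */
static int
cmp_fetch(const void *a, const void *b)
{
  return memcmp(((const entry_t *)a)->fetch,
                ((const entry_t *)b)->fetch, IDX_LEN);
}

/* The array must be sorted with the same comparator we search with;
 * sorting by fetch and then searching with a store_first comparator
 * would silently return wrong entries. */
static const entry_t *
find_by_fetch(entry_t *arr, size_t n, const uint8_t key[IDX_LEN])
{
  entry_t probe;
  memcpy(probe.fetch, key, IDX_LEN);
  qsort(arr, n, sizeof(entry_t), cmp_fetch);
  return bsearch(&probe, arr, n, sizeof(entry_t), cmp_fetch);
}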
@@ -142,10 +142,12 @@ typedef struct rend_service_port_config_t {
 /* Hidden service directory index used in a node_t which is set once we set
  * the consensus. */
 typedef struct hsdir_index_t {
-  /* The hsdir index for the current time period. */
-  uint8_t current[DIGEST256_LEN];
-  /* The hsdir index for the next time period. */
-  uint8_t next[DIGEST256_LEN];
+  /* Index to use when fetching a descriptor. */
+  uint8_t fetch[DIGEST256_LEN];
+
+  /* Index to store the first and second descriptor. */
+  uint8_t store_first[DIGEST256_LEN];
+  uint8_t store_second[DIGEST256_LEN];
 } hsdir_index_t;
 
 void hs_init(void);
@@ -2102,8 +2102,8 @@ upload_descriptor_to_hsdir(const hs_service_t *service,
   /* Logging so we know where it was sent. */
   {
     int is_next_desc = (service->desc_next == desc);
-    const uint8_t *index = (is_next_desc) ? hsdir->hsdir_index->next :
-                                            hsdir->hsdir_index->current;
+    const uint8_t *index = (is_next_desc) ? hsdir->hsdir_index->store_second:
+                                            hsdir->hsdir_index->store_first;
     log_info(LD_REND, "Service %s %s descriptor of revision %" PRIu64
              " initiated upload request to %s with index %s",
              safe_str_client(service->onion_address),
@@ -181,8 +181,9 @@ node_set_hsdir_index(node_t *node, const networkstatus_t *ns)
 {
   time_t now = approx_time();
   const ed25519_public_key_t *node_identity_pk;
-  uint8_t *next_hsdir_index_srv = NULL, *current_hsdir_index_srv = NULL;
+  uint8_t *fetch_srv = NULL, *store_first_srv = NULL, *store_second_srv = NULL;
   uint64_t next_time_period_num, current_time_period_num;
+  uint64_t fetch_tp, store_first_tp, store_second_tp;
 
   tor_assert(node);
   tor_assert(ns);
@@ -200,43 +201,59 @@ node_set_hsdir_index(node_t *node, const networkstatus_t *ns)
     goto done;
   }
 
-  /* Get the current and next time period number, we might use them both. We
-   * use the valid_after time of the consensus because we use that time to
-   * detect if we are in the overlap period or not. */
+  /* Get the current and next time period number. */
   current_time_period_num = hs_get_time_period_num(0);
   next_time_period_num = hs_get_next_time_period_num(0);
 
-  if (hs_overlap_mode_is_active(ns, now)) {
-    /* We are in overlap mode, this means that our consensus has just cycled
-     * from current SRV to previous SRV so for the _next_ upcoming time
-     * period, we have to use the current SRV and use the previous SRV for the
-     * current time period. If the current or previous SRV can't be found, the
-     * disaster one is returned. */
-    next_hsdir_index_srv = hs_get_current_srv(next_time_period_num, ns);
-    /* The following can be confusing so again, in overlap mode, we use our
-     * previous SRV for our _current_ hsdir index. */
-    current_hsdir_index_srv = hs_get_previous_srv(current_time_period_num, ns);
+  /* We always use the current time period for fetching descs */
+  fetch_tp = current_time_period_num;
+
+  /* Now extract the needed SRVs and time periods for building hsdir indices */
+  if (!hs_overlap_mode_is_active(ns, now)) {
+    fetch_srv = hs_get_current_srv(fetch_tp, ns);
+
+    store_first_tp = hs_get_previous_time_period_num(0);
+    store_second_tp = current_time_period_num;
   } else {
-    /* If NOT in overlap mode, we only need to compute the current hsdir index
-     * for the ongoing time period and thus the current SRV. If it can't be
-     * found, the disaster one is returned. */
-    current_hsdir_index_srv = hs_get_current_srv(current_time_period_num, ns);
+    fetch_srv = hs_get_previous_srv(fetch_tp, ns);
+
+    store_first_tp = current_time_period_num;
+    store_second_tp = next_time_period_num;
   }
 
-  /* Build the current hsdir index. */
-  hs_build_hsdir_index(node_identity_pk, current_hsdir_index_srv,
-                       current_time_period_num, node->hsdir_index->current);
-  if (next_hsdir_index_srv) {
-    /* Build the next hsdir index if we have a next SRV that we can use. */
-    hs_build_hsdir_index(node_identity_pk, next_hsdir_index_srv,
-                         next_time_period_num, node->hsdir_index->next);
+  /* We always use the old SRV for storing the first descriptor and the latest
+   * SRV for storing the second descriptor */
+  store_first_srv = hs_get_previous_srv(store_first_tp, ns);
+  store_second_srv = hs_get_current_srv(store_second_tp, ns);
+
+  /* Build the fetch index. */
+  hs_build_hsdir_index(node_identity_pk, fetch_srv, fetch_tp,
+                       node->hsdir_index->fetch);
+
+  /* If we are in the time segment between SRV#N and TP#N, the fetch index is
+     the same as the first store index */
+  if (!hs_time_between_tp_and_srv(ns, now)) {
+    memcpy(node->hsdir_index->store_first, node->hsdir_index->fetch,
+           sizeof(node->hsdir_index->store_first));
   } else {
-    memset(node->hsdir_index->next, 0, sizeof(node->hsdir_index->next));
+    hs_build_hsdir_index(node_identity_pk, store_first_srv, store_first_tp,
+                         node->hsdir_index->store_first);
   }
 
+  /* If we are in the time segment between TP#N and SRV#N+1, the fetch index is
+     the same as the second store index */
+  if (hs_time_between_tp_and_srv(ns, now)) {
+    memcpy(node->hsdir_index->store_second, node->hsdir_index->fetch,
+           sizeof(node->hsdir_index->store_second));
+  } else {
+    hs_build_hsdir_index(node_identity_pk, store_second_srv, store_second_tp,
+                         node->hsdir_index->store_second);
+  }
+
 done:
-  tor_free(current_hsdir_index_srv);
-  tor_free(next_hsdir_index_srv);
+  tor_free(fetch_srv);
+  tor_free(store_first_srv);
+  tor_free(store_second_srv);
   return;
 }
 
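Condensing node_set_hsdir_index() above into the rule it implements: the fetch index always targets the current time period, the first store index always uses the old (previous) SRV, and the second store index always uses the latest SRV; overlap mode shifts which time periods they cover. The sketch below is not part of the commit (index_pick_t and pick_index_params are hypothetical names) and deliberately ignores the hs_time_between_tp_and_srv() shortcut where a store index is simply copied from the fetch index:

#include <stdint.h>

/* Which SRV and time period feed hs_build_hsdir_index() for each index.
 * "overlap" is the result of hs_overlap_mode_is_active(ns, now);
 * tp_prev/tp_cur/tp_next are the previous/current/next time period
 * numbers. Hypothetical condensation, for illustration only. */
typedef struct index_pick_t {
  int use_previous_srv;  /* 1: previous SRV, 0: current (latest) SRV */
  uint64_t time_period;
} index_pick_t;

static void
pick_index_params(int overlap, uint64_t tp_prev, uint64_t tp_cur,
                  uint64_t tp_next, index_pick_t *fetch,
                  index_pick_t *store_first, index_pick_t *store_second)
{
  /* Fetching always targets the current time period; the SRV flips to
   * the previous one while in overlap mode. */
  fetch->time_period = tp_cur;
  fetch->use_previous_srv = overlap;

  /* The first store index always uses the old (previous) SRV... */
  store_first->use_previous_srv = 1;
  store_first->time_period = overlap ? tp_cur : tp_prev;

  /* ...and the second store index always uses the latest SRV. */
  store_second->use_previous_srv = 0;
  store_second->time_period = overlap ? tp_next : tp_cur;
}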
@@ -390,8 +390,8 @@ helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
   node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
   tt_assert(node);
   node->rs = rs;
-  memcpy(node->hsdir_index->current, curr_hsdir_index,
-         sizeof(node->hsdir_index->current));
+  memcpy(node->hsdir_index->fetch, curr_hsdir_index,
+         sizeof(node->hsdir_index->fetch));
   smartlist_add(ns->routerstatus_list, rs);
 
 done: