2017-03-15 21:13:17 +01:00
|
|
|
/* Copyright (c) 2016-2017, The Tor Project, Inc. */
|
2016-05-31 20:51:30 +02:00
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* \file hs_common.c
|
|
|
|
* \brief Contains code shared between different HS protocol version as well
|
|
|
|
* as useful data structures and accessors used by other subsystems.
|
|
|
|
* The rendcommon.c should only contains code relating to the v2
|
|
|
|
* protocol.
|
|
|
|
**/
|
|
|
|
|
2017-02-13 14:31:34 +01:00
|
|
|
#define HS_COMMON_PRIVATE
|
|
|
|
|
2016-05-31 20:51:30 +02:00
|
|
|
#include "or.h"
|
|
|
|
|
2016-08-25 17:52:29 +02:00
|
|
|
#include "config.h"
|
|
|
|
#include "networkstatus.h"
|
2017-04-19 18:23:43 +02:00
|
|
|
#include "nodelist.h"
|
2017-01-16 19:29:03 +01:00
|
|
|
#include "hs_cache.h"
|
2016-05-31 20:51:30 +02:00
|
|
|
#include "hs_common.h"
|
2017-01-16 19:29:03 +01:00
|
|
|
#include "hs_service.h"
|
2016-05-31 20:51:30 +02:00
|
|
|
#include "rendcommon.h"
|
2017-03-08 23:31:36 +01:00
|
|
|
#include "rendservice.h"
|
2017-04-19 18:23:43 +02:00
|
|
|
#include "router.h"
|
2017-04-18 21:06:44 +02:00
|
|
|
#include "shared_random.h"
|
2017-07-18 15:44:03 +02:00
|
|
|
#include "shared_random_state.h"
|
2016-05-31 20:51:30 +02:00
|
|
|
|
2017-02-06 18:26:36 +01:00
|
|
|
/* Ed25519 Basepoint value. Taken from section 5 of
 * https://tools.ietf.org/html/draft-josefsson-eddsa-ed25519-03
 * Used (as a literal string, including the parentheses and comma) as input
 * to the blinded-key parameter hash in build_blinded_key_param(). */
static const char *str_ed25519_basepoint =
  "(15112221349535400772501151409588531511"
  "454012693041857206046113283949847762202, "
  "463168356949264781694283940034751631413"
  "07993866256225615783033603165251855960)";
|
|
|
|
|
2017-05-10 21:04:40 +02:00
|
|
|
#ifdef HAVE_SYS_UN_H
|
|
|
|
|
|
|
|
/** Given <b>ports</b>, a smartlist containing rend_service_port_config_t,
 * add the given <b>p</b>, an AF_UNIX port, to the list. Return 0 on success
 * else return -ENOSYS if AF_UNIX is not supported (see function in the
 * #else statement below). */
static int
add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
{
  tor_assert(ports);
  tor_assert(p);
  /* Callers must only hand us unix-socket port configurations. */
  tor_assert(p->is_unix_addr);

  smartlist_add(ports, p);
  return 0;
}
|
|
|
|
|
|
|
|
/** Given <b>conn</b> set it to use the given port <b>p</b> values. Return 0
 * on success else return -ENOSYS if AF_UNIX is not supported (see function
 * in the #else statement below). */
static int
set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p)
{
  tor_assert(conn);
  tor_assert(p);
  tor_assert(p->is_unix_addr);

  conn->base_.socket_family = AF_UNIX;
  /* A unix socket has no meaningful IP address; mark it unspecified and use
   * a dummy non-zero port. The real target is the unix_addr path below. */
  tor_addr_make_unspec(&conn->base_.addr);
  conn->base_.port = 1;
  /* Ownership: conn->base_.address gets its own heap copy of the path. */
  conn->base_.address = tor_strdup(p->unix_addr);
  return 0;
}
|
|
|
|
|
|
|
|
#else /* defined(HAVE_SYS_UN_H) */

/* Fallback for platforms without AF_UNIX support: always fail with -ENOSYS
 * so callers can detect the condition, warn, and ignore the port mapping. */
static int
set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p)
{
  (void) conn;
  (void) p;
  return -ENOSYS;
}

/* See above: AF_UNIX is unsupported here, so report -ENOSYS. */
static int
add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
{
  (void) ports;
  (void) p;
  return -ENOSYS;
}

#endif /* HAVE_SYS_UN_H */
|
|
|
|
|
2017-04-19 18:23:43 +02:00
|
|
|
/* Helper function: The key is a digest that we compare to a node_t object
|
|
|
|
* current hsdir_index. */
|
|
|
|
static int
|
|
|
|
compare_digest_to_current_hsdir_index(const void *_key, const void **_member)
|
|
|
|
{
|
|
|
|
const char *key = _key;
|
|
|
|
const node_t *node = *_member;
|
|
|
|
return tor_memcmp(key, node->hsdir_index->current, DIGEST256_LEN);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper function: The key is a digest that we compare to a node_t object
|
|
|
|
* next hsdir_index. */
|
|
|
|
static int
|
|
|
|
compare_digest_to_next_hsdir_index(const void *_key, const void **_member)
|
|
|
|
{
|
|
|
|
const char *key = _key;
|
|
|
|
const node_t *node = *_member;
|
|
|
|
return tor_memcmp(key, node->hsdir_index->next, DIGEST256_LEN);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper function: Compare two node_t objects current hsdir_index. */
|
|
|
|
static int
|
|
|
|
compare_node_current_hsdir_index(const void **a, const void **b)
|
|
|
|
{
|
|
|
|
const node_t *node1= *a;
|
|
|
|
const node_t *node2 = *b;
|
|
|
|
return tor_memcmp(node1->hsdir_index->current,
|
|
|
|
node2->hsdir_index->current,
|
|
|
|
DIGEST256_LEN);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper function: Compare two node_t objects next hsdir_index. */
|
|
|
|
static int
|
|
|
|
compare_node_next_hsdir_index(const void **a, const void **b)
|
|
|
|
{
|
|
|
|
const node_t *node1= *a;
|
|
|
|
const node_t *node2 = *b;
|
|
|
|
return tor_memcmp(node1->hsdir_index->next,
|
|
|
|
node2->hsdir_index->next,
|
|
|
|
DIGEST256_LEN);
|
|
|
|
}
|
|
|
|
|
2017-02-01 15:18:58 +01:00
|
|
|
/* Allocate and return a string containing the path to filename in directory.
|
|
|
|
* This function will never return NULL. The caller must free this path. */
|
|
|
|
char *
|
|
|
|
hs_path_from_filename(const char *directory, const char *filename)
|
|
|
|
{
|
|
|
|
char *file_path = NULL;
|
|
|
|
|
|
|
|
tor_assert(directory);
|
|
|
|
tor_assert(filename);
|
|
|
|
|
|
|
|
tor_asprintf(&file_path, "%s%s%s", directory, PATH_SEPARATOR, filename);
|
|
|
|
return file_path;
|
|
|
|
}
|
|
|
|
|
2016-12-22 22:40:21 +01:00
|
|
|
/* Make sure that the directory for <b>service</b> is private, using the config
|
|
|
|
* <b>username</b>.
|
|
|
|
* If <b>create</b> is true:
|
|
|
|
* - if the directory exists, change permissions if needed,
|
|
|
|
* - if the directory does not exist, create it with the correct permissions.
|
|
|
|
* If <b>create</b> is false:
|
|
|
|
* - if the directory exists, check permissions,
|
|
|
|
* - if the directory does not exist, check if we think we can create it.
|
|
|
|
* Return 0 on success, -1 on failure. */
|
|
|
|
int
|
|
|
|
hs_check_service_private_dir(const char *username, const char *path,
|
|
|
|
unsigned int dir_group_readable,
|
|
|
|
unsigned int create)
|
|
|
|
{
|
|
|
|
cpd_check_t check_opts = CPD_NONE;
|
|
|
|
|
|
|
|
tor_assert(path);
|
|
|
|
|
|
|
|
if (create) {
|
|
|
|
check_opts |= CPD_CREATE;
|
|
|
|
} else {
|
|
|
|
check_opts |= CPD_CHECK_MODE_ONLY;
|
|
|
|
check_opts |= CPD_CHECK;
|
|
|
|
}
|
|
|
|
if (dir_group_readable) {
|
|
|
|
check_opts |= CPD_GROUP_READ;
|
|
|
|
}
|
|
|
|
/* Check/create directory */
|
|
|
|
if (check_private_dir(path, check_opts, username) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-13 14:31:34 +01:00
|
|
|
/** Get the default HS time period length in minutes from the consensus. */
|
|
|
|
STATIC uint64_t
|
|
|
|
get_time_period_length(void)
|
|
|
|
{
|
2017-07-18 17:10:26 +02:00
|
|
|
/* If we are on a test network, make the time period smaller than normal so
|
|
|
|
that we actually see it rotate. Specifically, make it the same length as
|
|
|
|
an SRV protocol run. */
|
|
|
|
if (get_options()->TestingTorNetwork) {
|
|
|
|
unsigned run_duration = sr_state_get_protocol_run_duration();
|
|
|
|
/* An SRV run should take more than a minute (it's 24 rounds) */
|
|
|
|
tor_assert_nonfatal(run_duration > 60);
|
|
|
|
/* Turn it from seconds to minutes before returning: */
|
|
|
|
return sr_state_get_protocol_run_duration() / 60;
|
|
|
|
}
|
|
|
|
|
2017-02-13 14:31:34 +01:00
|
|
|
int32_t time_period_length = networkstatus_get_param(NULL, "hsdir-interval",
|
|
|
|
HS_TIME_PERIOD_LENGTH_DEFAULT,
|
|
|
|
HS_TIME_PERIOD_LENGTH_MIN,
|
|
|
|
HS_TIME_PERIOD_LENGTH_MAX);
|
|
|
|
/* Make sure it's a positive value. */
|
|
|
|
tor_assert(time_period_length >= 0);
|
|
|
|
/* uint64_t will always be able to contain a int32_t */
|
|
|
|
return (uint64_t) time_period_length;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Get the HS time period number at time <b>now</b> */
|
2017-02-06 18:26:36 +01:00
|
|
|
uint64_t
|
|
|
|
hs_get_time_period_num(time_t now)
|
2017-02-13 14:31:34 +01:00
|
|
|
{
|
|
|
|
uint64_t time_period_num;
|
2017-07-24 12:31:17 +02:00
|
|
|
|
|
|
|
/* Start by calculating minutes since the epoch */
|
2017-02-13 14:31:34 +01:00
|
|
|
uint64_t time_period_length = get_time_period_length();
|
|
|
|
uint64_t minutes_since_epoch = now / 60;
|
|
|
|
|
2017-07-24 12:31:17 +02:00
|
|
|
/* Apply the rotation offset as specified by prop224 (section
|
|
|
|
* [TIME-PERIODS]), so that new time periods synchronize nicely with SRV
|
|
|
|
* publication */
|
|
|
|
unsigned int time_period_rotation_offset = sr_state_get_phase_duration();
|
|
|
|
time_period_rotation_offset /= 60; /* go from seconds to minutes */
|
|
|
|
tor_assert(minutes_since_epoch > time_period_rotation_offset);
|
|
|
|
minutes_since_epoch -= time_period_rotation_offset;
|
2017-02-13 14:31:34 +01:00
|
|
|
|
|
|
|
/* Calculate the time period */
|
|
|
|
time_period_num = minutes_since_epoch / time_period_length;
|
|
|
|
return time_period_num;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Get the number of the _upcoming_ HS time period, given that the current
|
|
|
|
* time is <b>now</b>. */
|
|
|
|
uint64_t
|
|
|
|
hs_get_next_time_period_num(time_t now)
|
|
|
|
{
|
2017-02-06 18:26:36 +01:00
|
|
|
return hs_get_time_period_num(now) + 1;
|
2017-02-13 14:31:34 +01:00
|
|
|
}
|
|
|
|
|
2017-07-18 15:44:03 +02:00
|
|
|
/* Return the start time of the upcoming time period based on <b>now</b>. */
|
|
|
|
time_t
|
|
|
|
hs_get_start_time_of_next_time_period(time_t now)
|
|
|
|
{
|
|
|
|
uint64_t time_period_length = get_time_period_length();
|
|
|
|
|
|
|
|
/* Get start time of next time period */
|
|
|
|
uint64_t next_time_period_num = hs_get_next_time_period_num(now);
|
|
|
|
uint64_t start_of_next_tp_in_mins = next_time_period_num *time_period_length;
|
|
|
|
|
|
|
|
/* Apply rotation offset as specified by prop224 section [TIME-PERIODS] */
|
|
|
|
unsigned int time_period_rotation_offset = sr_state_get_phase_duration();
|
|
|
|
return start_of_next_tp_in_mins * 60 + time_period_rotation_offset;
|
|
|
|
}
|
|
|
|
|
2016-05-31 20:51:30 +02:00
|
|
|
/* Create a new rend_data_t for a specific given <b>version</b>.
|
|
|
|
* Return a pointer to the newly allocated data structure. */
|
|
|
|
static rend_data_t *
|
|
|
|
rend_data_alloc(uint32_t version)
|
|
|
|
{
|
|
|
|
rend_data_t *rend_data = NULL;
|
|
|
|
|
|
|
|
switch (version) {
|
|
|
|
case HS_VERSION_TWO:
|
|
|
|
{
|
|
|
|
rend_data_v2_t *v2 = tor_malloc_zero(sizeof(*v2));
|
|
|
|
v2->base_.version = HS_VERSION_TWO;
|
|
|
|
v2->base_.hsdirs_fp = smartlist_new();
|
|
|
|
rend_data = &v2->base_;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
tor_assert(0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rend_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Free all storage associated with <b>data</b> */
|
|
|
|
void
|
|
|
|
rend_data_free(rend_data_t *data)
|
|
|
|
{
|
|
|
|
if (!data) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* By using our allocation function, this should always be set. */
|
|
|
|
tor_assert(data->hsdirs_fp);
|
|
|
|
/* Cleanup the HSDir identity digest. */
|
|
|
|
SMARTLIST_FOREACH(data->hsdirs_fp, char *, d, tor_free(d));
|
|
|
|
smartlist_free(data->hsdirs_fp);
|
|
|
|
/* Depending on the version, cleanup. */
|
|
|
|
switch (data->version) {
|
|
|
|
case HS_VERSION_TWO:
|
|
|
|
{
|
|
|
|
rend_data_v2_t *v2_data = TO_REND_DATA_V2(data);
|
|
|
|
tor_free(v2_data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
tor_assert(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate and return a deep copy of <b>data</b>. */
|
|
|
|
rend_data_t *
|
|
|
|
rend_data_dup(const rend_data_t *data)
|
|
|
|
{
|
|
|
|
rend_data_t *data_dup = NULL;
|
|
|
|
smartlist_t *hsdirs_fp = smartlist_new();
|
|
|
|
|
|
|
|
tor_assert(data);
|
|
|
|
tor_assert(data->hsdirs_fp);
|
|
|
|
|
|
|
|
SMARTLIST_FOREACH(data->hsdirs_fp, char *, fp,
|
|
|
|
smartlist_add(hsdirs_fp, tor_memdup(fp, DIGEST_LEN)));
|
|
|
|
|
|
|
|
switch (data->version) {
|
|
|
|
case HS_VERSION_TWO:
|
|
|
|
{
|
|
|
|
rend_data_v2_t *v2_data = tor_memdup(TO_REND_DATA_V2(data),
|
|
|
|
sizeof(*v2_data));
|
|
|
|
data_dup = &v2_data->base_;
|
|
|
|
data_dup->hsdirs_fp = hsdirs_fp;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
tor_assert(0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return data_dup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compute the descriptor ID for each HS descriptor replica and save them. A
 * valid onion address must be present in the <b>rend_data</b>.
 *
 * Return 0 on success else -1. */
static int
compute_desc_id(rend_data_t *rend_data)
{
  int ret = 0;
  unsigned replica;
  /* v2 descriptor IDs are time dependent. */
  time_t now = time(NULL);

  tor_assert(rend_data);

  switch (rend_data->version) {
  case HS_VERSION_TWO:
  {
    rend_data_v2_t *v2_data = TO_REND_DATA_V2(rend_data);
    /* Compute descriptor ID for each replicas. */
    for (replica = 0; replica < ARRAY_LENGTH(v2_data->descriptor_id);
         replica++) {
      ret = rend_compute_v2_desc_id(v2_data->descriptor_id[replica],
                                    v2_data->onion_address,
                                    v2_data->descriptor_cookie,
                                    now, replica);
      if (ret < 0) {
        /* Propagate the negative return value to the caller. */
        goto end;
      }
    }
    break;
  }
  default:
    tor_assert(0);
  }

 end:
  return ret;
}
|
|
|
|
|
|
|
|
/* Allocate and initialize a rend_data_t object for a service using the
|
|
|
|
* provided arguments. All arguments are optional (can be NULL), except from
|
2016-11-07 20:03:32 +01:00
|
|
|
* <b>onion_address</b> which MUST be set. The <b>pk_digest</b> is the hash of
|
|
|
|
* the service private key. The <b>cookie</b> is the rendezvous cookie and
|
|
|
|
* <b>auth_type</b> is which authentiation this service is configured with.
|
2016-05-31 20:51:30 +02:00
|
|
|
*
|
|
|
|
* Return a valid rend_data_t pointer. This only returns a version 2 object of
|
|
|
|
* rend_data_t. */
|
|
|
|
rend_data_t *
|
|
|
|
rend_data_service_create(const char *onion_address, const char *pk_digest,
|
|
|
|
const uint8_t *cookie, rend_auth_type_t auth_type)
|
|
|
|
{
|
|
|
|
/* Create a rend_data_t object for version 2. */
|
|
|
|
rend_data_t *rend_data = rend_data_alloc(HS_VERSION_TWO);
|
|
|
|
rend_data_v2_t *v2= TO_REND_DATA_V2(rend_data);
|
|
|
|
|
|
|
|
/* We need at least one else the call is wrong. */
|
|
|
|
tor_assert(onion_address != NULL);
|
|
|
|
|
|
|
|
if (pk_digest) {
|
|
|
|
memcpy(v2->rend_pk_digest, pk_digest, sizeof(v2->rend_pk_digest));
|
|
|
|
}
|
|
|
|
if (cookie) {
|
|
|
|
memcpy(rend_data->rend_cookie, cookie, sizeof(rend_data->rend_cookie));
|
|
|
|
}
|
|
|
|
|
|
|
|
strlcpy(v2->onion_address, onion_address, sizeof(v2->onion_address));
|
|
|
|
v2->auth_type = auth_type;
|
|
|
|
|
|
|
|
return rend_data;
|
|
|
|
}
|
|
|
|
|
2016-11-07 20:03:32 +01:00
|
|
|
/* Allocate and initialize a rend_data_t object for a client request using the
|
|
|
|
* given arguments. Either an onion address or a descriptor ID is needed. Both
|
|
|
|
* can be given but in this case only the onion address will be used to make
|
|
|
|
* the descriptor fetch. The <b>cookie</b> is the rendezvous cookie and
|
|
|
|
* <b>auth_type</b> is which authentiation the service is configured with.
|
2016-05-31 20:51:30 +02:00
|
|
|
*
|
|
|
|
* Return a valid rend_data_t pointer or NULL on error meaning the
|
|
|
|
* descriptor IDs couldn't be computed from the given data. */
|
|
|
|
rend_data_t *
|
|
|
|
rend_data_client_create(const char *onion_address, const char *desc_id,
|
|
|
|
const char *cookie, rend_auth_type_t auth_type)
|
|
|
|
{
|
|
|
|
/* Create a rend_data_t object for version 2. */
|
|
|
|
rend_data_t *rend_data = rend_data_alloc(HS_VERSION_TWO);
|
|
|
|
rend_data_v2_t *v2= TO_REND_DATA_V2(rend_data);
|
|
|
|
|
|
|
|
/* We need at least one else the call is wrong. */
|
|
|
|
tor_assert(onion_address != NULL || desc_id != NULL);
|
|
|
|
|
|
|
|
if (cookie) {
|
|
|
|
memcpy(v2->descriptor_cookie, cookie, sizeof(v2->descriptor_cookie));
|
|
|
|
}
|
|
|
|
if (desc_id) {
|
|
|
|
memcpy(v2->desc_id_fetch, desc_id, sizeof(v2->desc_id_fetch));
|
|
|
|
}
|
|
|
|
if (onion_address) {
|
|
|
|
strlcpy(v2->onion_address, onion_address, sizeof(v2->onion_address));
|
|
|
|
if (compute_desc_id(rend_data) < 0) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
v2->auth_type = auth_type;
|
|
|
|
|
|
|
|
return rend_data;
|
|
|
|
|
|
|
|
error:
|
|
|
|
rend_data_free(rend_data);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the onion address from the rend data. Depending on the version,
 * the size of the address can vary but it's always NUL terminated. The
 * returned pointer aliases <b>rend_data</b>; do not free it. */
const char *
rend_data_get_address(const rend_data_t *rend_data)
{
  tor_assert(rend_data);

  /* Dispatch on the rend data version. */
  switch (rend_data->version) {
  case HS_VERSION_TWO:
    return TO_REND_DATA_V2(rend_data)->onion_address;
  default:
    /* We should always have a supported version. */
    tor_assert(0);
  }
}
|
|
|
|
|
|
|
|
/* Return the descriptor ID for a specific replica number from the rend
 * data. The returned data is a binary digest and depending on the version its
 * size can vary. The size of the descriptor ID is put in <b>len_out</b> if
 * non NULL. The returned pointer aliases <b>rend_data</b>; do not free it. */
const char *
rend_data_get_desc_id(const rend_data_t *rend_data, uint8_t replica,
                      size_t *len_out)
{
  tor_assert(rend_data);

  switch (rend_data->version) {
  case HS_VERSION_TWO:
    /* v2 stores one DIGEST_LEN descriptor ID per replica; bound-check. */
    tor_assert(replica < REND_NUMBER_OF_NON_CONSECUTIVE_REPLICAS);
    if (len_out) {
      *len_out = DIGEST_LEN;
    }
    return TO_REND_DATA_V2(rend_data)->descriptor_id[replica];
  default:
    /* We should always have a supported version. */
    tor_assert(0);
  }
}
|
|
|
|
|
|
|
|
/* Return the public key digest using the given <b>rend_data</b>. The size of
 * the digest is put in <b>len_out</b> (if set) which can differ depending on
 * the version. The returned pointer aliases <b>rend_data</b>; do not free
 * it. */
const uint8_t *
rend_data_get_pk_digest(const rend_data_t *rend_data, size_t *len_out)
{
  tor_assert(rend_data);

  switch (rend_data->version) {
  case HS_VERSION_TWO:
  {
    const rend_data_v2_t *v2_data = TO_REND_DATA_V2(rend_data);
    if (len_out) {
      /* The v2 digest length is fixed by the struct field's size. */
      *len_out = sizeof(v2_data->rend_pk_digest);
    }
    return (const uint8_t *) v2_data->rend_pk_digest;
  }
  default:
    /* We should always have a supported version. */
    tor_assert(0);
  }
}
|
2016-12-23 16:46:14 +01:00
|
|
|
|
2017-04-18 21:06:44 +02:00
|
|
|
/* Using the given time period number, compute the disaster shared random
 * value and put it in srv_out. It MUST be at least DIGEST256_LEN bytes. */
static void
get_disaster_srv(uint64_t time_period_num, uint8_t *srv_out)
{
  crypto_digest_t *digest;

  tor_assert(srv_out);

  digest = crypto_digest256_new(DIGEST_SHA3_256);

  /* Start setting up payload:
   * H("shared-random-disaster" | INT_8(period_length) | INT_8(period_num)) */
  crypto_digest_add_bytes(digest, HS_SRV_DISASTER_PREFIX,
                          HS_SRV_DISASTER_PREFIX_LEN);

  /* Setup INT_8(period_length) | INT_8(period_num) */
  {
    uint64_t time_period_length = get_time_period_length();
    char period_stuff[sizeof(uint64_t)*2];
    size_t offset = 0;
    /* Both values are hashed in network (big-endian) byte order so the
     * result is the same on all platforms. */
    set_uint64(period_stuff, tor_htonll(time_period_length));
    offset += sizeof(uint64_t);
    set_uint64(period_stuff+offset, tor_htonll(time_period_num));
    offset += sizeof(uint64_t);
    tor_assert(offset == sizeof(period_stuff));

    crypto_digest_add_bytes(digest, period_stuff, sizeof(period_stuff));
  }

  crypto_digest_get_digest(digest, (char *) srv_out, DIGEST256_LEN);
  crypto_digest_free(digest);
}
|
|
|
|
|
2017-02-06 18:26:36 +01:00
|
|
|
/* When creating a blinded key, we need a parameter which construction is as
 * follow: H(pubkey | [secret] | ed25519-basepoint | nonce).
 *
 * The nonce has a pre-defined format which uses the time period number
 * period_num and the period length period_length.
 *
 * The secret of size secret_len is optional meaning that it can be NULL and
 * thus will be ignored for the param construction.
 *
 * The result is put in param_out (must be at least DIGEST256_LEN bytes). */
static void
build_blinded_key_param(const ed25519_public_key_t *pubkey,
                        const uint8_t *secret, size_t secret_len,
                        uint64_t period_num, uint64_t period_length,
                        uint8_t *param_out)
{
  size_t offset = 0;
  const char blind_str[] = "Derive temporary signing key";
  uint8_t nonce[HS_KEYBLIND_NONCE_LEN];
  crypto_digest_t *digest;

  tor_assert(pubkey);
  tor_assert(param_out);

  /* Create the nonce N. The construction is as follow:
   * N = "key-blind" || INT_8(period_num) || INT_8(period_length)
   * Integers are encoded in network (big-endian) byte order. */
  memcpy(nonce, HS_KEYBLIND_NONCE_PREFIX, HS_KEYBLIND_NONCE_PREFIX_LEN);
  offset += HS_KEYBLIND_NONCE_PREFIX_LEN;
  set_uint64(nonce + offset, tor_htonll(period_num));
  offset += sizeof(uint64_t);
  set_uint64(nonce + offset, tor_htonll(period_length));
  offset += sizeof(uint64_t);
  tor_assert(offset == HS_KEYBLIND_NONCE_LEN);

  /* Generate the parameter h and the construction is as follow:
   * h = H(BLIND_STRING | pubkey | [secret] | ed25519-basepoint | N) */
  digest = crypto_digest256_new(DIGEST_SHA3_256);
  /* NOTE: sizeof(blind_str) includes the terminating NUL byte; the NUL is
   * deliberately part of the hashed BLIND_STRING. */
  crypto_digest_add_bytes(digest, blind_str, sizeof(blind_str));
  crypto_digest_add_bytes(digest, (char *) pubkey, ED25519_PUBKEY_LEN);
  /* Optional secret. */
  if (secret) {
    crypto_digest_add_bytes(digest, (char *) secret, secret_len);
  }
  crypto_digest_add_bytes(digest, str_ed25519_basepoint,
                          strlen(str_ed25519_basepoint));
  crypto_digest_add_bytes(digest, (char *) nonce, sizeof(nonce));

  /* Extract digest and put it in the param. */
  crypto_digest_get_digest(digest, (char *) param_out, DIGEST256_LEN);
  crypto_digest_free(digest);

  /* Defense in depth: clear the nonce buffer from the stack. */
  memwipe(nonce, 0, sizeof(nonce));
}
|
|
|
|
|
2017-01-30 23:33:18 +01:00
|
|
|
/* Using an ed25519 public key and version to build the checksum of an
 * address. Put in checksum_out. Format is:
 *    SHA3-256(".onion checksum" || PUBKEY || VERSION)
 *
 * checksum_out must be large enough to receive 32 bytes (DIGEST256_LEN). */
static void
build_hs_checksum(const ed25519_public_key_t *key, uint8_t version,
                  uint8_t *checksum_out)
{
  size_t offset = 0;
  char data[HS_SERVICE_ADDR_CHECKSUM_INPUT_LEN];

  /* Build checksum data. */
  memcpy(data, HS_SERVICE_ADDR_CHECKSUM_PREFIX,
         HS_SERVICE_ADDR_CHECKSUM_PREFIX_LEN);
  offset += HS_SERVICE_ADDR_CHECKSUM_PREFIX_LEN;
  memcpy(data + offset, key->pubkey, ED25519_PUBKEY_LEN);
  offset += ED25519_PUBKEY_LEN;
  set_uint8(data + offset, version);
  offset += sizeof(version);
  /* The payload must fill the declared input length exactly. */
  tor_assert(offset == HS_SERVICE_ADDR_CHECKSUM_INPUT_LEN);

  /* Hash the data payload to create the checksum. */
  crypto_digest256((char *) checksum_out, data, sizeof(data),
                   DIGEST_SHA3_256);
}
|
|
|
|
|
|
|
|
/* Using an ed25519 public key, checksum and version to build the binary
 * representation of a service address. Put in addr_out. Format is:
 *    addr_out = PUBKEY || CHECKSUM || VERSION
 *
 * addr_out must be large enough to receive HS_SERVICE_ADDR_LEN bytes. */
static void
build_hs_address(const ed25519_public_key_t *key, const uint8_t *checksum,
                 uint8_t version, char *addr_out)
{
  size_t offset = 0;

  tor_assert(key);
  tor_assert(checksum);

  memcpy(addr_out, key->pubkey, ED25519_PUBKEY_LEN);
  offset += ED25519_PUBKEY_LEN;
  /* Only the first HS_SERVICE_ADDR_CHECKSUM_LEN_USED bytes of the checksum
   * are embedded in the address. */
  memcpy(addr_out + offset, checksum, HS_SERVICE_ADDR_CHECKSUM_LEN_USED);
  offset += HS_SERVICE_ADDR_CHECKSUM_LEN_USED;
  set_uint8(addr_out + offset, version);
  offset += sizeof(uint8_t);
  tor_assert(offset == HS_SERVICE_ADDR_LEN);
}
|
|
|
|
|
|
|
|
/* Helper for hs_parse_address(): Using a binary representation of a service
 * address, parse its content into the key_out, checksum_out and version_out.
 * Any out variable can be NULL in case the caller would want only one field.
 * checksum_out MUST at least be 2 bytes long. address must be at least
 * HS_SERVICE_ADDR_LEN bytes but doesn't need to be NUL terminated. */
static void
hs_parse_address_impl(const char *address, ed25519_public_key_t *key_out,
                      uint8_t *checksum_out, uint8_t *version_out)
{
  size_t offset = 0;

  tor_assert(address);

  /* Layout mirrors build_hs_address(): PUBKEY || CHECKSUM || VERSION. */
  if (key_out) {
    /* First is the key. */
    memcpy(key_out->pubkey, address, ED25519_PUBKEY_LEN);
  }
  offset += ED25519_PUBKEY_LEN;
  if (checksum_out) {
    /* Followed by a 2 bytes checksum. */
    memcpy(checksum_out, address + offset, HS_SERVICE_ADDR_CHECKSUM_LEN_USED);
  }
  offset += HS_SERVICE_ADDR_CHECKSUM_LEN_USED;
  if (version_out) {
    /* Finally, version value is 1 byte. */
    *version_out = get_uint8(address + offset);
  }
  offset += sizeof(uint8_t);
  /* Extra safety. */
  tor_assert(offset == HS_SERVICE_ADDR_LEN);
}
|
|
|
|
|
2017-05-11 16:16:28 +02:00
|
|
|
/* Using the given identity public key and a blinded public key, compute the
 * subcredential and put it in subcred_out (must be of size DIGEST256_LEN).
 * This can't fail. */
void
hs_get_subcredential(const ed25519_public_key_t *identity_pk,
                     const ed25519_public_key_t *blinded_pk,
                     uint8_t *subcred_out)
{
  uint8_t credential[DIGEST256_LEN];
  crypto_digest_t *digest;

  tor_assert(identity_pk);
  tor_assert(blinded_pk);
  tor_assert(subcred_out);

  /* First, build the credential. Construction is as follow:
   *  credential = H("credential" | public-identity-key) */
  digest = crypto_digest256_new(DIGEST_SHA3_256);
  crypto_digest_add_bytes(digest, HS_CREDENTIAL_PREFIX,
                          HS_CREDENTIAL_PREFIX_LEN);
  crypto_digest_add_bytes(digest, (const char *) identity_pk->pubkey,
                          ED25519_PUBKEY_LEN);
  crypto_digest_get_digest(digest, (char *) credential, DIGEST256_LEN);
  crypto_digest_free(digest);

  /* Now, compute the subcredential. Construction is as follow:
   *  subcredential = H("subcredential" | credential | blinded-public-key). */
  digest = crypto_digest256_new(DIGEST_SHA3_256);
  crypto_digest_add_bytes(digest, HS_SUBCREDENTIAL_PREFIX,
                          HS_SUBCREDENTIAL_PREFIX_LEN);
  crypto_digest_add_bytes(digest, (const char *) credential,
                          sizeof(credential));
  crypto_digest_add_bytes(digest, (const char *) blinded_pk->pubkey,
                          ED25519_PUBKEY_LEN);
  crypto_digest_get_digest(digest, (char *) subcred_out, DIGEST256_LEN);
  crypto_digest_free(digest);

  /* The intermediate credential is sensitive; wipe it from the stack. */
  memwipe(credential, 0, sizeof(credential));
}
|
|
|
|
|
2017-08-03 15:02:51 +02:00
|
|
|
/* From the given list of hidden service ports, find the ones that match the
 * given edge connection conn, pick one at random and use it to set the
 * connection address. Return 0 on success or -1 if none. */
int
hs_set_conn_addr_port(const smartlist_t *ports, edge_connection_t *conn)
{
  rend_service_port_config_t *chosen_port;
  unsigned int warn_once = 0;
  smartlist_t *matching_ports;

  tor_assert(ports);
  tor_assert(conn);

  /* Collect every port config whose virtual port matches the connection's
   * destination port. */
  matching_ports = smartlist_new();
  SMARTLIST_FOREACH_BEGIN(ports, rend_service_port_config_t *, p) {
    if (TO_CONN(conn)->port != p->virtual_port) {
      continue;
    }
    if (!(p->is_unix_addr)) {
      smartlist_add(matching_ports, p);
    } else {
      /* add_unix_port() fails with -ENOSYS when AF_UNIX is unsupported. */
      if (add_unix_port(matching_ports, p)) {
        if (!warn_once) {
          /* Unix port not supported so warn only once. */
          log_warn(LD_REND, "Saw AF_UNIX virtual port mapping for port %d "
                            "which is unsupported on this platform. "
                            "Ignoring it.",
                   TO_CONN(conn)->port);
        }
        warn_once++;
      }
    }
  } SMARTLIST_FOREACH_END(p);

  /* Pick uniformly at random among the matches (NULL if list is empty). */
  chosen_port = smartlist_choose(matching_ports);
  smartlist_free(matching_ports);
  if (chosen_port) {
    if (!(chosen_port->is_unix_addr)) {
      /* Get a non-AF_UNIX connection ready for connection_exit_connect() */
      tor_addr_copy(&TO_CONN(conn)->addr, &chosen_port->real_addr);
      TO_CONN(conn)->port = chosen_port->real_port;
    } else {
      if (set_unix_port(conn, chosen_port)) {
        /* Simply impossible to end up here else we were able to add a Unix
         * port without AF_UNIX support... ? */
        tor_assert(0);
      }
    }
  }
  return (chosen_port) ? 0 : -1;
}
|
|
|
|
|
2017-01-30 23:33:18 +01:00
|
|
|
/* Using a base32 representation of a service address, parse its content into
 * the key_out, checksum_out and version_out. Any out variable can be NULL in
 * case the caller would want only one field. checksum_out MUST at least be 2
 * bytes long.
 *
 * Return 0 if parsing went well; return -1 in case of error. */
int
hs_parse_address(const char *address, ed25519_public_key_t *key_out,
                 uint8_t *checksum_out, uint8_t *version_out)
{
  /* Binary form of the address after base32 decoding. */
  char decoded[HS_SERVICE_ADDR_LEN];

  tor_assert(address);

  /* Obvious length check. */
  if (strlen(address) != HS_SERVICE_ADDR_LEN_BASE32) {
    log_warn(LD_REND, "Service address %s has an invalid length. "
                      "Expected %lu but got %lu.",
             escaped_safe_str(address),
             (unsigned long) HS_SERVICE_ADDR_LEN_BASE32,
             (unsigned long) strlen(address));
    goto invalid;
  }

  /* Decode address so we can extract needed fields. */
  if (base32_decode(decoded, sizeof(decoded), address, strlen(address)) < 0) {
    log_warn(LD_REND, "Service address %s can't be decoded.",
             escaped_safe_str(address));
    goto invalid;
  }

  /* Parse the decoded address into the fields we need. */
  hs_parse_address_impl(decoded, key_out, checksum_out, version_out);

  return 0;
 invalid:
  return -1;
}
|
|
|
|
|
|
|
|
/* Validate a given onion address. The length, the base32 decoding and
|
|
|
|
* checksum are validated. Return 1 if valid else 0. */
|
|
|
|
int
|
|
|
|
hs_address_is_valid(const char *address)
|
|
|
|
{
|
|
|
|
uint8_t version;
|
2017-07-12 19:41:33 +02:00
|
|
|
uint8_t checksum[HS_SERVICE_ADDR_CHECKSUM_LEN_USED];
|
|
|
|
uint8_t target_checksum[DIGEST256_LEN];
|
2017-01-30 23:33:18 +01:00
|
|
|
ed25519_public_key_t key;
|
|
|
|
|
|
|
|
/* Parse the decoded address into the fields we need. */
|
|
|
|
if (hs_parse_address(address, &key, checksum, &version) < 0) {
|
|
|
|
goto invalid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the checksum it's suppose to be and compare it with what we have
|
|
|
|
* encoded in the address. */
|
|
|
|
build_hs_checksum(&key, version, target_checksum);
|
|
|
|
if (tor_memcmp(checksum, target_checksum, sizeof(checksum))) {
|
|
|
|
log_warn(LD_REND, "Service address %s invalid checksum.",
|
|
|
|
escaped_safe_str(address));
|
|
|
|
goto invalid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Valid address. */
|
|
|
|
return 1;
|
|
|
|
invalid:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Build a service address using an ed25519 public key and a given version.
|
|
|
|
* The returned address is base32 encoded and put in addr_out. The caller MUST
|
|
|
|
* make sure the addr_out is at least HS_SERVICE_ADDR_LEN_BASE32 + 1 long.
|
|
|
|
*
|
|
|
|
* Format is as follow:
|
|
|
|
* base32(PUBKEY || CHECKSUM || VERSION)
|
|
|
|
* CHECKSUM = H(".onion checksum" || PUBKEY || VERSION)
|
|
|
|
* */
|
|
|
|
void
|
|
|
|
hs_build_address(const ed25519_public_key_t *key, uint8_t version,
|
|
|
|
char *addr_out)
|
|
|
|
{
|
2017-07-12 19:41:33 +02:00
|
|
|
uint8_t checksum[DIGEST256_LEN];
|
|
|
|
char address[HS_SERVICE_ADDR_LEN];
|
2017-01-30 23:33:18 +01:00
|
|
|
|
|
|
|
tor_assert(key);
|
|
|
|
tor_assert(addr_out);
|
|
|
|
|
|
|
|
/* Get the checksum of the address. */
|
|
|
|
build_hs_checksum(key, version, checksum);
|
|
|
|
/* Get the binary address representation. */
|
|
|
|
build_hs_address(key, checksum, version, address);
|
|
|
|
|
|
|
|
/* Encode the address. addr_out will be NUL terminated after this. */
|
|
|
|
base32_encode(addr_out, HS_SERVICE_ADDR_LEN_BASE32 + 1, address,
|
|
|
|
sizeof(address));
|
|
|
|
/* Validate what we just built. */
|
|
|
|
tor_assert(hs_address_is_valid(addr_out));
|
|
|
|
}
|
|
|
|
|
2017-07-23 16:43:16 +02:00
|
|
|
/* Return a newly allocated copy of lspec. */
|
|
|
|
link_specifier_t *
|
|
|
|
hs_link_specifier_dup(const link_specifier_t *lspec)
|
|
|
|
{
|
|
|
|
link_specifier_t *dup = link_specifier_new();
|
|
|
|
memcpy(dup, lspec, sizeof(*dup));
|
|
|
|
/* The unrecognized field is a dynamic array so make sure to copy its
|
|
|
|
* content and not the pointer. */
|
|
|
|
link_specifier_setlen_un_unrecognized(
|
|
|
|
dup, link_specifier_getlen_un_unrecognized(lspec));
|
|
|
|
if (link_specifier_getlen_un_unrecognized(dup)) {
|
|
|
|
memcpy(link_specifier_getarray_un_unrecognized(dup),
|
|
|
|
link_specifier_getconstarray_un_unrecognized(lspec),
|
|
|
|
link_specifier_getlen_un_unrecognized(dup));
|
|
|
|
}
|
|
|
|
return dup;
|
|
|
|
}
|
|
|
|
|
2017-02-06 18:26:36 +01:00
|
|
|
/* From a given ed25519 public key pk and an optional secret, compute a
|
|
|
|
* blinded public key and put it in blinded_pk_out. This is only useful to
|
|
|
|
* the client side because the client only has access to the identity public
|
|
|
|
* key of the service. */
|
|
|
|
void
|
|
|
|
hs_build_blinded_pubkey(const ed25519_public_key_t *pk,
|
|
|
|
const uint8_t *secret, size_t secret_len,
|
|
|
|
uint64_t time_period_num,
|
|
|
|
ed25519_public_key_t *blinded_pk_out)
|
|
|
|
{
|
|
|
|
/* Our blinding key API requires a 32 bytes parameter. */
|
|
|
|
uint8_t param[DIGEST256_LEN];
|
|
|
|
|
|
|
|
tor_assert(pk);
|
|
|
|
tor_assert(blinded_pk_out);
|
|
|
|
tor_assert(!tor_mem_is_zero((char *) pk, ED25519_PUBKEY_LEN));
|
|
|
|
|
|
|
|
build_blinded_key_param(pk, secret, secret_len,
|
|
|
|
time_period_num, get_time_period_length(), param);
|
|
|
|
ed25519_public_blind(blinded_pk_out, pk, param);
|
2017-08-03 14:42:30 +02:00
|
|
|
|
|
|
|
memwipe(param, 0, sizeof(param));
|
2017-02-06 18:26:36 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* From a given ed25519 keypair kp and an optional secret, compute a blinded
|
|
|
|
* keypair for the current time period and put it in blinded_kp_out. This is
|
|
|
|
* only useful by the service side because the client doesn't have access to
|
|
|
|
* the identity secret key. */
|
|
|
|
void
|
|
|
|
hs_build_blinded_keypair(const ed25519_keypair_t *kp,
|
|
|
|
const uint8_t *secret, size_t secret_len,
|
|
|
|
uint64_t time_period_num,
|
|
|
|
ed25519_keypair_t *blinded_kp_out)
|
|
|
|
{
|
|
|
|
/* Our blinding key API requires a 32 bytes parameter. */
|
|
|
|
uint8_t param[DIGEST256_LEN];
|
|
|
|
|
|
|
|
tor_assert(kp);
|
|
|
|
tor_assert(blinded_kp_out);
|
|
|
|
/* Extra safety. A zeroed key is bad. */
|
|
|
|
tor_assert(!tor_mem_is_zero((char *) &kp->pubkey, ED25519_PUBKEY_LEN));
|
|
|
|
tor_assert(!tor_mem_is_zero((char *) &kp->seckey, ED25519_SECKEY_LEN));
|
|
|
|
|
|
|
|
build_blinded_key_param(&kp->pubkey, secret, secret_len,
|
|
|
|
time_period_num, get_time_period_length(), param);
|
|
|
|
ed25519_keypair_blind(blinded_kp_out, kp, param);
|
2017-08-03 14:42:30 +02:00
|
|
|
|
|
|
|
memwipe(param, 0, sizeof(param));
|
2017-02-06 18:26:36 +01:00
|
|
|
}
|
|
|
|
|
2017-02-13 14:32:13 +01:00
|
|
|
/* Return true if overlap mode is active given the date in consensus. If
|
|
|
|
* consensus is NULL, then we use the latest live consensus we can find. */
|
2017-05-05 20:55:26 +02:00
|
|
|
MOCK_IMPL(int,
|
|
|
|
hs_overlap_mode_is_active, (const networkstatus_t *consensus, time_t now))
|
2017-02-13 14:32:13 +01:00
|
|
|
{
|
2017-07-18 15:06:12 +02:00
|
|
|
time_t valid_after;
|
|
|
|
time_t srv_start_time, tp_start_time;
|
2017-02-13 14:32:13 +01:00
|
|
|
|
|
|
|
if (!consensus) {
|
|
|
|
consensus = networkstatus_get_live_consensus(now);
|
|
|
|
if (!consensus) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-18 15:06:12 +02:00
|
|
|
/* We consider to be in overlap mode when we are in the period of time
|
|
|
|
* between a fresh SRV and the beginning of the new time period (in the
|
|
|
|
* normal network this is between 00:00 (inclusive) and 12:00 UTC
|
|
|
|
* (exclusive)) */
|
|
|
|
valid_after = consensus->valid_after;
|
|
|
|
srv_start_time =sr_state_get_start_time_of_current_protocol_run(valid_after);
|
|
|
|
tp_start_time = hs_get_start_time_of_next_time_period(srv_start_time);
|
2017-02-13 14:32:13 +01:00
|
|
|
|
2017-07-18 15:06:12 +02:00
|
|
|
if (valid_after >= srv_start_time && valid_after < tp_start_time) {
|
2017-02-13 14:32:13 +01:00
|
|
|
return 1;
|
|
|
|
}
|
2017-07-18 15:06:12 +02:00
|
|
|
|
2017-03-08 23:31:36 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return 1 if any virtual port in ports needs a circuit with good uptime.
|
|
|
|
* Else return 0. */
|
|
|
|
int
|
|
|
|
hs_service_requires_uptime_circ(const smartlist_t *ports)
|
|
|
|
{
|
|
|
|
tor_assert(ports);
|
2017-02-13 14:32:13 +01:00
|
|
|
|
2017-03-08 23:31:36 +01:00
|
|
|
SMARTLIST_FOREACH_BEGIN(ports, rend_service_port_config_t *, p) {
|
|
|
|
if (smartlist_contains_int_as_string(get_options()->LongLivedPorts,
|
|
|
|
p->virtual_port)) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
} SMARTLIST_FOREACH_END(p);
|
2017-02-13 14:32:13 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-18 21:06:44 +02:00
|
|
|
/* Build hs_index which is used to find the responsible hsdirs. This index
 * value is used to select the responsible HSDir where their hsdir_index is
 * closest to this value.
 *    SHA3-256("store-at-idx" | blinded_public_key |
 *             INT_8(replicanum) | INT_8(period_length) | INT_8(period_num) )
 *
 * (INT_8() is the spec's notation for an 8-byte integer; each value is
 * hashed as 8 bytes in big-endian order, hence the tor_htonll() below.)
 *
 * hs_index_out must be large enough to receive DIGEST256_LEN bytes. */
void
hs_build_hs_index(uint64_t replica, const ed25519_public_key_t *blinded_pk,
                  uint64_t period_num, uint8_t *hs_index_out)
{
  crypto_digest_t *digest;

  tor_assert(blinded_pk);
  tor_assert(hs_index_out);

  /* Build hs_index. See construction at top of function comment. */
  digest = crypto_digest256_new(DIGEST_SHA3_256);
  /* Fixed prefix string (HS_INDEX_PREFIX), then the blinded public key. */
  crypto_digest_add_bytes(digest, HS_INDEX_PREFIX, HS_INDEX_PREFIX_LEN);
  crypto_digest_add_bytes(digest, (const char *) blinded_pk->pubkey,
                          ED25519_PUBKEY_LEN);

  /* Now setup INT_8(replicanum) | INT_8(period_length) | INT_8(period_num) */
  {
    uint64_t period_length = get_time_period_length();
    /* Three 8-byte big-endian integers, in the exact order shown above.
     * This order is part of the hash construction; do not reorder. */
    char buf[sizeof(uint64_t)*3];
    size_t offset = 0;
    set_uint64(buf, tor_htonll(replica));
    offset += sizeof(uint64_t);
    set_uint64(buf+offset, tor_htonll(period_length));
    offset += sizeof(uint64_t);
    set_uint64(buf+offset, tor_htonll(period_num));
    offset += sizeof(uint64_t);
    /* Make sure we filled the buffer exactly. */
    tor_assert(offset == sizeof(buf));

    crypto_digest_add_bytes(digest, buf, sizeof(buf));
  }

  crypto_digest_get_digest(digest, (char *) hs_index_out, DIGEST256_LEN);
  crypto_digest_free(digest);
}
|
|
|
|
|
|
|
|
/* Build hsdir_index which is used to find the responsible hsdirs. This is the
 * index value that is compare to the hs_index when selecting an HSDir.
 *    SHA3-256("node-idx" | node_identity |
 *             shared_random_value | INT_8(period_num) | INT_8(period_length) )
 *
 * (NOTE(review): the comment previously listed period_length before
 * period_num, but the code below hashes period_num first -- the comment has
 * been corrected to match the code; confirm against rend-spec-v3.)
 *
 * hsdir_index_out must be large enough to receive DIGEST256_LEN bytes. */
void
hs_build_hsdir_index(const ed25519_public_key_t *identity_pk,
                     const uint8_t *srv_value, uint64_t period_num,
                     uint8_t *hsdir_index_out)
{
  crypto_digest_t *digest;

  tor_assert(identity_pk);
  tor_assert(srv_value);
  tor_assert(hsdir_index_out);

  /* Build hsdir_index. See construction at top of function comment. */
  digest = crypto_digest256_new(DIGEST_SHA3_256);
  /* Fixed prefix string (HSDIR_INDEX_PREFIX), then the node's ed25519
   * identity key, then the shared random value (32 bytes). */
  crypto_digest_add_bytes(digest, HSDIR_INDEX_PREFIX, HSDIR_INDEX_PREFIX_LEN);
  crypto_digest_add_bytes(digest, (const char *) identity_pk->pubkey,
                          ED25519_PUBKEY_LEN);
  crypto_digest_add_bytes(digest, (const char *) srv_value, DIGEST256_LEN);

  {
    uint64_t time_period_length = get_time_period_length();
    /* Two 8-byte big-endian integers: period_num, then period_length.
     * This order is part of the hash construction; do not reorder. */
    char period_stuff[sizeof(uint64_t)*2];
    size_t offset = 0;
    set_uint64(period_stuff, tor_htonll(period_num));
    offset += sizeof(uint64_t);
    set_uint64(period_stuff+offset, tor_htonll(time_period_length));
    offset += sizeof(uint64_t);
    /* Make sure we filled the buffer exactly. */
    tor_assert(offset == sizeof(period_stuff));

    crypto_digest_add_bytes(digest, period_stuff, sizeof(period_stuff));
  }

  crypto_digest_get_digest(digest, (char *) hsdir_index_out, DIGEST256_LEN);
  crypto_digest_free(digest);
}
|
|
|
|
|
|
|
|
/* Return a newly allocated buffer containing the current shared random value
|
|
|
|
* or if not present, a disaster value is computed using the given time period
|
2017-08-04 11:21:14 +02:00
|
|
|
* number. If a consensus is provided in <b>ns</b>, use it to get the SRV
|
|
|
|
* value. This function can't fail. */
|
2017-04-18 21:06:44 +02:00
|
|
|
uint8_t *
|
2017-08-04 11:21:14 +02:00
|
|
|
hs_get_current_srv(uint64_t time_period_num, const networkstatus_t *ns)
|
2017-04-18 21:06:44 +02:00
|
|
|
{
|
|
|
|
uint8_t *sr_value = tor_malloc_zero(DIGEST256_LEN);
|
2017-08-04 11:21:14 +02:00
|
|
|
const sr_srv_t *current_srv = sr_get_current(ns);
|
2017-04-18 21:06:44 +02:00
|
|
|
|
|
|
|
if (current_srv) {
|
|
|
|
memcpy(sr_value, current_srv->value, sizeof(current_srv->value));
|
|
|
|
} else {
|
|
|
|
/* Disaster mode. */
|
|
|
|
get_disaster_srv(time_period_num, sr_value);
|
|
|
|
}
|
|
|
|
return sr_value;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return a newly allocated buffer containing the previous shared random
|
|
|
|
* value or if not present, a disaster value is computed using the given time
|
|
|
|
* period number. This function can't fail. */
|
|
|
|
uint8_t *
|
2017-08-04 11:21:14 +02:00
|
|
|
hs_get_previous_srv(uint64_t time_period_num, const networkstatus_t *ns)
|
2017-04-18 21:06:44 +02:00
|
|
|
{
|
|
|
|
uint8_t *sr_value = tor_malloc_zero(DIGEST256_LEN);
|
2017-08-04 11:21:14 +02:00
|
|
|
const sr_srv_t *previous_srv = sr_get_previous(ns);
|
2017-04-18 21:06:44 +02:00
|
|
|
|
|
|
|
if (previous_srv) {
|
|
|
|
memcpy(sr_value, previous_srv->value, sizeof(previous_srv->value));
|
|
|
|
} else {
|
|
|
|
/* Disaster mode. */
|
|
|
|
get_disaster_srv(time_period_num, sr_value);
|
|
|
|
}
|
|
|
|
return sr_value;
|
|
|
|
}
|
|
|
|
|
2017-04-19 17:06:19 +02:00
|
|
|
/* Return the number of replicas defined by a consensus parameter or the
|
|
|
|
* default value. */
|
|
|
|
int32_t
|
|
|
|
hs_get_hsdir_n_replicas(void)
|
|
|
|
{
|
|
|
|
/* The [1,16] range is a specification requirement. */
|
|
|
|
return networkstatus_get_param(NULL, "hsdir_n_replicas",
|
|
|
|
HS_DEFAULT_HSDIR_N_REPLICAS, 1, 16);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the spread fetch value defined by a consensus parameter or the
|
|
|
|
* default value. */
|
|
|
|
int32_t
|
|
|
|
hs_get_hsdir_spread_fetch(void)
|
|
|
|
{
|
|
|
|
/* The [1,128] range is a specification requirement. */
|
|
|
|
return networkstatus_get_param(NULL, "hsdir_spread_fetch",
|
|
|
|
HS_DEFAULT_HSDIR_SPREAD_FETCH, 1, 128);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the spread store value defined by a consensus parameter or the
|
|
|
|
* default value. */
|
|
|
|
int32_t
|
|
|
|
hs_get_hsdir_spread_store(void)
|
|
|
|
{
|
|
|
|
/* The [1,128] range is a specification requirement. */
|
|
|
|
return networkstatus_get_param(NULL, "hsdir_spread_store",
|
|
|
|
HS_DEFAULT_HSDIR_SPREAD_STORE, 1, 128);
|
|
|
|
}
|
|
|
|
|
2017-08-02 15:50:15 +02:00
|
|
|
/** <b>node</b> is an HSDir so make sure that we have assigned an hsdir index.
 * Return 1 if the node has a usable (non-NULL, non-zero) hsdir_index, else
 * return 0. (The previous comment claimed a 0/-1 convention, which did not
 * match the code.) */
static int
node_has_hsdir_index(const node_t *node)
{
  /* Caller must only pass nodes that advertise v3 HSDir support. */
  tor_assert(node_supports_v3_hsdir(node));

  /* A node can't have an HSDir index without a descriptor since we need desc
   * to get its ed25519 key */
  if (!node_has_descriptor(node)) {
    return 0;
  }

  /* At this point, since the node has a desc, this node must also have an
   * hsdir index. If not, something went wrong, so BUG out. */
  if (BUG(node->hsdir_index == NULL) ||
      BUG(tor_mem_is_zero((const char*)node->hsdir_index->current,
                          DIGEST256_LEN))) {
    return 0;
  }

  return 1;
}
|
|
|
|
|
2017-04-19 18:23:43 +02:00
|
|
|
/* For a given blinded key and time period number, get the responsible HSDir
 * and put their routerstatus_t object in the responsible_dirs list. If
 * is_next_period is true, the next hsdir_index of the node_t is used. If
 * is_client is true, the spread fetch consensus parameter is used else the
 * spread store is used which is only for upload. This function can't fail but
 * it is possible that the responsible_dirs list contains fewer nodes than
 * expected.
 *
 * This function goes over the latest consensus routerstatus list and sorts it
 * by their node_t hsdir_index then does a binary search to find the closest
 * node. All of this makes it a bit CPU intensive so use it wisely. */
void
hs_get_responsible_hsdirs(const ed25519_public_key_t *blinded_pk,
                          uint64_t time_period_num, int is_next_period,
                          int is_client, smartlist_t *responsible_dirs)
{
  smartlist_t *sorted_nodes;
  /* The compare function used for the smartlist bsearch. We have two
   * different depending on is_next_period. */
  int (*cmp_fct)(const void *, const void **);

  tor_assert(blinded_pk);
  tor_assert(responsible_dirs);

  sorted_nodes = smartlist_new();

  /* Add every node_t that support HSDir v3 for which we do have a valid
   * hsdir_index already computed for them for this consensus. */
  {
    networkstatus_t *c = networkstatus_get_latest_consensus();
    if (!c || smartlist_len(c->routerstatus_list) == 0) {
      /* Without a consensus there is nothing to select from; leave
       * responsible_dirs untouched. */
      log_warn(LD_REND, "No valid consensus so we can't get the responsible "
                        "hidden service directories.");
      goto done;
    }
    SMARTLIST_FOREACH_BEGIN(c->routerstatus_list, const routerstatus_t *, rs) {
      /* Even though this node_t object won't be modified and should be const,
       * we can't add const object in a smartlist_t. */
      node_t *n = node_get_mutable_by_id(rs->identity_digest);
      tor_assert(n);
      if (node_supports_v3_hsdir(n) && rs->is_hs_dir) {
        /* Skip nodes whose hsdir index hasn't been computed; they can't be
         * placed on the hash ring. */
        if (!node_has_hsdir_index(n)) {
          log_info(LD_GENERAL, "Node %s was found without hsdir index.",
                   node_describe(n));
          continue;
        }
        smartlist_add(sorted_nodes, n);
      }
    } SMARTLIST_FOREACH_END(rs);
  }
  if (smartlist_len(sorted_nodes) == 0) {
    log_warn(LD_REND, "No nodes found to be HSDir or supporting v3.");
    goto done;
  }

  /* First thing we have to do is sort all node_t by hsdir_index. The
   * is_next_period tells us if we want the current or the next one. Set the
   * bsearch compare function also while we are at it. */
  if (is_next_period) {
    smartlist_sort(sorted_nodes, compare_node_next_hsdir_index);
    cmp_fct = compare_digest_to_next_hsdir_index;
  } else {
    smartlist_sort(sorted_nodes, compare_node_current_hsdir_index);
    cmp_fct = compare_digest_to_current_hsdir_index;
  }

  /* For all replicas, we'll select a set of HSDirs using the consensus
   * parameters and the sorted list. The replica starting at value 1 is
   * defined by the specification. */
  for (int replica = 1; replica <= hs_get_hsdir_n_replicas(); replica++) {
    int idx, start, found, n_added = 0;
    uint8_t hs_index[DIGEST256_LEN] = {0};
    /* Number of node to add to the responsible dirs list depends on if we are
     * trying to fetch or store. A client always fetches. */
    int n_to_add = (is_client) ? hs_get_hsdir_spread_fetch() :
                                 hs_get_hsdir_spread_store();

    /* Get the index that we should use to select the node. */
    hs_build_hs_index(replica, blinded_pk, time_period_num, hs_index);
    /* The compare function pointer has been set correctly earlier. */
    start = idx = smartlist_bsearch_idx(sorted_nodes, hs_index, cmp_fct,
                                        &found);
    /* Getting the length of the list if no member is greater than the key we
     * are looking for so start at the first element. */
    if (idx == smartlist_len(sorted_nodes)) {
      start = idx = 0;
    }
    /* Walk the ring clockwise from the bsearch position until we have
     * collected n_to_add distinct directories (or exhausted the ring). */
    while (n_added < n_to_add) {
      const node_t *node = smartlist_get(sorted_nodes, idx);
      /* If the node has already been selected which is possible between
       * replicas, the specification says to skip over. */
      if (!smartlist_contains(responsible_dirs, node->rs)) {
        smartlist_add(responsible_dirs, node->rs);
        ++n_added;
      }
      if (++idx == smartlist_len(sorted_nodes)) {
        /* Wrap if we've reached the end of the list. */
        idx = 0;
      }
      if (idx == start) {
        /* We've gone over the whole list, stop and avoid infinite loop. */
        break;
      }
    }
  }

 done:
  /* Free only the container; the node_t objects belong to the nodelist. */
  smartlist_free(sorted_nodes);
}
|
|
|
|
|
2017-01-16 19:29:03 +01:00
|
|
|
/* Initialize the entire HS subsystem. This is called in tor_init() before any
 * torrc options are loaded. Only for >= v3. */
void
hs_init(void)
{
  /* Circuit map must exist before services/caches start using circuits. */
  hs_circuitmap_init();
  /* Set up the v3 service subsystem state. */
  hs_service_init();
  /* Set up the descriptor caches. */
  hs_cache_init();
}
|
|
|
|
|
|
|
|
/* Release and cleanup all memory of the HS subsystem (all version). This is
 * called by tor_free_all(). Mirrors the initialization done in hs_init(). */
void
hs_free_all(void)
{
  hs_circuitmap_free_all();
  hs_service_free_all();
  hs_cache_free_all();
}
|
|
|
|
|